author    | Benjamin Peterson <benjamin@python.org> | 2008-06-13 19:13:39 (GMT)
committer | Benjamin Peterson <benjamin@python.org> | 2008-06-13 19:13:39 (GMT)
commit    | dfd79494ce78868c937dc91eddd630cbdcae5611 (patch)
tree      | 497db9bd37079421b144f33635c6bdd4b7058db5 /Lib
parent    | c9798fc7094c8ddcd73cc73870bbe0a1d4b5b1b1 (diff)
convert multiprocessing to unix line endings
Diffstat (limited to 'Lib')
-rw-r--r-- | Lib/multiprocessing/__init__.py | 12
-rw-r--r-- | Lib/multiprocessing/connection.py | 14
-rw-r--r-- | Lib/multiprocessing/dummy/__init__.py | 286
-rw-r--r-- | Lib/multiprocessing/dummy/connection.py | 122
-rw-r--r-- | Lib/multiprocessing/forking.py | 20
-rw-r--r-- | Lib/multiprocessing/heap.py | 402
-rw-r--r-- | Lib/multiprocessing/managers.py | 50
-rw-r--r-- | Lib/multiprocessing/pool.py | 60
-rw-r--r-- | Lib/multiprocessing/process.py | 12
-rw-r--r-- | Lib/multiprocessing/queues.py | 34
-rw-r--r-- | Lib/multiprocessing/reduction.py | 16
-rw-r--r-- | Lib/multiprocessing/sharedctypes.py | 36
-rw-r--r-- | Lib/multiprocessing/synchronize.py | 14
-rw-r--r-- | Lib/multiprocessing/util.py | 2
-rw-r--r-- | Lib/test/test_multiprocessing.py | 3582
15 files changed, 2331 insertions, 2331 deletions
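The patch is a line-ending conversion only: every hunk removes a file's lines and re-adds them byte-identical except for CRLF becoming LF. A conversion like this can be reproduced with a short script; the sketch below is illustrative only (the helper name and hard-coded path are not part of the commit):

import os

def convert_to_unix_line_endings(path):
    # Read in binary mode so CRLF sequences are visible, then rewrite
    # the file with bare LF endings only if anything changed.
    f = open(path, 'rb')
    data = f.read()
    f.close()
    converted = data.replace('\r\n', '\n')
    if converted != data:
        f = open(path, 'wb')
        f.write(converted)
        f.close()

for dirpath, dirnames, filenames in os.walk('Lib/multiprocessing'):
    for name in filenames:
        if name.endswith('.py'):
            convert_to_unix_line_endings(os.path.join(dirpath, name))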
diff --git a/Lib/multiprocessing/__init__.py b/Lib/multiprocessing/__init__.py
index 1cb3222..decb2ad 100644
--- a/Lib/multiprocessing/__init__.py
+++ b/Lib/multiprocessing/__init__.py
@@ -68,10 +68,10 @@ from multiprocessing.process import Process, current_process, active_children
class ProcessError(Exception):
pass
-
+
class BufferTooShort(ProcessError):
pass
-
+
class TimeoutError(ProcessError):
pass
@@ -123,7 +123,7 @@ def cpu_count():
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
-
+
if num >= 1:
return num
else:
@@ -151,13 +151,13 @@ def log_to_stderr(level=None):
'''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
-
+
def allow_connection_pickling():
'''
Install support for sending connections and sockets between processes
'''
from multiprocessing import reduction
-
+
#
# Definitions depending on native semaphores
#
@@ -263,7 +263,7 @@ if sys.platform == 'win32':
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
- Useful for people embedding Python.
+ Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
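The hunk at @@ -123 above shows that cpu_count() raises NotImplementedError when the platform exposes no CPU count, so callers were expected to guard it. A minimal sketch of that pattern (the fallback of 1 is an assumption, mirroring this commit's test suite):

import multiprocessing

try:
    n = multiprocessing.cpu_count()
except NotImplementedError:
    # No way to determine the number of CPUs on this platform.
    n = 1
print 'using %d worker process(es)' % n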
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index 752d9ab..f5a3301 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -50,7 +50,7 @@ def arbitrary_address(family):
'''
if family == 'AF_INET':
return ('localhost', 0)
- elif family == 'AF_UNIX':
+ elif family == 'AF_UNIX':
return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
@@ -160,7 +160,7 @@ if sys.platform != 'win32':
c2 = _multiprocessing.Connection(fd2, readable=False)
return c1, c2
-
+
else:
from ._multiprocessing import win32
@@ -200,7 +200,7 @@ else:
c1 = _multiprocessing.PipeConnection(h1, writable=duplex)
c2 = _multiprocessing.PipeConnection(h2, readable=duplex)
-
+
return c1, c2
#
@@ -290,14 +290,14 @@ if sys.platform == 'win32':
)
self._handle_queue = [handle]
self._last_accepted = None
-
+
sub_debug('listener created with address=%r', self._address)
self.close = Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
-
+
def accept(self):
newhandle = win32.CreateNamedPipe(
self._address, win32.PIPE_ACCESS_DUPLEX,
@@ -320,7 +320,7 @@ if sys.platform == 'win32':
sub_debug('closing listener with address=%r', address)
for handle in queue:
close(handle)
-
+
def PipeClient(address):
'''
Return a connection object connected to the pipe given by `address`
@@ -397,7 +397,7 @@ class ConnectionWrapper(object):
self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
- setattr(self, attr, obj)
+ setattr(self, attr, obj)
def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)
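For context, the Pipe() and Connection objects touched above are the user-facing end of this module. A minimal usage sketch of the public API as it stood at this commit:

from multiprocessing import Process, Pipe

def child(conn):
    conn.send('hello from child')
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()      # duplex by default
    p = Process(target=child, args=(child_conn,))
    p.start()
    print parent_conn.recv()              # 'hello from child'
    p.join()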
diff --git a/Lib/multiprocessing/dummy/__init__.py b/Lib/multiprocessing/dummy/__init__.py
index cabf580..dd0f07b 100644
--- a/Lib/multiprocessing/dummy/__init__.py
+++ b/Lib/multiprocessing/dummy/__init__.py
@@ -1,143 +1,143 @@
-#
-# Support for the API of the multiprocessing package using threads
-#
-# multiprocessing/dummy/__init__.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [
- 'Process', 'current_process', 'active_children', 'freeze_support',
- 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
- 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
- ]
-
-#
-# Imports
-#
-
-import threading
-import sys
-import weakref
-import array
-import itertools
-
-from multiprocessing import TimeoutError, cpu_count
-from multiprocessing.dummy.connection import Pipe
-from threading import Lock, RLock, Semaphore, BoundedSemaphore
-from threading import Event
-from Queue import Queue
-
-#
-#
-#
-
-class DummyProcess(threading.Thread):
-
- def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
- threading.Thread.__init__(self, group, target, name, args, kwargs)
- self._pid = None
- self._children = weakref.WeakKeyDictionary()
- self._start_called = False
- self._parent = current_process()
-
- def start(self):
- assert self._parent is current_process()
- self._start_called = True
- self._parent._children[self] = None
- threading.Thread.start(self)
-
- def get_exitcode(self):
- if self._start_called and not self.is_alive():
- return 0
- else:
- return None
-
- # XXX
- if sys.version_info < (3, 0):
- is_alive = threading.Thread.is_alive.im_func
- get_name = threading.Thread.get_name.im_func
- set_name = threading.Thread.set_name.im_func
- is_daemon = threading.Thread.is_daemon.im_func
- set_daemon = threading.Thread.set_daemon.im_func
- else:
- is_alive = threading.Thread.is_alive
- get_name = threading.Thread.get_name
- set_name = threading.Thread.set_name
- is_daemon = threading.Thread.is_daemon
- set_daemon = threading.Thread.set_daemon
-
-#
-#
-#
-
-class Condition(threading._Condition):
- # XXX
- if sys.version_info < (3, 0):
- notify_all = threading._Condition.notify_all.im_func
- else:
- notify_all = threading._Condition.notify_all
-
-#
-#
-#
-
-Process = DummyProcess
-current_process = threading.current_thread
-current_process()._children = weakref.WeakKeyDictionary()
-
-def active_children():
- children = current_process()._children
- for p in list(children):
- if not p.is_alive():
- children.pop(p, None)
- return list(children)
-
-def freeze_support():
- pass
-
-#
-#
-#
-
-class Namespace(object):
- def __init__(self, **kwds):
- self.__dict__.update(kwds)
- def __repr__(self):
- items = self.__dict__.items()
- temp = []
- for name, value in items:
- if not name.startswith('_'):
- temp.append('%s=%r' % (name, value))
- temp.sort()
- return 'Namespace(%s)' % str.join(', ', temp)
-
-dict = dict
-list = list
-
-def Array(typecode, sequence, lock=True):
- return array.array(typecode, sequence)
-
-class Value(object):
- def __init__(self, typecode, value, lock=True):
- self._typecode = typecode
- self._value = value
- def _get(self):
- return self._value
- def _set(self, value):
- self._value = value
- value = property(_get, _set)
- def __repr__(self):
- return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
-
-def Manager():
- return sys.modules[__name__]
-
-def shutdown():
- pass
-
-def Pool(processes=None, initializer=None, initargs=()):
- from multiprocessing.pool import ThreadPool
- return ThreadPool(processes, initializer, initargs)
-
-JoinableQueue = Queue
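multiprocessing.dummy, listed above, re-exports the multiprocessing API backed by threads (Process is DummyProcess, Queue is Queue.Queue). A minimal usage sketch under that API:

import multiprocessing.dummy

def work(q):
    q.put('computed in a thread')

if __name__ == '__main__':
    q = multiprocessing.dummy.Queue()   # a plain Queue.Queue underneath
    p = multiprocessing.dummy.Process(target=work, args=(q,))
    p.start()                           # starts a thread, not a process
    print q.get()
    p.join()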
[+ lines omitted: they repeat the file's - lines, re-added with Unix line endings]
diff --git a/Lib/multiprocessing/dummy/connection.py b/Lib/multiprocessing/dummy/connection.py
index dd2bcb9..4f0a680 100644
--- a/Lib/multiprocessing/dummy/connection.py
+++ b/Lib/multiprocessing/dummy/connection.py
@@ -1,61 +1,61 @@
-#
-# Analogue of `multiprocessing.connection` which uses queues instead of sockets
-#
-# multiprocessing/dummy/connection.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [ 'Client', 'Listener', 'Pipe' ]
-
-from Queue import Queue
-
-
-families = [None]
-
-
-class Listener(object):
-
- def __init__(self, address=None, family=None, backlog=1):
- self._backlog_queue = Queue(backlog)
-
- def accept(self):
- return Connection(*self._backlog_queue.get())
-
- def close(self):
- self._backlog_queue = None
-
- address = property(lambda self: self._backlog_queue)
-
-
-def Client(address):
- _in, _out = Queue(), Queue()
- address.put((_out, _in))
- return Connection(_in, _out)
-
-
-def Pipe(duplex=True):
- a, b = Queue(), Queue()
- return Connection(a, b), Connection(b, a)
-
-
-class Connection(object):
-
- def __init__(self, _in, _out):
- self._out = _out
- self._in = _in
- self.send = self.send_bytes = _out.put
- self.recv = self.recv_bytes = _in.get
-
- def poll(self, timeout=0.0):
- if self._in.qsize() > 0:
- return True
- if timeout <= 0.0:
- return False
- self._in.not_empty.acquire()
- self._in.not_empty.wait(timeout)
- self._in.not_empty.release()
- return self._in.qsize() > 0
-
- def close(self):
- pass
[+ lines omitted: they repeat the file's - lines, re-added with Unix line endings]
diff --git a/Lib/multiprocessing/forking.py b/Lib/multiprocessing/forking.py
index 2c1d3cf..6107f07 100644
--- a/Lib/multiprocessing/forking.py
+++ b/Lib/multiprocessing/forking.py
@@ -92,7 +92,7 @@ if sys.platform != 'win32':
except OSError, e:
if self.wait(timeout=0.1) is None:
raise
-
+
@staticmethod
def thread_is_spawning():
return False
@@ -107,10 +107,10 @@ else:
import _subprocess
import copy_reg
import time
-
+
from ._multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
-
+
try:
from cPickle import dump, load, HIGHEST_PROTOCOL
except ImportError:
@@ -217,7 +217,7 @@ else:
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
-
+
return self.returncode
def poll(self):
@@ -230,7 +230,7 @@ else:
except WindowsError:
if self.wait(timeout=0.1) is None:
raise
-
+
#
#
#
@@ -308,7 +308,7 @@ else:
Return info about parent needed by child to unpickle process object
'''
from .util import _logger, _log_to_stderr
-
+
d = dict(
name=name,
sys_path=sys.path,
@@ -317,7 +317,7 @@ else:
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().get_authkey(),
)
-
+
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
@@ -336,7 +336,7 @@ else:
#
# Make (Pipe)Connection picklable
#
-
+
def reduce_connection(conn):
if not Popen.thread_is_spawning():
raise RuntimeError(
@@ -345,7 +345,7 @@ else:
)
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
conn.readable, conn.writable)
-
+
copy_reg.pickle(Connection, reduce_connection)
copy_reg.pickle(PipeConnection, reduce_connection)
@@ -367,7 +367,7 @@ def prepare(data):
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
-
+
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
diff --git a/Lib/multiprocessing/heap.py b/Lib/multiprocessing/heap.py
index 7e596ca..f6b3404 100644
--- a/Lib/multiprocessing/heap.py
+++ b/Lib/multiprocessing/heap.py
@@ -1,201 +1,201 @@
[- lines omitted: they match the file's + lines below, removed and re-added with Unix line endings]
+#
+# Module which supports allocation of memory from an mmap
+#
+# multiprocessing/heap.py
+#
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
+#
+
+import bisect
+import mmap
+import tempfile
+import os
+import sys
+import threading
+import itertools
+
+import _multiprocessing
+from multiprocessing.util import Finalize, info
+from multiprocessing.forking import assert_spawning
+
+__all__ = ['BufferWrapper']
+
+#
+# Inheirtable class which wraps an mmap, and from which blocks can be allocated
+#
+
+if sys.platform == 'win32':
+
+ from ._multiprocessing import win32
+
+ class Arena(object):
+
+ _counter = itertools.count()
+
+ def __init__(self, size):
+ self.size = size
+ self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
+ assert win32.GetLastError() == 0, 'tagname already in use'
+ self._state = (self.size, self.name)
+
+ def __getstate__(self):
+ assert_spawning(self)
+ return self._state
+
+ def __setstate__(self, state):
+ self.size, self.name = self._state = state
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
+ assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
+
+else:
+
+ class Arena(object):
+
+ def __init__(self, size):
+ self.buffer = mmap.mmap(-1, size)
+ self.size = size
+ self.name = None
+
+#
+# Class allowing allocation of chunks of memory from arenas
+#
+
+class Heap(object):
+
+ _alignment = 8
+
+ def __init__(self, size=mmap.PAGESIZE):
+ self._lastpid = os.getpid()
+ self._lock = threading.Lock()
+ self._size = size
+ self._lengths = []
+ self._len_to_seq = {}
+ self._start_to_block = {}
+ self._stop_to_block = {}
+ self._allocated_blocks = set()
+ self._arenas = []
+
+ @staticmethod
+ def _roundup(n, alignment):
+ # alignment must be a power of 2
+ mask = alignment - 1
+ return (n + mask) & ~mask
+
+ def _malloc(self, size):
+ # returns a large enough block -- it might be much larger
+ i = bisect.bisect_left(self._lengths, size)
+ if i == len(self._lengths):
+ length = self._roundup(max(self._size, size), mmap.PAGESIZE)
+ self._size *= 2
+ info('allocating a new mmap of length %d', length)
+ arena = Arena(length)
+ self._arenas.append(arena)
+ return (arena, 0, length)
+ else:
+ length = self._lengths[i]
+ seq = self._len_to_seq[length]
+ block = seq.pop()
+ if not seq:
+ del self._len_to_seq[length], self._lengths[i]
+
+ (arena, start, stop) = block
+ del self._start_to_block[(arena, start)]
+ del self._stop_to_block[(arena, stop)]
+ return block
+
+ def _free(self, block):
+ # free location and try to merge with neighbours
+ (arena, start, stop) = block
+
+ try:
+ prev_block = self._stop_to_block[(arena, start)]
+ except KeyError:
+ pass
+ else:
+ start, _ = self._absorb(prev_block)
+
+ try:
+ next_block = self._start_to_block[(arena, stop)]
+ except KeyError:
+ pass
+ else:
+ _, stop = self._absorb(next_block)
+
+ block = (arena, start, stop)
+ length = stop - start
+
+ try:
+ self._len_to_seq[length].append(block)
+ except KeyError:
+ self._len_to_seq[length] = [block]
+ bisect.insort(self._lengths, length)
+
+ self._start_to_block[(arena, start)] = block
+ self._stop_to_block[(arena, stop)] = block
+
+ def _absorb(self, block):
+ # deregister this block so it can be merged with a neighbour
+ (arena, start, stop) = block
+ del self._start_to_block[(arena, start)]
+ del self._stop_to_block[(arena, stop)]
+
+ length = stop - start
+ seq = self._len_to_seq[length]
+ seq.remove(block)
+ if not seq:
+ del self._len_to_seq[length]
+ self._lengths.remove(length)
+
+ return start, stop
+
+ def free(self, block):
+ # free a block returned by malloc()
+ assert os.getpid() == self._lastpid
+ self._lock.acquire()
+ try:
+ self._allocated_blocks.remove(block)
+ self._free(block)
+ finally:
+ self._lock.release()
+
+ def malloc(self, size):
+ # return a block of right size (possibly rounded up)
+ assert 0 <= size < sys.maxint
+ if os.getpid() != self._lastpid:
+ self.__init__() # reinitialize after fork
+ self._lock.acquire()
+ try:
+ size = self._roundup(max(size,1), self._alignment)
+ (arena, start, stop) = self._malloc(size)
+ new_stop = start + size
+ if new_stop < stop:
+ self._free((arena, new_stop, stop))
+ block = (arena, start, new_stop)
+ self._allocated_blocks.add(block)
+ return block
+ finally:
+ self._lock.release()
+
+#
+# Class representing a chunk of an mmap -- can be inherited
+#
+
+class BufferWrapper(object):
+
+ _heap = Heap()
+
+ def __init__(self, size):
+ assert 0 <= size < sys.maxint
+ block = BufferWrapper._heap.malloc(size)
+ self._state = (block, size)
+ Finalize(self, BufferWrapper._heap.free, args=(block,))
+
+ def get_address(self):
+ (arena, start, stop), size = self._state
+ address, length = _multiprocessing.address_of_buffer(arena.buffer)
+ assert size <= length
+ return address + start
+
+ def get_size(self):
+ return self._state[1]
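The Heap._roundup() helper above rounds a request up to a power-of-two alignment with a mask trick: adding alignment-1 and clearing the low bits. A standalone restatement with a few worked values:

def roundup(n, alignment):
    # alignment must be a power of 2: (n + mask) & ~mask rounds n up
    # to the next multiple of alignment.
    mask = alignment - 1
    return (n + mask) & ~mask

assert roundup(1, 8) == 8
assert roundup(8, 8) == 8        # already aligned, unchanged
assert roundup(9, 8) == 16
assert roundup(100, 4096) == 4096   # one mmap page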
diff --git a/Lib/multiprocessing/managers.py b/Lib/multiprocessing/managers.py
index 908c193..6c1d912 100644
--- a/Lib/multiprocessing/managers.py
+++ b/Lib/multiprocessing/managers.py
@@ -40,7 +40,7 @@ try:
bytes
except NameError:
bytes = str # XXX not needed in Py2.6 and Py3.0
-
+
#
# Register some things for pickling
#
@@ -55,7 +55,7 @@ if view_types[0] is not list:       # XXX only needed in Py3.0
return list, (list(obj),)
for view_type in view_types:
copy_reg.pickle(view_type, rebuild_as_list)
-
+
#
# Type for identifying shared objects
#
@@ -104,7 +104,7 @@ def convert_to_error(kind, result):
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
-
+
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
@@ -340,7 +340,7 @@ class Server(object):
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
-
+
util._run_finalizers(0)
for p in active_children():
@@ -358,7 +358,7 @@ class Server(object):
traceback.print_exc()
finally:
exit(0)
-
+
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
@@ -367,7 +367,7 @@ class Server(object):
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
-
+
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
@@ -456,7 +456,7 @@ class BaseManager(object):
'''
_registry = {}
_Server = Server
-
+
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().get_authkey()
@@ -487,7 +487,7 @@ class BaseManager(object):
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
-
+
def start(self):
'''
Spawn a server process for this manager object
@@ -570,10 +570,10 @@ class BaseManager(object):
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
- try:
+ try:
return dispatch(conn, None, 'number_of_objects')
finally:
- conn.close()
+ conn.close()
def __enter__(self):
return self
@@ -612,7 +612,7 @@ class BaseManager(object):
del BaseProxy._address_to_local[address]
except KeyError:
pass
-
+
address = property(lambda self: self._address)
@classmethod
@@ -640,7 +640,7 @@ class BaseManager(object):
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
-
+
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
@@ -709,9 +709,9 @@ class BaseProxy(object):
if incref:
self._incref()
-
+
util.register_after_fork(self, BaseProxy._after_fork)
-
+
def _connect(self):
util.debug('making connection to manager')
name = current_process().get_name()
@@ -720,7 +720,7 @@ class BaseProxy(object):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
-
+
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referrent and return a copy of the result
@@ -735,7 +735,7 @@ class BaseProxy(object):
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
-
+
if kind == '#RETURN':
return result
elif kind == '#PROXY':
@@ -793,7 +793,7 @@ class BaseProxy(object):
threading.current_thread().get_name())
tls.connection.close()
del tls.connection
-
+
def _after_fork(self):
self._manager = None
try:
@@ -806,7 +806,7 @@ class BaseProxy(object):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
-
+
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
@@ -817,7 +817,7 @@ class BaseProxy(object):
def __deepcopy__(self, memo):
return self._getvalue()
-
+
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
@@ -842,7 +842,7 @@ def RebuildProxy(func, token, serializer, kwds):
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
-
+
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
@@ -884,7 +884,7 @@ def AutoProxy(token, serializer, manager=None, authkey=None,
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
-
+
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
@@ -995,7 +995,7 @@ class NamespaceProxy(BaseProxy):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
- return callmethod('__getattribute__', (key,))
+ return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
@@ -1007,7 +1007,7 @@ class NamespaceProxy(BaseProxy):
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
-
+
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
@@ -1063,10 +1063,10 @@ PoolProxy._method_to_typeid_ = {
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
-
+
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
-
+
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
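The SyncManager registered above is what multiprocessing.Manager() returns: shared objects live in a server process and are reached through the proxies defined earlier in this file. A minimal usage sketch:

from multiprocessing import Process, Manager

def work(d, l):
    # Method calls on the proxies travel over a connection to the
    # manager's server process.
    d['count'] = d.get('count', 0) + 1
    l.append('done')

if __name__ == '__main__':
    manager = Manager()          # a started SyncManager instance
    d = manager.dict()
    l = manager.list()
    p = Process(target=work, args=(d, l))
    p.start()
    p.join()
    print d['count'], list(l)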
diff --git a/Lib/multiprocessing/pool.py b/Lib/multiprocessing/pool.py
index 0255c86..79f0a29 100644
--- a/Lib/multiprocessing/pool.py
+++ b/Lib/multiprocessing/pool.py
@@ -58,18 +58,18 @@ def worker(inqueue, outqueue, initializer=None, initargs=()):
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
-
+
if task is None:
debug('worker got sentinel -- exiting')
break
-
+
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception, e:
result = (False, e)
put((job, i, result))
-
+
#
# Class representing a process pool
#
@@ -91,7 +91,7 @@ class Pool(object):
processes = cpu_count()
except NotImplementedError:
processes = 1
-
+
self._pool = []
for i in range(processes):
w = self.Process(
@@ -102,7 +102,7 @@ class Pool(object):
w.set_name(w.get_name().replace('Process', 'PoolWorker'))
w.set_daemon(True)
w.start()
-
+
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
@@ -132,7 +132,7 @@ class Pool(object):
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
-
+
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
@@ -182,7 +182,7 @@ class Pool(object):
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
-
+
def apply_async(self, func, args=(), kwds={}, callback=None):
'''
Asynchronous equivalent of `apply()` builtin
@@ -199,12 +199,12 @@ class Pool(object):
assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
-
+
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
-
+
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
@@ -234,13 +234,13 @@ class Pool(object):
break
else:
debug('task handler got sentinel')
-
+
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
-
+
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
@@ -260,12 +260,12 @@ class Pool(object):
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
-
+
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
-
+
if task is None:
debug('result handler got sentinel')
break
@@ -321,7 +321,7 @@ class Pool(object):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
-
+
def close(self):
debug('closing pool')
if self._state == RUN:
@@ -355,7 +355,7 @@ class Pool(object):
task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
-
+
task_handler._state = TERMINATE
taskqueue.put(None) # sentinel
@@ -363,7 +363,7 @@ class Pool(object):
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.is_alive() or len(cache) == 0
-
+
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
@@ -396,14 +396,14 @@ class ApplyResult(object):
self._ready = False
self._callback = callback
cache[self._job] = self
-
+
def ready(self):
return self._ready
-
+
def successful(self):
assert self._ready
return self._success
-
+
def wait(self, timeout=None):
self._cond.acquire()
try:
@@ -438,7 +438,7 @@ class ApplyResult(object):
#
class MapResult(ApplyResult):
-
+
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
@@ -449,7 +449,7 @@ class MapResult(ApplyResult):
self._ready = True
else:
self._number_left = length//chunksize + bool(length % chunksize)
-
+
def _set(self, i, success_result):
success, result = success_result
if success:
@@ -492,10 +492,10 @@ class IMapIterator(object):
self._length = None
self._unsorted = {}
cache[self._job] = self
-
+
def __iter__(self):
return self
-
+
def next(self, timeout=None):
self._cond.acquire()
try:
@@ -520,7 +520,7 @@ class IMapIterator(object):
raise value
__next__ = next # XXX
-
+
def _set(self, i, obj):
self._cond.acquire()
try:
@@ -534,12 +534,12 @@ class IMapIterator(object):
self._cond.notify()
else:
self._unsorted[i] = obj
-
+
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
-
+
def _set_length(self, length):
self._cond.acquire()
try:
@@ -572,18 +572,18 @@ class IMapUnorderedIterator(IMapIterator):
#
class ThreadPool(Pool):
-
+
from .dummy import Process
-
+
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
-
+
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
-
+
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
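Pool ties the worker(), task-handler and result-handler pieces above together behind a small public API. A minimal usage sketch (apply_async returns the ApplyResult shown above):

from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=2)
    print pool.map(square, range(10))    # [0, 1, 4, ..., 81]
    result = pool.apply_async(square, (7,))
    print result.get(timeout=1)          # 49, via ApplyResult
    pool.close()
    pool.join()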
diff --git a/Lib/multiprocessing/process.py b/Lib/multiprocessing/process.py
index 915d863..43d8297 100644
--- a/Lib/multiprocessing/process.py
+++ b/Lib/multiprocessing/process.py
@@ -47,7 +47,7 @@ def active_children():
'''
_cleanup()
return list(_current_process._children)
-
+
#
#
#
@@ -69,7 +69,7 @@ class Process(object):
The class is analagous to `threading.Thread`
'''
_Popen = None
-
+
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = _current_process._counter.next()
@@ -91,7 +91,7 @@ class Process(object):
'''
if self._target:
self._target(*self._args, **self._kwargs)
-
+
def start(self):
'''
Start child process
@@ -114,7 +114,7 @@ class Process(object):
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
-
+
def join(self, timeout=None):
'''
Wait until child process terminates
@@ -217,11 +217,11 @@ class Process(object):
status, self._daemonic and ' daemon' or '')
##
-
+
def _bootstrap(self):
from . import util
global _current_process
-
+
try:
self._children = set()
self._counter = itertools.count(1)
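Process at this commit still exposes the accessor-style API (get_name(), set_daemon(), get_exitcode()) rather than the later properties; the test suite below uses the same calls. A minimal sketch against that API:

import multiprocessing
import time

def child():
    time.sleep(0.1)

if __name__ == '__main__':
    p = multiprocessing.Process(target=child, name='Worker')
    p.set_daemon(True)               # accessor API as of this commit
    p.start()
    print p.get_name(), p.is_alive()
    p.join()
    print p.get_exitcode()           # 0 after a clean exit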
diff --git a/Lib/multiprocessing/queues.py b/Lib/multiprocessing/queues.py
index 78cb362..ea89090 100644
--- a/Lib/multiprocessing/queues.py
+++ b/Lib/multiprocessing/queues.py
@@ -41,9 +41,9 @@ class Queue(object):
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
-
+
self._after_fork()
-
+
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
@@ -51,12 +51,12 @@ class Queue(object):
assert_spawning(self)
return (self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
-
+
def __setstate__(self, state):
(self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
-
+
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
@@ -69,7 +69,7 @@ class Queue(object):
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
-
+
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
@@ -93,7 +93,7 @@ class Queue(object):
return res
finally:
self._rlock.release()
-
+
else:
if block:
deadline = time.time() + timeout
@@ -135,7 +135,7 @@ class Queue(object):
assert self._closed
if self._jointhread:
self._jointhread()
-
+
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
@@ -146,7 +146,7 @@ class Queue(object):
def _start_thread(self):
debug('Queue._start_thread()')
-
+
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
@@ -174,14 +174,14 @@ class Queue(object):
[weakref.ref(self._thread)],
exitpriority=-5
)
-
+
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
-
+
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
@@ -191,7 +191,7 @@ class Queue(object):
debug('... queue thread joined')
else:
debug('... queue thread already dead')
-
+
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
@@ -206,7 +206,7 @@ class Queue(object):
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
-
+
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
@@ -217,7 +217,7 @@ class Queue(object):
wrelease = writelock.release
else:
wacquire = None
-
+
try:
while 1:
nacquire()
@@ -257,7 +257,7 @@ class Queue(object):
traceback.print_exc()
except Exception:
pass
-
+
_sentinel = object()
#
@@ -274,7 +274,7 @@ class JoinableQueue(Queue):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
-
+
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
@@ -285,7 +285,7 @@ class JoinableQueue(Queue):
def put(self, item, block=True, timeout=None):
Queue.put(self, item, block, timeout)
self._unfinished_tasks.release()
-
+
def task_done(self):
self._cond.acquire()
try:
@@ -295,7 +295,7 @@ class JoinableQueue(Queue):
self._cond.notify_all()
finally:
self._cond.release()
-
+
def join(self):
self._cond.acquire()
try:
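JoinableQueue above adds an unfinished-task semaphore so join() can block until every put() has been matched by a task_done(). A minimal sketch (the None sentinel is a convention of this example, not part of the API):

from multiprocessing import Process, JoinableQueue

def worker(q):
    for item in iter(q.get, None):
        # ... process item ...
        q.task_done()
    q.task_done()                    # account for the None sentinel

if __name__ == '__main__':
    q = JoinableQueue()
    p = Process(target=worker, args=(q,))
    p.start()
    for i in range(5):
        q.put(i)
    q.put(None)                      # tell the worker to stop
    q.join()                         # blocks until every put() is task_done()
    p.join()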
diff --git a/Lib/multiprocessing/reduction.py b/Lib/multiprocessing/reduction.py
index 0d6cf4f..17778ef 100644
--- a/Lib/multiprocessing/reduction.py
+++ b/Lib/multiprocessing/reduction.py
@@ -36,7 +36,7 @@ if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
if sys.platform == 'win32':
import _subprocess
from ._multiprocessing import win32
-
+
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
@@ -46,14 +46,14 @@ if sys.platform == 'win32':
conn.send(new_handle)
finally:
close(process_handle)
-
+
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid):
_multiprocessing.sendfd(conn.fileno(), handle)
-
+
def recv_handle(conn):
return _multiprocessing.recvfd(conn.fileno())
@@ -93,7 +93,7 @@ def _get_listener():
def _serve():
from .util import is_exiting, sub_warning
-
+
while 1:
try:
conn = _listener.accept()
@@ -109,7 +109,7 @@ def _serve():
'thread for sharing handles raised exception :\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
-
+
#
# Functions to be used for pickling/unpickling objects with handles
#
@@ -176,15 +176,15 @@ copy_reg.pickle(socket.socket, reduce_socket)
#
if sys.platform == 'win32':
-
+
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
-
+
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.PipeConnection(
handle, readable=readable, writable=writable
)
-
+
copy_reg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)
diff --git a/Lib/multiprocessing/sharedctypes.py b/Lib/multiprocessing/sharedctypes.py
index 6877249..808d312 100644
--- a/Lib/multiprocessing/sharedctypes.py
+++ b/Lib/multiprocessing/sharedctypes.py
@@ -92,10 +92,10 @@ def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
-
+
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
-
+
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
@@ -123,7 +123,7 @@ def reduce_ctype(obj):
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
-
+
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
@@ -170,7 +170,7 @@ class_cache = weakref.WeakKeyDictionary()
#
class SynchronizedBase(object):
-
+
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
@@ -180,55 +180,55 @@ class SynchronizedBase(object):
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
-
+
def get_obj(self):
return self._obj
-
+
def get_lock(self):
return self._lock
-
+
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
-
-
+
+
class Synchronized(SynchronizedBase):
value = make_property('value')
-
-
+
+
class SynchronizedArray(SynchronizedBase):
-
+
def __len__(self):
return len(self._obj)
-
+
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
-
+
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
-
+
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
-
+
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
-
-
+
+
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
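The Synchronized wrappers above are what multiprocessing.sharedctypes.Value() and Array() hand back when lock=True: shared ctypes objects whose accesses go through a lock. A minimal sketch:

from multiprocessing import Process
from multiprocessing.sharedctypes import Value, Array

def work(n, a):
    n.value += 1                     # Synchronized wrapper around a c_int
    for i in range(len(a)):          # __len__/__getitem__ take the lock
        a[i] = -a[i]

if __name__ == '__main__':
    n = Value('i', 0)
    a = Array('d', [0.25, 0.5])      # a SynchronizedArray
    p = Process(target=work, args=(n, a))
    p.start()
    p.join()
    print n.value, a[:]              # 1 [-0.25, -0.5]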
diff --git a/Lib/multiprocessing/synchronize.py b/Lib/multiprocessing/synchronize.py
index d642032..6a7189a 100644
--- a/Lib/multiprocessing/synchronize.py
+++ b/Lib/multiprocessing/synchronize.py
@@ -38,7 +38,7 @@ class SemLock(object):
sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
debug('created semlock with handle %s' % sl.handle)
self._make_methods()
-
+
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
@@ -129,7 +129,7 @@ class RLock(SemLock):
def __init__(self):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
-
+
def __repr__(self):
try:
if self._semlock._is_mine():
@@ -210,17 +210,17 @@ class Condition(object):
def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
-
+
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
-
+
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
-
+
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
@@ -233,7 +233,7 @@ class Condition(object):
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
-
+
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
@@ -266,7 +266,7 @@ class Event(object):
return False
finally:
self._cond.release()
-
+
def set(self):
self._cond.acquire()
try:
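Event, whose set() method appears above, is built from a Condition plus a flag; set() wakes any process blocked in wait(). A minimal sketch:

from multiprocessing import Process, Event

def waiter(ev):
    ev.wait()                        # blocks until set() is called
    print 'event was set'

if __name__ == '__main__':
    ev = Event()
    p = Process(target=waiter, args=(ev,))
    p.start()
    ev.set()                         # wakes the waiting process
    p.join()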
diff --git a/Lib/multiprocessing/util.py b/Lib/multiprocessing/util.py
index d1b190c..25ff8bd 100644
--- a/Lib/multiprocessing/util.py
+++ b/Lib/multiprocessing/util.py
@@ -83,7 +83,7 @@ def _check_logger_class():
import logging
if hasattr(logging, 'multiprocessing'):
return
-
+
logging._acquireLock()
try:
OldLoggerClass = logging.getLoggerClass()
diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py
index d75fd20..4d3527c 100644
--- a/Lib/test/test_multiprocessing.py
+++ b/Lib/test/test_multiprocessing.py
@@ -1,1791 +1,1791 @@
-#
-# Unit tests for the multiprocessing package
-#
-
-import unittest
-import threading
-import Queue
-import time
-import sys
-import os
-import gc
-import signal
-import array
-import copy
-import socket
-import random
-import logging
-
-import multiprocessing.dummy
-import multiprocessing.connection
-import multiprocessing.managers
-import multiprocessing.heap
-import multiprocessing.managers
-import multiprocessing.pool
-import _multiprocessing
-
-from multiprocessing import util
-
-#
-#
-#
-
-if sys.version_info >= (3, 0):
- def latin(s):
- return s.encode('latin')
-else:
- latin = str
-
-try:
- bytes
-except NameError:
- bytes = str
- def bytearray(seq):
- return array.array('c', seq)
-
-#
-# Constants
-#
-
-LOG_LEVEL = util.SUBWARNING
-#LOG_LEVEL = logging.WARNING
-
-DELTA = 0.1
-CHECK_TIMINGS = False # making true makes tests take a lot longer
- # and can sometimes cause some non-serious
- # failures because some calls block a bit
- # longer than expected
-if CHECK_TIMINGS:
- TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
-else:
- TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
-
-HAVE_GETVALUE = not getattr(_multiprocessing,
- 'HAVE_BROKEN_SEM_GETVALUE', False)
-
-#
-# Creates a wrapper for a function which records the time it takes to finish
-#
-
-class TimingWrapper(object):
-
- def __init__(self, func):
- self.func = func
- self.elapsed = None
-
- def __call__(self, *args, **kwds):
- t = time.time()
- try:
- return self.func(*args, **kwds)
- finally:
- self.elapsed = time.time() - t
-
-#
-# Base class for test cases
-#
-
-class BaseTestCase(object):
-
- ALLOWED_TYPES = ('processes', 'manager', 'threads')
-
- def assertTimingAlmostEqual(self, a, b):
- if CHECK_TIMINGS:
- self.assertAlmostEqual(a, b, 1)
-
- def assertReturnsIfImplemented(self, value, func, *args):
- try:
- res = func(*args)
- except NotImplementedError:
- pass
- else:
- return self.assertEqual(value, res)
-
-#
-# Return the value of a semaphore
-#
-
-def get_value(self):
- try:
- return self.get_value()
- except AttributeError:
- try:
- return self._Semaphore__value
- except AttributeError:
- try:
- return self._value
- except AttributeError:
- raise NotImplementedError
-
-#
-# Testcases
-#
-
-class _TestProcess(BaseTestCase):
-
- ALLOWED_TYPES = ('processes', 'threads')
-
- def test_current(self):
- if self.TYPE == 'threads':
- return
-
- current = self.current_process()
- authkey = current.get_authkey()
-
- self.assertTrue(current.is_alive())
- self.assertTrue(not current.is_daemon())
- self.assertTrue(isinstance(authkey, bytes))
- self.assertTrue(len(authkey) > 0)
- self.assertEqual(current.get_ident(), os.getpid())
- self.assertEqual(current.get_exitcode(), None)
-
- def _test(self, q, *args, **kwds):
- current = self.current_process()
- q.put(args)
- q.put(kwds)
- q.put(current.get_name())
- if self.TYPE != 'threads':
- q.put(bytes(current.get_authkey()))
- q.put(current.pid)
-
- def test_process(self):
- q = self.Queue(1)
- e = self.Event()
- args = (q, 1, 2)
- kwargs = {'hello':23, 'bye':2.54}
- name = 'SomeProcess'
- p = self.Process(
- target=self._test, args=args, kwargs=kwargs, name=name
- )
- p.set_daemon(True)
- current = self.current_process()
-
- if self.TYPE != 'threads':
- self.assertEquals(p.get_authkey(), current.get_authkey())
- self.assertEquals(p.is_alive(), False)
- self.assertEquals(p.is_daemon(), True)
- self.assertTrue(p not in self.active_children())
- self.assertTrue(type(self.active_children()) is list)
- self.assertEqual(p.get_exitcode(), None)
-
- p.start()
-
- self.assertEquals(p.get_exitcode(), None)
- self.assertEquals(p.is_alive(), True)
- self.assertTrue(p in self.active_children())
-
- self.assertEquals(q.get(), args[1:])
- self.assertEquals(q.get(), kwargs)
- self.assertEquals(q.get(), p.get_name())
- if self.TYPE != 'threads':
- self.assertEquals(q.get(), current.get_authkey())
- self.assertEquals(q.get(), p.pid)
-
- p.join()
-
- self.assertEquals(p.get_exitcode(), 0)
- self.assertEquals(p.is_alive(), False)
- self.assertTrue(p not in self.active_children())
-
- def _test_terminate(self):
- time.sleep(1000)
-
- def test_terminate(self):
- if self.TYPE == 'threads':
- return
-
- p = self.Process(target=self._test_terminate)
- p.set_daemon(True)
- p.start()
-
- self.assertEqual(p.is_alive(), True)
- self.assertTrue(p in self.active_children())
- self.assertEqual(p.get_exitcode(), None)
-
- p.terminate()
-
- join = TimingWrapper(p.join)
- self.assertEqual(join(), None)
- self.assertTimingAlmostEqual(join.elapsed, 0.0)
-
- self.assertEqual(p.is_alive(), False)
- self.assertTrue(p not in self.active_children())
-
- p.join()
-
- # XXX sometimes get p.get_exitcode() == 0 on Windows ...
- #self.assertEqual(p.get_exitcode(), -signal.SIGTERM)
-
- def test_cpu_count(self):
- try:
- cpus = multiprocessing.cpu_count()
- except NotImplementedError:
- cpus = 1
- self.assertTrue(type(cpus) is int)
- self.assertTrue(cpus >= 1)
-
- def test_active_children(self):
- self.assertEqual(type(self.active_children()), list)
-
- p = self.Process(target=time.sleep, args=(DELTA,))
- self.assertTrue(p not in self.active_children())
-
- p.start()
- self.assertTrue(p in self.active_children())
-
- p.join()
- self.assertTrue(p not in self.active_children())
-
- def _test_recursion(self, wconn, id):
- from multiprocessing import forking
- wconn.send(id)
- if len(id) < 2:
- for i in range(2):
- p = self.Process(
- target=self._test_recursion, args=(wconn, id+[i])
- )
- p.start()
- p.join()
-
- def test_recursion(self):
- rconn, wconn = self.Pipe(duplex=False)
- self._test_recursion(wconn, [])
-
- time.sleep(DELTA)
- result = []
- while rconn.poll():
- result.append(rconn.recv())
-
- expected = [
- [],
- [0],
- [0, 0],
- [0, 1],
- [1],
- [1, 0],
- [1, 1]
- ]
- self.assertEqual(result, expected)
-
-#
-#
-#
-
-class _UpperCaser(multiprocessing.Process):
-
- def __init__(self):
- multiprocessing.Process.__init__(self)
- self.child_conn, self.parent_conn = multiprocessing.Pipe()
-
- def run(self):
- self.parent_conn.close()
- for s in iter(self.child_conn.recv, None):
- self.child_conn.send(s.upper())
- self.child_conn.close()
-
- def submit(self, s):
- assert type(s) is str
- self.parent_conn.send(s)
- return self.parent_conn.recv()
-
- def stop(self):
- self.parent_conn.send(None)
- self.parent_conn.close()
- self.child_conn.close()
-
-class _TestSubclassingProcess(BaseTestCase):
-
- ALLOWED_TYPES = ('processes',)
-
- def test_subclassing(self):
- uppercaser = _UpperCaser()
- uppercaser.start()
- self.assertEqual(uppercaser.submit('hello'), 'HELLO')
- self.assertEqual(uppercaser.submit('world'), 'WORLD')
- uppercaser.stop()
- uppercaser.join()
-
-#
-#
-#
-
-def queue_empty(q):
- if hasattr(q, 'empty'):
- return q.empty()
- else:
- return q.qsize() == 0
-
-def queue_full(q, maxsize):
- if hasattr(q, 'full'):
- return q.full()
- else:
- return q.qsize() == maxsize
-
-
-class _TestQueue(BaseTestCase):
-
-
- def _test_put(self, queue, child_can_start, parent_can_continue):
- child_can_start.wait()
- for i in range(6):
- queue.get()
- parent_can_continue.set()
-
- def test_put(self):
- MAXSIZE = 6
- queue = self.Queue(maxsize=MAXSIZE)
- child_can_start = self.Event()
- parent_can_continue = self.Event()
-
- proc = self.Process(
- target=self._test_put,
- args=(queue, child_can_start, parent_can_continue)
- )
- proc.set_daemon(True)
- proc.start()
-
- self.assertEqual(queue_empty(queue), True)
- self.assertEqual(queue_full(queue, MAXSIZE), False)
-
- queue.put(1)
- queue.put(2, True)
- queue.put(3, True, None)
- queue.put(4, False)
- queue.put(5, False, None)
- queue.put_nowait(6)
-
- # the values may be in buffer but not yet in pipe so sleep a bit
- time.sleep(DELTA)
-
- self.assertEqual(queue_empty(queue), False)
- self.assertEqual(queue_full(queue, MAXSIZE), True)
-
- put = TimingWrapper(queue.put)
- put_nowait = TimingWrapper(queue.put_nowait)
-
- self.assertRaises(Queue.Full, put, 7, False)
- self.assertTimingAlmostEqual(put.elapsed, 0)
-
- self.assertRaises(Queue.Full, put, 7, False, None)
- self.assertTimingAlmostEqual(put.elapsed, 0)
-
- self.assertRaises(Queue.Full, put_nowait, 7)
- self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
-
- self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
- self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
-
- self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
- self.assertTimingAlmostEqual(put.elapsed, 0)
-
- self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
- self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
-
- child_can_start.set()
- parent_can_continue.wait()
-
- self.assertEqual(queue_empty(queue), True)
- self.assertEqual(queue_full(queue, MAXSIZE), False)
-
- proc.join()
-
- def _test_get(self, queue, child_can_start, parent_can_continue):
- child_can_start.wait()
- queue.put(1)
- queue.put(2)
- queue.put(3)
- queue.put(4)
- queue.put(5)
- parent_can_continue.set()
-
- def test_get(self):
- queue = self.Queue()
- child_can_start = self.Event()
- parent_can_continue = self.Event()
-
- proc = self.Process(
- target=self._test_get,
- args=(queue, child_can_start, parent_can_continue)
- )
- proc.set_daemon(True)
- proc.start()
-
- self.assertEqual(queue_empty(queue), True)
-
- child_can_start.set()
- parent_can_continue.wait()
-
- time.sleep(DELTA)
- self.assertEqual(queue_empty(queue), False)
-
- self.assertEqual(queue.get(), 1)
- self.assertEqual(queue.get(True, None), 2)
- self.assertEqual(queue.get(True), 3)
- self.assertEqual(queue.get(timeout=1), 4)
- self.assertEqual(queue.get_nowait(), 5)
-
- self.assertEqual(queue_empty(queue), True)
-
- get = TimingWrapper(queue.get)
- get_nowait = TimingWrapper(queue.get_nowait)
-
- self.assertRaises(Queue.Empty, get, False)
- self.assertTimingAlmostEqual(get.elapsed, 0)
-
- self.assertRaises(Queue.Empty, get, False, None)
- self.assertTimingAlmostEqual(get.elapsed, 0)
-
- self.assertRaises(Queue.Empty, get_nowait)
- self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
-
- self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
- self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
-
- self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
- self.assertTimingAlmostEqual(get.elapsed, 0)
-
- self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
- self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
-
- proc.join()
-
- def _test_fork(self, queue):
- for i in range(10, 20):
- queue.put(i)
- # note that at this point the items may only be buffered, so the
- # process cannot shut down until the feeder thread has finished
- # pushing items onto the pipe.
-
- def test_fork(self):
- # Old versions of Queue would fail to create a new feeder
- # thread for a forked process if the original process had its
- # own feeder thread. This test checks that this no longer
- # happens.
-
- queue = self.Queue()
-
- # put items on queue so that main process starts a feeder thread
- for i in range(10):
- queue.put(i)
-
- # wait to make sure thread starts before we fork a new process
- time.sleep(DELTA)
-
- # fork process
- p = self.Process(target=self._test_fork, args=(queue,))
- p.start()
-
- # check that all expected items are in the queue
- for i in range(20):
- self.assertEqual(queue.get(), i)
- self.assertRaises(Queue.Empty, queue.get, False)
-
- p.join()
-
- def test_qsize(self):
- q = self.Queue()
- try:
- self.assertEqual(q.qsize(), 0)
- except NotImplementedError:
- return
- q.put(1)
- self.assertEqual(q.qsize(), 1)
- q.put(5)
- self.assertEqual(q.qsize(), 2)
- q.get()
- self.assertEqual(q.qsize(), 1)
- q.get()
- self.assertEqual(q.qsize(), 0)
-
- def _test_task_done(self, q):
- for obj in iter(q.get, None):
- time.sleep(DELTA)
- q.task_done()
-
- def test_task_done(self):
- queue = self.JoinableQueue()
-
- if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
- return
-
- workers = [self.Process(target=self._test_task_done, args=(queue,))
- for i in xrange(4)]
-
- for p in workers:
- p.start()
-
- for i in xrange(10):
- queue.put(i)
-
- queue.join()
-
- for p in workers:
- queue.put(None)
-
- for p in workers:
- p.join()
-
-#
-#
-#
-
-class _TestLock(BaseTestCase):
-
- def test_lock(self):
- lock = self.Lock()
- self.assertEqual(lock.acquire(), True)
- self.assertEqual(lock.acquire(False), False)
- self.assertEqual(lock.release(), None)
- self.assertRaises((ValueError, threading.ThreadError), lock.release)
-
- def test_rlock(self):
- lock = self.RLock()
- self.assertEqual(lock.acquire(), True)
- self.assertEqual(lock.acquire(), True)
- self.assertEqual(lock.acquire(), True)
- self.assertEqual(lock.release(), None)
- self.assertEqual(lock.release(), None)
- self.assertEqual(lock.release(), None)
- self.assertRaises((AssertionError, RuntimeError), lock.release)
-
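Both tests lean on acquire(False) returning False immediately when the lock cannot be taken. The same flag supports the usual try-lock pattern outside the tests (a sketch under the same API):

    import multiprocessing

    lock = multiprocessing.Lock()

    def try_work():
        if not lock.acquire(False):    # non-blocking attempt
            return False               # another process holds the lock
        try:
            pass                       # ... critical section ...
        finally:
            lock.release()
        return True
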
-
-class _TestSemaphore(BaseTestCase):
-
- def _test_semaphore(self, sem):
- self.assertReturnsIfImplemented(2, get_value, sem)
- self.assertEqual(sem.acquire(), True)
- self.assertReturnsIfImplemented(1, get_value, sem)
- self.assertEqual(sem.acquire(), True)
- self.assertReturnsIfImplemented(0, get_value, sem)
- self.assertEqual(sem.acquire(False), False)
- self.assertReturnsIfImplemented(0, get_value, sem)
- self.assertEqual(sem.release(), None)
- self.assertReturnsIfImplemented(1, get_value, sem)
- self.assertEqual(sem.release(), None)
- self.assertReturnsIfImplemented(2, get_value, sem)
-
- def test_semaphore(self):
- sem = self.Semaphore(2)
- self._test_semaphore(sem)
- self.assertEqual(sem.release(), None)
- self.assertReturnsIfImplemented(3, get_value, sem)
- self.assertEqual(sem.release(), None)
- self.assertReturnsIfImplemented(4, get_value, sem)
-
- def test_bounded_semaphore(self):
- sem = self.BoundedSemaphore(2)
- self._test_semaphore(sem)
- # Currently fails on OS X
- #if HAVE_GETVALUE:
- # self.assertRaises(ValueError, sem.release)
- # self.assertReturnsIfImplemented(2, get_value, sem)
-
- def test_timeout(self):
- if self.TYPE != 'processes':
- return
-
- sem = self.Semaphore(0)
- acquire = TimingWrapper(sem.acquire)
-
- self.assertEqual(acquire(False), False)
- self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
-
- self.assertEqual(acquire(False, None), False)
- self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
-
- self.assertEqual(acquire(False, TIMEOUT1), False)
- self.assertTimingAlmostEqual(acquire.elapsed, 0)
-
- self.assertEqual(acquire(True, TIMEOUT2), False)
- self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
-
- self.assertEqual(acquire(timeout=TIMEOUT3), False)
- self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
-
-
-class _TestCondition(BaseTestCase):
-
- def f(self, cond, sleeping, woken, timeout=None):
- cond.acquire()
- sleeping.release()
- cond.wait(timeout)
- woken.release()
- cond.release()
-
- def check_invariant(self, cond):
- # this is only supposed to succeed when there are no sleepers
- if self.TYPE == 'processes':
- try:
- sleepers = (cond._sleeping_count.get_value() -
- cond._woken_count.get_value())
- self.assertEqual(sleepers, 0)
- self.assertEqual(cond._wait_semaphore.get_value(), 0)
- except NotImplementedError:
- pass
-
- def test_notify(self):
- cond = self.Condition()
- sleeping = self.Semaphore(0)
- woken = self.Semaphore(0)
-
- p = self.Process(target=self.f, args=(cond, sleeping, woken))
- p.set_daemon(True)
- p.start()
-
- p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
- p.set_daemon(True)
- p.start()
-
- # wait for both children to start sleeping
- sleeping.acquire()
- sleeping.acquire()
-
- # check no process/thread has woken up
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(0, get_value, woken)
-
- # wake up one process/thread
- cond.acquire()
- cond.notify()
- cond.release()
-
- # check one process/thread has woken up
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(1, get_value, woken)
-
- # wake up another
- cond.acquire()
- cond.notify()
- cond.release()
-
- # check other has woken up
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(2, get_value, woken)
-
- # check state is not mucked up
- self.check_invariant(cond)
- p.join()
-
- def test_notify_all(self):
- cond = self.Condition()
- sleeping = self.Semaphore(0)
- woken = self.Semaphore(0)
-
- # start some threads/processes which will timeout
- for i in range(3):
- p = self.Process(target=self.f,
- args=(cond, sleeping, woken, TIMEOUT1))
- p.set_daemon(True)
- p.start()
-
- t = threading.Thread(target=self.f,
- args=(cond, sleeping, woken, TIMEOUT1))
- t.set_daemon(True)
- t.start()
-
- # wait for them all to sleep
- for i in xrange(6):
- sleeping.acquire()
-
- # check they have all timed out
- for i in xrange(6):
- woken.acquire()
- self.assertReturnsIfImplemented(0, get_value, woken)
-
- # check state is not mucked up
- self.check_invariant(cond)
-
- # start some more threads/processes
- for i in range(3):
- p = self.Process(target=self.f, args=(cond, sleeping, woken))
- p.set_daemon(True)
- p.start()
-
- t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
- t.set_daemon(True)
- t.start()
-
- # wait for them to all sleep
- for i in xrange(6):
- sleeping.acquire()
-
- # check no process/thread has woken up
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(0, get_value, woken)
-
- # wake them all up
- cond.acquire()
- cond.notify_all()
- cond.release()
-
- # check they have all woken
- time.sleep(DELTA)
- self.assertReturnsIfImplemented(6, get_value, woken)
-
- # check state is not mucked up
- self.check_invariant(cond)
-
- def test_timeout(self):
- cond = self.Condition()
- wait = TimingWrapper(cond.wait)
- cond.acquire()
- res = wait(TIMEOUT1)
- cond.release()
- self.assertEqual(res, None)
- self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
-
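The sleeping/woken pair of counting semaphores is the load-bearing trick in this class: a waiter releases sleeping while still holding the condition's lock, so although the parent may resume before the waiter reaches cond.wait(), the parent cannot acquire the condition to notify() until wait() has released the lock. The idiom, distilled (illustrative names):

    # waiter side
    def waiter(cond, sleeping, woken):
        cond.acquire()
        sleeping.release()     # parent may proceed, but cannot notify() yet
        cond.wait()            # wait() releases the condition's lock
        woken.release()
        cond.release()

    # parent side: block until both waiters are genuinely waiting
    sleeping.acquire()
    sleeping.acquire()
    cond.acquire(); cond.notify(); cond.release()
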
-
-class _TestEvent(BaseTestCase):
-
- def _test_event(self, event):
- time.sleep(TIMEOUT2)
- event.set()
-
- def test_event(self):
- event = self.Event()
- wait = TimingWrapper(event.wait)
-
- # Removed temporarily: due to API shear this does not
- # work with threading._Event objects (is_set == isSet).
- #self.assertEqual(event.is_set(), False)
-
- self.assertEqual(wait(0.0), None)
- self.assertTimingAlmostEqual(wait.elapsed, 0.0)
- self.assertEqual(wait(TIMEOUT1), None)
- self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
-
- event.set()
-
- # See note above on the API differences
- # self.assertEqual(event.is_set(), True)
- self.assertEqual(wait(), None)
- self.assertTimingAlmostEqual(wait.elapsed, 0.0)
- self.assertEqual(wait(TIMEOUT1), None)
- self.assertTimingAlmostEqual(wait.elapsed, 0.0)
- # self.assertEqual(event.is_set(), True)
-
- event.clear()
-
- #self.assertEqual(event.is_set(), False)
-
- self.Process(target=self._test_event, args=(event,)).start()
- self.assertEqual(wait(), None)
-
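The comparisons against None are deliberate: in this revision of the API, wait() returns None whether it timed out or the event was already set, for the process and thread flavours alike, so only the elapsed time distinguishes the two outcomes and the event state must be queried separately (which the is_set/isSet shear above prevents doing portably). In other words:

    event = multiprocessing.Event()
    result = event.wait(0.01)   # None on timeout...
    event.set()
    result = event.wait()       # ...and None on success, too
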
-#
-#
-#
-
-class _TestValue(BaseTestCase):
-
- codes_values = [
- ('i', 4343, 24234),
- ('d', 3.625, -4.25),
- ('h', -232, 234),
- ('c', latin('x'), latin('y'))
- ]
-
- def _test(self, values):
- for sv, cv in zip(values, self.codes_values):
- sv.value = cv[2]
-
-
- def test_value(self, raw=False):
- if self.TYPE != 'processes':
- return
-
- if raw:
- values = [self.RawValue(code, value)
- for code, value, _ in self.codes_values]
- else:
- values = [self.Value(code, value)
- for code, value, _ in self.codes_values]
-
- for sv, cv in zip(values, self.codes_values):
- self.assertEqual(sv.value, cv[1])
-
- proc = self.Process(target=self._test, args=(values,))
- proc.start()
- proc.join()
-
- for sv, cv in zip(values, self.codes_values):
- self.assertEqual(sv.value, cv[2])
-
- def test_rawvalue(self):
- self.test_value(raw=True)
-
- def test_getobj_getlock(self):
- if self.TYPE != 'processes':
- return
-
- val1 = self.Value('i', 5)
- lock1 = val1.get_lock()
- obj1 = val1.get_obj()
-
- val2 = self.Value('i', 5, lock=None)
- lock2 = val2.get_lock()
- obj2 = val2.get_obj()
-
- lock = self.Lock()
- val3 = self.Value('i', 5, lock=lock)
- lock3 = val3.get_lock()
- obj3 = val3.get_obj()
- self.assertEqual(lock, lock3)
-
- arr4 = self.RawValue('i', 5)
- self.assertFalse(hasattr(arr4, 'get_lock'))
- self.assertFalse(hasattr(arr4, 'get_obj'))
-
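The three Value calls above cover the forms of the lock argument: omitted and lock=None both allocate a fresh lock, an explicit lock object is adopted as-is (hence lock == lock3), and RawValue bypasses the synchronized wrapper entirely, so it grows no get_lock()/get_obj() at all. Typical use of the wrapper, as a short sketch:

    import multiprocessing

    counter = multiprocessing.Value('i', 0)   # synchronized wrapper
    with counter.get_lock():                  # guard the read-modify-write
        counter.value += 1

    raw = multiprocessing.RawValue('i', 0)    # bare shared ctypes object: any
    raw.value += 1                            # locking is the caller's problem
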
-
-class _TestArray(BaseTestCase):
-
- def f(self, seq):
- for i in range(1, len(seq)):
- seq[i] += seq[i-1]
-
- def test_array(self, raw=False):
- if self.TYPE != 'processes':
- return
-
- seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
- if raw:
- arr = self.RawArray('i', seq)
- else:
- arr = self.Array('i', seq)
-
- self.assertEqual(len(arr), len(seq))
- self.assertEqual(arr[3], seq[3])
- self.assertEqual(list(arr[2:7]), list(seq[2:7]))
-
- arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
-
- self.assertEqual(list(arr[:]), seq)
-
- self.f(seq)
-
- p = self.Process(target=self.f, args=(arr,))
- p.start()
- p.join()
-
- self.assertEqual(list(arr[:]), seq)
-
- def test_rawarray(self):
- self.test_array(raw=True)
-
- def test_getobj_getlock_obj(self):
- if self.TYPE != 'processes':
- return
-
- arr1 = self.Array('i', range(10))
- lock1 = arr1.get_lock()
- obj1 = arr1.get_obj()
-
- arr2 = self.Array('i', range(10), lock=None)
- lock2 = arr2.get_lock()
- obj2 = arr2.get_obj()
-
- lock = self.Lock()
- arr3 = self.Array('i', range(10), lock=lock)
- lock3 = arr3.get_lock()
- obj3 = arr3.get_obj()
- self.assertEqual(lock, lock3)
-
- arr4 = self.RawArray('i', range(10))
- self.assertFalse(hasattr(arr4, 'get_lock'))
- self.assertFalse(hasattr(arr4, 'get_obj'))
-
-#
-#
-#
-
-class _TestContainers(BaseTestCase):
-
- ALLOWED_TYPES = ('manager',)
-
- def test_list(self):
- a = self.list(range(10))
- self.assertEqual(a[:], range(10))
-
- b = self.list()
- self.assertEqual(b[:], [])
-
- b.extend(range(5))
- self.assertEqual(b[:], range(5))
-
- self.assertEqual(b[2], 2)
- self.assertEqual(b[2:10], [2,3,4])
-
- b *= 2
- self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
-
- self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
-
- self.assertEqual(a[:], range(10))
-
- d = [a, b]
- e = self.list(d)
- self.assertEqual(
- e[:],
- [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
- )
-
- f = self.list([a])
- a.append('hello')
- self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
-
- def test_dict(self):
- d = self.dict()
- indices = range(65, 70)
- for i in indices:
- d[i] = chr(i)
- self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
- self.assertEqual(sorted(d.keys()), indices)
- self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
- self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
-
- def test_namespace(self):
- n = self.Namespace()
- n.name = 'Bob'
- n.job = 'Builder'
- n._hidden = 'hidden'
- self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
- del n.job
- self.assertEqual(str(n), "Namespace(name='Bob')")
- self.assertTrue(hasattr(n, 'name'))
- self.assertTrue(not hasattr(n, 'job'))
-
-#
-#
-#
-
-def sqr(x, wait=0.0):
- time.sleep(wait)
- return x*x
-
-class _TestPool(BaseTestCase):
-
- def test_apply(self):
- papply = self.pool.apply
- self.assertEqual(papply(sqr, (5,)), sqr(5))
- self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
-
- def test_map(self):
- pmap = self.pool.map
- self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
- self.assertEqual(pmap(sqr, range(100), chunksize=20),
- map(sqr, range(100)))
-
- def test_async(self):
- res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
- get = TimingWrapper(res.get)
- self.assertEqual(get(), 49)
- self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
-
- def test_async_timeout(self):
- res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
- get = TimingWrapper(res.get)
- self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
- self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
-
- def test_imap(self):
- it = self.pool.imap(sqr, range(10))
- self.assertEqual(list(it), map(sqr, range(10)))
-
- it = self.pool.imap(sqr, range(10))
- for i in range(10):
- self.assertEqual(it.next(), i*i)
- self.assertRaises(StopIteration, it.next)
-
- it = self.pool.imap(sqr, range(1000), chunksize=100)
- for i in range(1000):
- self.assertEqual(it.next(), i*i)
- self.assertRaises(StopIteration, it.next)
-
- def test_imap_unordered(self):
- it = self.pool.imap_unordered(sqr, range(1000))
- self.assertEqual(sorted(it), map(sqr, range(1000)))
-
- it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
- self.assertEqual(sorted(it), map(sqr, range(1000)))
-
- def test_make_pool(self):
- p = multiprocessing.Pool(3)
- self.assertEqual(3, len(p._pool))
- p.close()
- p.join()
-
- def test_terminate(self):
- if self.TYPE == 'manager':
- # On Unix a forked process increfs each shared object to
- # which its parent process held a reference. If the
- # forked process gets terminated then there is likely to
- # be a reference leak. So to prevent
- # _TestZZZNumberOfObjects from failing we skip this test
- # when using a manager.
- return
-
- result = self.pool.map_async(
- time.sleep, [0.1 for i in range(10000)], chunksize=1
- )
- self.pool.terminate()
- join = TimingWrapper(self.pool.join)
- join()
- self.assertTrue(join.elapsed < 0.2)
-
-#
-# Test that manager has expected number of shared objects left
-#
-
-class _TestZZZNumberOfObjects(BaseTestCase):
- # Because test cases are sorted alphabetically, this one will get
- # run after all the other tests for the manager. It tests that
- # there have been no "reference leaks" for the manager's shared
- # objects. Note the comment in _TestPool.test_terminate().
- ALLOWED_TYPES = ('manager',)
-
- def test_number_of_objects(self):
- EXPECTED_NUMBER = 1 # the pool object is still alive
- multiprocessing.active_children() # discard dead process objs
- gc.collect() # do garbage collection
- refs = self.manager._number_of_objects()
- if refs != EXPECTED_NUMBER:
- print self.manager._debugInfo()
-
- self.assertEqual(refs, EXPECTED_NUMBER)
-
-#
-# Test of creating a customized manager class
-#
-
-from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
-
-class FooBar(object):
- def f(self):
- return 'f()'
- def g(self):
- raise ValueError
- def _h(self):
- return '_h()'
-
-def baz():
- for i in xrange(10):
- yield i*i
-
-class IteratorProxy(BaseProxy):
- _exposed_ = ('next', '__next__')
- def __iter__(self):
- return self
- def next(self):
- return self._callmethod('next')
- def __next__(self):
- return self._callmethod('__next__')
-
-class MyManager(BaseManager):
- pass
-
-MyManager.register('Foo', callable=FooBar)
-MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
-MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
-
-
-class _TestMyManager(BaseTestCase):
-
- ALLOWED_TYPES = ('manager',)
-
- def test_mymanager(self):
- manager = MyManager()
- manager.start()
-
- foo = manager.Foo()
- bar = manager.Bar()
- baz = manager.baz()
-
- foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
- bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
-
- self.assertEqual(foo_methods, ['f', 'g'])
- self.assertEqual(bar_methods, ['f', '_h'])
-
- self.assertEqual(foo.f(), 'f()')
- self.assertRaises(ValueError, foo.g)
- self.assertEqual(foo._callmethod('f'), 'f()')
- self.assertRaises(RemoteError, foo._callmethod, '_h')
-
- self.assertEqual(bar.f(), 'f()')
- self.assertEqual(bar._h(), '_h()')
- self.assertEqual(bar._callmethod('f'), 'f()')
- self.assertEqual(bar._callmethod('_h'), '_h()')
-
- self.assertEqual(list(baz), [i*i for i in range(10)])
-
- manager.shutdown()
-
-#
-# Test of connecting to a remote server and using xmlrpclib for serialization
-#
-
-_queue = Queue.Queue()
-def get_queue():
- return _queue
-
-class QueueManager(BaseManager):
- '''manager class used by server process'''
-QueueManager.register('get_queue', callable=get_queue)
-
-class QueueManager2(BaseManager):
- '''manager class which specifies the same interface as QueueManager'''
-QueueManager2.register('get_queue')
-
-
-SERIALIZER = 'xmlrpclib'
-
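The two manager classes split the roles: QueueManager registers get_queue with a callable, so a server built from it can create and serve the shared queue, while QueueManager2 registers only the typeid, the client-side form that can merely connect() and obtain proxies. A minimal server/client pair in the same style (the address and authkey are invented for the sketch):

    # server process
    manager = QueueManager(address=('localhost', 50000), authkey='peekaboo')
    server = manager.get_server()
    server.serve_forever()          # serve get_queue() requests until killed

    # client process
    manager = QueueManager2(address=('localhost', 50000), authkey='peekaboo')
    manager.connect()
    queue = manager.get_queue()     # proxy for the queue living in the server
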
-class _TestRemoteManager(BaseTestCase):
-
- ALLOWED_TYPES = ('manager',)
-
- def _putter(self, address, authkey):
- manager = QueueManager2(
- address=address, authkey=authkey, serializer=SERIALIZER
- )
- manager.connect()
- queue = manager.get_queue()
- queue.put(('hello world', None, True, 2.25))
-
- def test_remote(self):
- authkey = os.urandom(32)
-
- manager = QueueManager(
- address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
- )
- manager.start()
-
- p = self.Process(target=self._putter, args=(manager.address, authkey))
- p.start()
-
- manager2 = QueueManager2(
- address=manager.address, authkey=authkey, serializer=SERIALIZER
- )
- manager2.connect()
- queue = manager2.get_queue()
-
- # Note that xmlrpclib will deserialize the tuple as a list, not a tuple
- self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
-
- # Because we are using xmlrpclib for serialization instead of
- # pickle, this will cause a serialization error.
- self.assertRaises(Exception, queue.put, time.sleep)
-
- # Make queue finalizer run before the server is stopped
- del queue
- manager.shutdown()
-
-#
-#
-#
-
-SENTINEL = latin('')
-
-class _TestConnection(BaseTestCase):
-
- ALLOWED_TYPES = ('processes', 'threads')
-
- def _echo(self, conn):
- for msg in iter(conn.recv_bytes, SENTINEL):
- conn.send_bytes(msg)
- conn.close()
-
- def test_connection(self):
- conn, child_conn = self.Pipe()
-
- p = self.Process(target=self._echo, args=(child_conn,))
- p.set_daemon(True)
- p.start()
-
- seq = [1, 2.25, None]
- msg = latin('hello world')
- longmsg = msg * 10
- arr = array.array('i', range(4))
-
- if self.TYPE == 'processes':
- self.assertEqual(type(conn.fileno()), int)
-
- self.assertEqual(conn.send(seq), None)
- self.assertEqual(conn.recv(), seq)
-
- self.assertEqual(conn.send_bytes(msg), None)
- self.assertEqual(conn.recv_bytes(), msg)
-
- if self.TYPE == 'processes':
- buffer = array.array('i', [0]*10)
- expected = list(arr) + [0] * (10 - len(arr))
- self.assertEqual(conn.send_bytes(arr), None)
- self.assertEqual(conn.recv_bytes_into(buffer),
- len(arr) * buffer.itemsize)
- self.assertEqual(list(buffer), expected)
-
- buffer = array.array('i', [0]*10)
- expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
- self.assertEqual(conn.send_bytes(arr), None)
- self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
- len(arr) * buffer.itemsize)
- self.assertEqual(list(buffer), expected)
-
- buffer = bytearray(latin(' ' * 40))
- self.assertEqual(conn.send_bytes(longmsg), None)
- try:
- res = conn.recv_bytes_into(buffer)
- except multiprocessing.BufferTooShort, e:
- self.assertEqual(e.args, (longmsg,))
- else:
- self.fail('expected BufferTooShort, got %s' % res)
-
- poll = TimingWrapper(conn.poll)
-
- self.assertEqual(poll(), False)
- self.assertTimingAlmostEqual(poll.elapsed, 0)
-
- self.assertEqual(poll(TIMEOUT1), False)
- self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
-
- conn.send(None)
-
- self.assertEqual(poll(TIMEOUT1), True)
- self.assertTimingAlmostEqual(poll.elapsed, 0)
-
- self.assertEqual(conn.recv(), None)
-
- really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MB
- conn.send_bytes(really_big_msg)
- self.assertEqual(conn.recv_bytes(), really_big_msg)
-
- conn.send_bytes(SENTINEL) # tell child to quit
- child_conn.close()
-
- if self.TYPE == 'processes':
- self.assertEqual(conn.readable, True)
- self.assertEqual(conn.writable, True)
- self.assertRaises(EOFError, conn.recv)
- self.assertRaises(EOFError, conn.recv_bytes)
-
- p.join()
-
- def test_duplex_false(self):
- reader, writer = self.Pipe(duplex=False)
- self.assertEqual(writer.send(1), None)
- self.assertEqual(reader.recv(), 1)
- if self.TYPE == 'processes':
- self.assertEqual(reader.readable, True)
- self.assertEqual(reader.writable, False)
- self.assertEqual(writer.readable, False)
- self.assertEqual(writer.writable, True)
- self.assertRaises(IOError, reader.send, 2)
- self.assertRaises(IOError, writer.recv)
- self.assertRaises(IOError, writer.poll)
-
- def test_spawn_close(self):
- # We test that a pipe connection can be closed by the parent
- # process immediately after the child is spawned. On Windows
- # this sometimes failed in old versions because child_conn
- # would be closed before the child got a chance to duplicate it.
- conn, child_conn = self.Pipe()
-
- p = self.Process(target=self._echo, args=(child_conn,))
- p.start()
- child_conn.close() # this might complete before child initializes
-
- msg = latin('hello')
- conn.send_bytes(msg)
- self.assertEqual(conn.recv_bytes(), msg)
-
- conn.send_bytes(SENTINEL)
- conn.close()
- p.join()
-
- def test_sendbytes(self):
- if self.TYPE != 'processes':
- return
-
- msg = latin('abcdefghijklmnopqrstuvwxyz')
- a, b = self.Pipe()
-
- a.send_bytes(msg)
- self.assertEqual(b.recv_bytes(), msg)
-
- a.send_bytes(msg, 5)
- self.assertEqual(b.recv_bytes(), msg[5:])
-
- a.send_bytes(msg, 7, 8)
- self.assertEqual(b.recv_bytes(), msg[7:7+8])
-
- a.send_bytes(msg, 26)
- self.assertEqual(b.recv_bytes(), latin(''))
-
- a.send_bytes(msg, 26, 0)
- self.assertEqual(b.recv_bytes(), latin(''))
-
- self.assertRaises(ValueError, a.send_bytes, msg, 27)
-
- self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
-
- self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
-
- self.assertRaises(ValueError, a.send_bytes, msg, -1)
-
- self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
-
-
-class _TestListenerClient(BaseTestCase):
-
- ALLOWED_TYPES = ('processes', 'threads')
-
- def _test(self, address):
- conn = self.connection.Client(address)
- conn.send('hello')
- conn.close()
-
- def test_listener_client(self):
- for family in self.connection.families:
- l = self.connection.Listener(family=family)
- p = self.Process(target=self._test, args=(l.address,))
- p.set_daemon(True)
- p.start()
- conn = l.accept()
- self.assertEqual(conn.recv(), 'hello')
- p.join()
- l.close()
-
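connection.families lists whichever of 'AF_INET', 'AF_UNIX' and 'AF_PIPE' the platform supports, and Listener/Client speak the same protocol over any of them. The test's handshake, reduced to a standalone sketch:

    from multiprocessing.connection import Listener, Client

    listener = Listener(('localhost', 0))   # bind to any free TCP port
    # hand listener.address to the peer, which runs:
    #     conn = Client(address)
    #     conn.send('hello')
    #     conn.close()
    conn = listener.accept()                # blocks until the peer connects
    print conn.recv()                       # -> 'hello'
    listener.close()
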
-#
-# Test of sending connection and socket objects between processes
-#
-
-class _TestPicklingConnections(BaseTestCase):
-
- ALLOWED_TYPES = ('processes',)
-
- def _listener(self, conn, families):
- for fam in families:
- l = self.connection.Listener(family=fam)
- conn.send(l.address)
- new_conn = l.accept()
- conn.send(new_conn)
-
- if self.TYPE == 'processes':
- l = socket.socket()
- l.bind(('localhost', 0))
- conn.send(l.getsockname())
- l.listen(1)
- new_conn, addr = l.accept()
- conn.send(new_conn)
-
- conn.recv()
-
- def _remote(self, conn):
- for (address, msg) in iter(conn.recv, None):
- client = self.connection.Client(address)
- client.send(msg.upper())
- client.close()
-
- if self.TYPE == 'processes':
- address, msg = conn.recv()
- client = socket.socket()
- client.connect(address)
- client.sendall(msg.upper())
- client.close()
-
- conn.close()
-
- def test_pickling(self):
- try:
- multiprocessing.allow_connection_pickling()
- except ImportError:
- return
-
- families = self.connection.families
-
- lconn, lconn0 = self.Pipe()
- lp = self.Process(target=self._listener, args=(lconn0, families))
- lp.start()
- lconn0.close()
-
- rconn, rconn0 = self.Pipe()
- rp = self.Process(target=self._remote, args=(rconn0,))
- rp.start()
- rconn0.close()
-
- for fam in families:
- msg = ('This connection uses family %s' % fam).encode('ascii')
- address = lconn.recv()
- rconn.send((address, msg))
- new_conn = lconn.recv()
- self.assertEqual(new_conn.recv(), msg.upper())
-
- rconn.send(None)
-
- if self.TYPE == 'processes':
- msg = latin('This connection uses a normal socket')
- address = lconn.recv()
- rconn.send((address, msg))
- if hasattr(socket, 'fromfd'):
- new_conn = lconn.recv()
- self.assertEqual(new_conn.recv(100), msg.upper())
- else:
- # XXX On Windows with Py2.6 need to backport fromfd()
- discard = lconn.recv_bytes()
-
- lconn.send(None)
-
- rconn.close()
- lconn.close()
-
- lp.join()
- rp.join()
-
-#
-#
-#
-
-class _TestHeap(BaseTestCase):
-
- ALLOWED_TYPES = ('processes',)
-
- def test_heap(self):
- iterations = 5000
- maxblocks = 50
- blocks = []
-
- # create and destroy lots of blocks of different sizes
- for i in xrange(iterations):
- size = int(random.lognormvariate(0, 1) * 1000)
- b = multiprocessing.heap.BufferWrapper(size)
- blocks.append(b)
- if len(blocks) > maxblocks:
- i = random.randrange(maxblocks)
- del blocks[i]
-
- # get the heap object
- heap = multiprocessing.heap.BufferWrapper._heap
-
- # verify the state of the heap
- all = []
- occupied = 0
- for L in heap._len_to_seq.values():
- for arena, start, stop in L:
- all.append((heap._arenas.index(arena), start, stop,
- stop-start, 'free'))
- for arena, start, stop in heap._allocated_blocks:
- all.append((heap._arenas.index(arena), start, stop,
- stop-start, 'occupied'))
- occupied += (stop-start)
-
- all.sort()
-
- for i in range(len(all)-1):
- (arena, start, stop) = all[i][:3]
- (narena, nstart, nstop) = all[i+1][:3]
- self.assertTrue((arena != narena and nstart == 0) or
- (stop == nstart))
-
-#
-#
-#
-
-try:
- from ctypes import Structure, c_int, c_double
- from multiprocessing.sharedctypes import Value, Array, copy
-except ImportError:
- Structure = object
- c_int = c_double = None
-
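Only Structure and the c_int/c_double type codes come from ctypes here; Value, Array and copy are the shared-memory versions from multiprocessing.sharedctypes, which allocate the ctypes object in a shared heap and, unless lock=False, guard it with a lock. The distinction in brief (a sketch):

    from ctypes import c_int
    from multiprocessing.sharedctypes import Value, RawValue

    shared = Value(c_int, 7)     # shared memory plus a lock via get_lock()
    plain = RawValue(c_int, 7)   # shared memory, no lock wrapper
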
-class _Foo(Structure):
- _fields_ = [
- ('x', c_int),
- ('y', c_double)
- ]
-
-class _TestSharedCTypes(BaseTestCase):
-
- ALLOWED_TYPES = ('processes',)
-
- def _double(self, x, y, foo, arr, string):
- x.value *= 2
- y.value *= 2
- foo.x *= 2
- foo.y *= 2
- string.value *= 2
- for i in range(len(arr)):
- arr[i] *= 2
-
- def test_sharedctypes(self, lock=False):
- if c_int is None:
- return
-
- x = Value('i', 7, lock=lock)
- y = Value(c_double, 1.0/3.0, lock=lock)
- foo = Value(_Foo, 3, 2, lock=lock)
- arr = Array('d', range(10), lock=lock)
- string = Array('c', 20, lock=lock)
- string.value = 'hello'
-
- p = self.Process(target=self._double, args=(x, y, foo, arr, string))
- p.start()
- p.join()
-
- self.assertEqual(x.value, 14)
- self.assertAlmostEqual(y.value, 2.0/3.0)
- self.assertEqual(foo.x, 6)
- self.assertAlmostEqual(foo.y, 4.0)
- for i in range(10):
- self.assertAlmostEqual(arr[i], i*2)
- self.assertEqual(string.value, latin('hellohello'))
-
- def test_synchronize(self):
- self.test_sharedctypes(lock=True)
-
- def test_copy(self):
- if c_int is None:
- return
-
- foo = _Foo(2, 5.0)
- bar = copy(foo)
- foo.x = 0
- foo.y = 0
- self.assertEqual(bar.x, 2)
- self.assertAlmostEqual(bar.y, 5.0)
-
-#
-#
-#
-
-class _TestFinalize(BaseTestCase):
-
- ALLOWED_TYPES = ('processes',)
-
- def _test_finalize(self, conn):
- class Foo(object):
- pass
-
- a = Foo()
- util.Finalize(a, conn.send, args=('a',))
- del a # triggers callback for a
-
- b = Foo()
- close_b = util.Finalize(b, conn.send, args=('b',))
- close_b() # triggers callback for b
- close_b() # does nothing because callback has already been called
- del b # does nothing because callback has already been called
-
- c = Foo()
- util.Finalize(c, conn.send, args=('c',))
-
- d10 = Foo()
- util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
-
- d01 = Foo()
- util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
- d02 = Foo()
- util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
- d03 = Foo()
- util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
-
- util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
-
- util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
-
- # call multiprocessing's cleanup function, then exit the process
- # without garbage collecting locals
- util._exit_function()
- conn.close()
- os._exit(0)
-
- def test_finalize(self):
- conn, child_conn = self.Pipe()
-
- p = self.Process(target=self._test_finalize, args=(child_conn,))
- p.start()
- p.join()
-
- result = [obj for obj in iter(conn.recv, 'STOP')]
- self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
-
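The expected list encodes util.Finalize's ordering rules: at cleanup only finalizers registered with an exitpriority run (so 'c', registered without one, never fires unless its owner is garbage collected first), higher priorities run earlier, and equal priorities run last-registered-first ('d03', 'd02', 'd01'). A standalone sketch of the exit-time behaviour (the messages are arbitrary):

    import sys
    from multiprocessing import util

    class Owner(object):
        pass

    keep = Owner()                 # stays alive until the interpreter exits
    util.Finalize(keep, sys.stdout.write, args=('second\n',), exitpriority=0)
    util.Finalize(None, sys.stdout.write, args=('first\n',), exitpriority=5)
    # at exit: 'first' (priority 5) prints before 'second' (priority 0)
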
-#
-# Test that from ... import * works for each module
-#
-
-class _TestImportStar(BaseTestCase):
-
- ALLOWED_TYPES = ('processes',)
-
- def test_import(self):
- modules = (
- 'multiprocessing', 'multiprocessing.connection',
- 'multiprocessing.heap', 'multiprocessing.managers',
- 'multiprocessing.pool', 'multiprocessing.process',
- 'multiprocessing.reduction', 'multiprocessing.sharedctypes',
- 'multiprocessing.synchronize', 'multiprocessing.util'
- )
-
- for name in modules:
- __import__(name)
- mod = sys.modules[name]
-
- for attr in getattr(mod, '__all__', ()):
- self.assertTrue(
- hasattr(mod, attr),
- '%r does not have attribute %r' % (mod, attr)
- )
-
-#
-# Quick test that logging works -- does not test logging output
-#
-
-class _TestLogging(BaseTestCase):
-
- ALLOWED_TYPES = ('processes',)
-
- def test_enable_logging(self):
- logger = multiprocessing.get_logger()
- logger.setLevel(util.SUBWARNING)
- self.assertTrue(logger is not None)
- logger.debug('this will not be printed')
- logger.info('nor will this')
- logger.setLevel(LOG_LEVEL)
-
- def _test_level(self, conn):
- logger = multiprocessing.get_logger()
- conn.send(logger.getEffectiveLevel())
-
- def test_level(self):
- LEVEL1 = 32
- LEVEL2 = 37
-
- logger = multiprocessing.get_logger()
- root_logger = logging.getLogger()
- root_level = root_logger.level
-
- reader, writer = multiprocessing.Pipe(duplex=False)
-
- logger.setLevel(LEVEL1)
- self.Process(target=self._test_level, args=(writer,)).start()
- self.assertEqual(LEVEL1, reader.recv())
-
- logger.setLevel(logging.NOTSET)
- root_logger.setLevel(LEVEL2)
- self.Process(target=self._test_level, args=(writer,)).start()
- self.assertEqual(LEVEL2, reader.recv())
-
- root_logger.setLevel(root_level)
- logger.setLevel(level=LOG_LEVEL)
-
-#
-# Functions used to create test cases from the base ones in this module
-#
-
-def get_attributes(Source, names):
- d = {}
- for name in names:
- obj = getattr(Source, name)
- if type(obj) == type(get_attributes):
- obj = staticmethod(obj)
- d[name] = obj
- return d
-
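The type() comparison wraps plain functions in staticmethod before they are copied onto the mixin classes; without the wrapping, Python 2 would convert each function into an unbound method that insists on a test-case instance as its first argument. The pitfall in isolation:

    def helper():
        return 42

    class Broken(object):
        helper = helper                # py2 turns this into an unbound method;
                                       # Broken.helper() raises TypeError

    class Works(object):
        helper = staticmethod(helper)  # stays a plain callable:
                                       # Works.helper() == 42
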
-def create_test_cases(Mixin, type):
- result = {}
- glob = globals()
- Type = type[0].upper() + type[1:]
-
- for name in glob.keys():
- if name.startswith('_Test'):
- base = glob[name]
- if type in base.ALLOWED_TYPES:
- newname = 'With' + Type + name[1:]
- class Temp(base, unittest.TestCase, Mixin):
- pass
- result[newname] = Temp
- Temp.__name__ = newname
- Temp.__module__ = Mixin.__module__
- return result
-
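create_test_cases scans the module's globals for _Test* classes and, for each flavour a class allows, manufactures a concrete TestCase whose name records the flavour. For _TestQueue, which allows all three types, the result is equivalent to writing by hand (using the mixins defined just below):

    class WithProcessesTestQueue(_TestQueue, unittest.TestCase, ProcessesMixin):
        pass

    class WithManagerTestQueue(_TestQueue, unittest.TestCase, ManagerMixin):
        pass

    class WithThreadsTestQueue(_TestQueue, unittest.TestCase, ThreadsMixin):
        pass
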
-#
-# Create test cases
-#
-
-class ProcessesMixin(object):
- TYPE = 'processes'
- Process = multiprocessing.Process
- locals().update(get_attributes(multiprocessing, (
- 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
- 'Condition', 'Event', 'Value', 'Array', 'RawValue',
- 'RawArray', 'current_process', 'active_children', 'Pipe',
- 'connection', 'JoinableQueue'
- )))
-
-testcases_processes = create_test_cases(ProcessesMixin, type='processes')
-globals().update(testcases_processes)
-
-
-class ManagerMixin(object):
- TYPE = 'manager'
- Process = multiprocessing.Process
- manager = object.__new__(multiprocessing.managers.SyncManager)
- locals().update(get_attributes(manager, (
- 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
- 'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
- 'Namespace', 'JoinableQueue'
- )))
-
-testcases_manager = create_test_cases(ManagerMixin, type='manager')
-globals().update(testcases_manager)
-
-
-class ThreadsMixin(object):
- TYPE = 'threads'
- Process = multiprocessing.dummy.Process
- locals().update(get_attributes(multiprocessing.dummy, (
- 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
- 'Condition', 'Event', 'Value', 'Array', 'current_process',
- 'active_children', 'Pipe', 'connection', 'dict', 'list',
- 'Namespace', 'JoinableQueue'
- )))
-
-testcases_threads = create_test_cases(ThreadsMixin, type='threads')
-globals().update(testcases_threads)
-
-#
-#
-#
-
-def test_main(run=None):
- if run is None:
- from test.test_support import run_unittest as run
-
- util.get_temp_dir() # creates temp directory for use by all processes
-
- multiprocessing.get_logger().setLevel(LOG_LEVEL)
-
- ProcessesMixin.pool = multiprocessing.Pool(4)
- ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
- ManagerMixin.manager.__init__()
- ManagerMixin.manager.start()
- ManagerMixin.pool = ManagerMixin.manager.Pool(4)
-
- testcases = (
- sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
- sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
- sorted(testcases_manager.values(), key=lambda tc:tc.__name__)
- )
-
- loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
- suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
- run(suite)
-
- ThreadsMixin.pool.terminate()
- ProcessesMixin.pool.terminate()
- ManagerMixin.pool.terminate()
- ManagerMixin.manager.shutdown()
-
- del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
-
-def main():
- test_main(unittest.TextTestRunner(verbosity=2).run)
-
-if __name__ == '__main__':
- main()
+# +# Unit tests for the multiprocessing package +# + +import unittest +import threading +import Queue +import time +import sys +import os +import gc +import signal +import array +import copy +import socket +import random +import logging + +import multiprocessing.dummy +import multiprocessing.connection +import multiprocessing.managers +import multiprocessing.heap +import multiprocessing.managers +import multiprocessing.pool +import _multiprocessing + +from multiprocessing import util + +# +# +# + +if sys.version_info >= (3, 0): + def latin(s): + return s.encode('latin') +else: + latin = str + +try: + bytes +except NameError: + bytes = str + def bytearray(seq): + return array.array('c', seq) + +# +# Constants +# + +LOG_LEVEL = util.SUBWARNING +#LOG_LEVEL = logging.WARNING + +DELTA = 0.1 +CHECK_TIMINGS = False # making true makes tests take a lot longer + # and can sometimes cause some non-serious + # failures because some calls block a bit + # longer than expected +if CHECK_TIMINGS: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 +else: + TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 + +HAVE_GETVALUE = not getattr(_multiprocessing, + 'HAVE_BROKEN_SEM_GETVALUE', False) + +# +# Creates a wrapper for a function which records the time it takes to finish +# + +class TimingWrapper(object): + + def __init__(self, func): + self.func = func + self.elapsed = None + + def __call__(self, *args, **kwds): + t = time.time() + try: + return self.func(*args, **kwds) + finally: + self.elapsed = time.time() - t + +# +# Base class for test cases +# + +class BaseTestCase(object): + + ALLOWED_TYPES = ('processes', 'manager', 'threads') + + def assertTimingAlmostEqual(self, a, b): + if CHECK_TIMINGS: + self.assertAlmostEqual(a, b, 1) + + def assertReturnsIfImplemented(self, value, func, *args): + try: + res = func(*args) + except NotImplementedError: + pass + else: + return self.assertEqual(value, res) + +# +# Return the value of a semaphore +# + +def get_value(self): + try: + return self.get_value() + except AttributeError: + try: + return self._Semaphore__value + except AttributeError: + try: + return self._value + except AttributeError: + raise NotImplementedError + +# +# Testcases +# + +class _TestProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes', 'threads') + + def test_current(self): + if self.TYPE == 'threads': + return + + current = self.current_process() + authkey = current.get_authkey() + + self.assertTrue(current.is_alive()) + self.assertTrue(not current.is_daemon()) + self.assertTrue(isinstance(authkey, bytes)) + self.assertTrue(len(authkey) > 0) + self.assertEqual(current.get_ident(), os.getpid()) + self.assertEqual(current.get_exitcode(), None) + + def _test(self, q, *args, **kwds): + current = self.current_process() + q.put(args) + q.put(kwds) + q.put(current.get_name()) + if self.TYPE != 'threads': + q.put(bytes(current.get_authkey())) + q.put(current.pid) + + def test_process(self): + q = self.Queue(1) + e = self.Event() + args = (q, 1, 2) + kwargs = {'hello':23, 'bye':2.54} + name = 'SomeProcess' + p = self.Process( + target=self._test, args=args, kwargs=kwargs, name=name + ) + p.set_daemon(True) + current = self.current_process() + + if self.TYPE != 'threads': + self.assertEquals(p.get_authkey(), current.get_authkey()) + self.assertEquals(p.is_alive(), False) + self.assertEquals(p.is_daemon(), True) + self.assertTrue(p not in self.active_children()) + self.assertTrue(type(self.active_children()) is list) + self.assertEqual(p.get_exitcode(), None) + + p.start() + + 
self.assertEquals(p.get_exitcode(), None) + self.assertEquals(p.is_alive(), True) + self.assertTrue(p in self.active_children()) + + self.assertEquals(q.get(), args[1:]) + self.assertEquals(q.get(), kwargs) + self.assertEquals(q.get(), p.get_name()) + if self.TYPE != 'threads': + self.assertEquals(q.get(), current.get_authkey()) + self.assertEquals(q.get(), p.pid) + + p.join() + + self.assertEquals(p.get_exitcode(), 0) + self.assertEquals(p.is_alive(), False) + self.assertTrue(p not in self.active_children()) + + def _test_terminate(self): + time.sleep(1000) + + def test_terminate(self): + if self.TYPE == 'threads': + return + + p = self.Process(target=self._test_terminate) + p.set_daemon(True) + p.start() + + self.assertEqual(p.is_alive(), True) + self.assertTrue(p in self.active_children()) + self.assertEqual(p.get_exitcode(), None) + + p.terminate() + + join = TimingWrapper(p.join) + self.assertEqual(join(), None) + self.assertTimingAlmostEqual(join.elapsed, 0.0) + + self.assertEqual(p.is_alive(), False) + self.assertTrue(p not in self.active_children()) + + p.join() + + # XXX sometimes get p.get_exitcode() == 0 on Windows ... + #self.assertEqual(p.get_exitcode(), -signal.SIGTERM) + + def test_cpu_count(self): + try: + cpus = multiprocessing.cpu_count() + except NotImplementedError: + cpus = 1 + self.assertTrue(type(cpus) is int) + self.assertTrue(cpus >= 1) + + def test_active_children(self): + self.assertEqual(type(self.active_children()), list) + + p = self.Process(target=time.sleep, args=(DELTA,)) + self.assertTrue(p not in self.active_children()) + + p.start() + self.assertTrue(p in self.active_children()) + + p.join() + self.assertTrue(p not in self.active_children()) + + def _test_recursion(self, wconn, id): + from multiprocessing import forking + wconn.send(id) + if len(id) < 2: + for i in range(2): + p = self.Process( + target=self._test_recursion, args=(wconn, id+[i]) + ) + p.start() + p.join() + + def test_recursion(self): + rconn, wconn = self.Pipe(duplex=False) + self._test_recursion(wconn, []) + + time.sleep(DELTA) + result = [] + while rconn.poll(): + result.append(rconn.recv()) + + expected = [ + [], + [0], + [0, 0], + [0, 1], + [1], + [1, 0], + [1, 1] + ] + self.assertEqual(result, expected) + +# +# +# + +class _UpperCaser(multiprocessing.Process): + + def __init__(self): + multiprocessing.Process.__init__(self) + self.child_conn, self.parent_conn = multiprocessing.Pipe() + + def run(self): + self.parent_conn.close() + for s in iter(self.child_conn.recv, None): + self.child_conn.send(s.upper()) + self.child_conn.close() + + def submit(self, s): + assert type(s) is str + self.parent_conn.send(s) + return self.parent_conn.recv() + + def stop(self): + self.parent_conn.send(None) + self.parent_conn.close() + self.child_conn.close() + +class _TestSubclassingProcess(BaseTestCase): + + ALLOWED_TYPES = ('processes',) + + def test_subclassing(self): + uppercaser = _UpperCaser() + uppercaser.start() + self.assertEqual(uppercaser.submit('hello'), 'HELLO') + self.assertEqual(uppercaser.submit('world'), 'WORLD') + uppercaser.stop() + uppercaser.join() + +# +# +# + +def queue_empty(q): + if hasattr(q, 'empty'): + return q.empty() + else: + return q.qsize() == 0 + +def queue_full(q, maxsize): + if hasattr(q, 'full'): + return q.full() + else: + return q.qsize() == maxsize + + +class _TestQueue(BaseTestCase): + + + def _test_put(self, queue, child_can_start, parent_can_continue): + child_can_start.wait() + for i in range(6): + queue.get() + parent_can_continue.set() + + def 
test_put(self): + MAXSIZE = 6 + queue = self.Queue(maxsize=MAXSIZE) + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_put, + args=(queue, child_can_start, parent_can_continue) + ) + proc.set_daemon(True) + proc.start() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + queue.put(1) + queue.put(2, True) + queue.put(3, True, None) + queue.put(4, False) + queue.put(5, False, None) + queue.put_nowait(6) + + # the values may be in buffer but not yet in pipe so sleep a bit + time.sleep(DELTA) + + self.assertEqual(queue_empty(queue), False) + self.assertEqual(queue_full(queue, MAXSIZE), True) + + put = TimingWrapper(queue.put) + put_nowait = TimingWrapper(queue.put_nowait) + + self.assertRaises(Queue.Full, put, 7, False) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(Queue.Full, put, 7, False, None) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(Queue.Full, put_nowait, 7) + self.assertTimingAlmostEqual(put_nowait.elapsed, 0) + + self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) + + self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2) + self.assertTimingAlmostEqual(put.elapsed, 0) + + self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) + + child_can_start.set() + parent_can_continue.wait() + + self.assertEqual(queue_empty(queue), True) + self.assertEqual(queue_full(queue, MAXSIZE), False) + + proc.join() + + def _test_get(self, queue, child_can_start, parent_can_continue): + child_can_start.wait() + queue.put(1) + queue.put(2) + queue.put(3) + queue.put(4) + queue.put(5) + parent_can_continue.set() + + def test_get(self): + queue = self.Queue() + child_can_start = self.Event() + parent_can_continue = self.Event() + + proc = self.Process( + target=self._test_get, + args=(queue, child_can_start, parent_can_continue) + ) + proc.set_daemon(True) + proc.start() + + self.assertEqual(queue_empty(queue), True) + + child_can_start.set() + parent_can_continue.wait() + + time.sleep(DELTA) + self.assertEqual(queue_empty(queue), False) + + self.assertEqual(queue.get(), 1) + self.assertEqual(queue.get(True, None), 2) + self.assertEqual(queue.get(True), 3) + self.assertEqual(queue.get(timeout=1), 4) + self.assertEqual(queue.get_nowait(), 5) + + self.assertEqual(queue_empty(queue), True) + + get = TimingWrapper(queue.get) + get_nowait = TimingWrapper(queue.get_nowait) + + self.assertRaises(Queue.Empty, get, False) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(Queue.Empty, get, False, None) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(Queue.Empty, get_nowait) + self.assertTimingAlmostEqual(get_nowait.elapsed, 0) + + self.assertRaises(Queue.Empty, get, True, TIMEOUT1) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + self.assertRaises(Queue.Empty, get, False, TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, 0) + + self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) + + proc.join() + + def _test_fork(self, queue): + for i in range(10, 20): + queue.put(i) + # note that at this point the items may only be buffered, so the + # process cannot shutdown until the feeder thread has finished + # pushing items onto the pipe. 
+ + def test_fork(self): + # Old versions of Queue would fail to create a new feeder + # thread for a forked process if the original process had its + # own feeder thread. This test checks that this no longer + # happens. + + queue = self.Queue() + + # put items on queue so that main process starts a feeder thread + for i in range(10): + queue.put(i) + + # wait to make sure thread starts before we fork a new process + time.sleep(DELTA) + + # fork process + p = self.Process(target=self._test_fork, args=(queue,)) + p.start() + + # check that all expected items are in the queue + for i in range(20): + self.assertEqual(queue.get(), i) + self.assertRaises(Queue.Empty, queue.get, False) + + p.join() + + def test_qsize(self): + q = self.Queue() + try: + self.assertEqual(q.qsize(), 0) + except NotImplementedError: + return + q.put(1) + self.assertEqual(q.qsize(), 1) + q.put(5) + self.assertEqual(q.qsize(), 2) + q.get() + self.assertEqual(q.qsize(), 1) + q.get() + self.assertEqual(q.qsize(), 0) + + def _test_task_done(self, q): + for obj in iter(q.get, None): + time.sleep(DELTA) + q.task_done() + + def test_task_done(self): + queue = self.JoinableQueue() + + if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): + return + + workers = [self.Process(target=self._test_task_done, args=(queue,)) + for i in xrange(4)] + + for p in workers: + p.start() + + for i in xrange(10): + queue.put(i) + + queue.join() + + for p in workers: + queue.put(None) + + for p in workers: + p.join() + +# +# +# + +class _TestLock(BaseTestCase): + + def test_lock(self): + lock = self.Lock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(False), False) + self.assertEqual(lock.release(), None) + self.assertRaises((ValueError, threading.ThreadError), lock.release) + + def test_rlock(self): + lock = self.RLock() + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.acquire(), True) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertEqual(lock.release(), None) + self.assertRaises((AssertionError, RuntimeError), lock.release) + + +class _TestSemaphore(BaseTestCase): + + def _test_semaphore(self, sem): + self.assertReturnsIfImplemented(2, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.acquire(), True) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.acquire(False), False) + self.assertReturnsIfImplemented(0, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(1, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(2, get_value, sem) + + def test_semaphore(self): + sem = self.Semaphore(2) + self._test_semaphore(sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(3, get_value, sem) + self.assertEqual(sem.release(), None) + self.assertReturnsIfImplemented(4, get_value, sem) + + def test_bounded_semaphore(self): + sem = self.BoundedSemaphore(2) + self._test_semaphore(sem) + # Currently fails on OS/X + #if HAVE_GETVALUE: + # self.assertRaises(ValueError, sem.release) + # self.assertReturnsIfImplemented(2, get_value, sem) + + def test_timeout(self): + if self.TYPE != 'processes': + return + + sem = self.Semaphore(0) + acquire = TimingWrapper(sem.acquire) + + self.assertEqual(acquire(False), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, None), False) 
+ self.assertTimingAlmostEqual(acquire.elapsed, 0.0) + + self.assertEqual(acquire(False, TIMEOUT1), False) + self.assertTimingAlmostEqual(acquire.elapsed, 0) + + self.assertEqual(acquire(True, TIMEOUT2), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) + + self.assertEqual(acquire(timeout=TIMEOUT3), False) + self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) + + +class _TestCondition(BaseTestCase): + + def f(self, cond, sleeping, woken, timeout=None): + cond.acquire() + sleeping.release() + cond.wait(timeout) + woken.release() + cond.release() + + def check_invariant(self, cond): + # this is only supposed to succeed when there are no sleepers + if self.TYPE == 'processes': + try: + sleepers = (cond._sleeping_count.get_value() - + cond._woken_count.get_value()) + self.assertEqual(sleepers, 0) + self.assertEqual(cond._wait_semaphore.get_value(), 0) + except NotImplementedError: + pass + + def test_notify(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.set_daemon(True) + p.start() + + p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + p.set_daemon(True) + p.start() + + # wait for both children to start sleeping + sleeping.acquire() + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake up one process/thread + cond.acquire() + cond.notify() + cond.release() + + # check one process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(1, get_value, woken) + + # wake up another + cond.acquire() + cond.notify() + cond.release() + + # check other has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(2, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + p.join() + + def test_notify_all(self): + cond = self.Condition() + sleeping = self.Semaphore(0) + woken = self.Semaphore(0) + + # start some threads/processes which will timeout + for i in range(3): + p = self.Process(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + p.set_daemon(True) + p.start() + + t = threading.Thread(target=self.f, + args=(cond, sleeping, woken, TIMEOUT1)) + t.set_daemon(True) + t.start() + + # wait for them all to sleep + for i in xrange(6): + sleeping.acquire() + + # check they have all timed out + for i in xrange(6): + woken.acquire() + self.assertReturnsIfImplemented(0, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + # start some more threads/processes + for i in range(3): + p = self.Process(target=self.f, args=(cond, sleeping, woken)) + p.set_daemon(True) + p.start() + + t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) + t.set_daemon(True) + t.start() + + # wait for them to all sleep + for i in xrange(6): + sleeping.acquire() + + # check no process/thread has woken up + time.sleep(DELTA) + self.assertReturnsIfImplemented(0, get_value, woken) + + # wake them all up + cond.acquire() + cond.notify_all() + cond.release() + + # check they have all woken + time.sleep(DELTA) + self.assertReturnsIfImplemented(6, get_value, woken) + + # check state is not mucked up + self.check_invariant(cond) + + def test_timeout(self): + cond = self.Condition() + wait = TimingWrapper(cond.wait) + cond.acquire() + res = wait(TIMEOUT1) + cond.release() + self.assertEqual(res, None) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + +class _TestEvent(BaseTestCase): + + 
def _test_event(self, event): + time.sleep(TIMEOUT2) + event.set() + + def test_event(self): + event = self.Event() + wait = TimingWrapper(event.wait) + + # Removed temporaily, due to API shear, this does not + # work with threading._Event objects. is_set == isSet + #self.assertEqual(event.is_set(), False) + + self.assertEqual(wait(0.0), None) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), None) + self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) + + event.set() + + # See note above on the API differences + # self.assertEqual(event.is_set(), True) + self.assertEqual(wait(), None) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + self.assertEqual(wait(TIMEOUT1), None) + self.assertTimingAlmostEqual(wait.elapsed, 0.0) + # self.assertEqual(event.is_set(), True) + + event.clear() + + #self.assertEqual(event.is_set(), False) + + self.Process(target=self._test_event, args=(event,)).start() + self.assertEqual(wait(), None) + +# +# +# + +class _TestValue(BaseTestCase): + + codes_values = [ + ('i', 4343, 24234), + ('d', 3.625, -4.25), + ('h', -232, 234), + ('c', latin('x'), latin('y')) + ] + + def _test(self, values): + for sv, cv in zip(values, self.codes_values): + sv.value = cv[2] + + + def test_value(self, raw=False): + if self.TYPE != 'processes': + return + + if raw: + values = [self.RawValue(code, value) + for code, value, _ in self.codes_values] + else: + values = [self.Value(code, value) + for code, value, _ in self.codes_values] + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[1]) + + proc = self.Process(target=self._test, args=(values,)) + proc.start() + proc.join() + + for sv, cv in zip(values, self.codes_values): + self.assertEqual(sv.value, cv[2]) + + def test_rawvalue(self): + self.test_value(raw=True) + + def test_getobj_getlock(self): + if self.TYPE != 'processes': + return + + val1 = self.Value('i', 5) + lock1 = val1.get_lock() + obj1 = val1.get_obj() + + val2 = self.Value('i', 5, lock=None) + lock2 = val2.get_lock() + obj2 = val2.get_obj() + + lock = self.Lock() + val3 = self.Value('i', 5, lock=lock) + lock3 = val3.get_lock() + obj3 = val3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.RawValue('i', 5) + self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + + +class _TestArray(BaseTestCase): + + def f(self, seq): + for i in range(1, len(seq)): + seq[i] += seq[i-1] + + def test_array(self, raw=False): + if self.TYPE != 'processes': + return + + seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] + if raw: + arr = self.RawArray('i', seq) + else: + arr = self.Array('i', seq) + + self.assertEqual(len(arr), len(seq)) + self.assertEqual(arr[3], seq[3]) + self.assertEqual(list(arr[2:7]), list(seq[2:7])) + + arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) + + self.assertEqual(list(arr[:]), seq) + + self.f(seq) + + p = self.Process(target=self.f, args=(arr,)) + p.start() + p.join() + + self.assertEqual(list(arr[:]), seq) + + def test_rawarray(self): + self.test_array(raw=True) + + def test_getobj_getlock_obj(self): + if self.TYPE != 'processes': + return + + arr1 = self.Array('i', range(10)) + lock1 = arr1.get_lock() + obj1 = arr1.get_obj() + + arr2 = self.Array('i', range(10), lock=None) + lock2 = arr2.get_lock() + obj2 = arr2.get_obj() + + lock = self.Lock() + arr3 = self.Array('i', range(10), lock=lock) + lock3 = arr3.get_lock() + obj3 = arr3.get_obj() + self.assertEqual(lock, lock3) + + arr4 = self.RawArray('i', range(10)) + 
self.assertFalse(hasattr(arr4, 'get_lock')) + self.assertFalse(hasattr(arr4, 'get_obj')) + +# +# +# + +class _TestContainers(BaseTestCase): + + ALLOWED_TYPES = ('manager',) + + def test_list(self): + a = self.list(range(10)) + self.assertEqual(a[:], range(10)) + + b = self.list() + self.assertEqual(b[:], []) + + b.extend(range(5)) + self.assertEqual(b[:], range(5)) + + self.assertEqual(b[2], 2) + self.assertEqual(b[2:10], [2,3,4]) + + b *= 2 + self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) + + self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) + + self.assertEqual(a[:], range(10)) + + d = [a, b] + e = self.list(d) + self.assertEqual( + e[:], + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] + ) + + f = self.list([a]) + a.append('hello') + self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) + + def test_dict(self): + d = self.dict() + indices = range(65, 70) + for i in indices: + d[i] = chr(i) + self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) + self.assertEqual(sorted(d.keys()), indices) + self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) + self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) + + def test_namespace(self): + n = self.Namespace() + n.name = 'Bob' + n.job = 'Builder' + n._hidden = 'hidden' + self.assertEqual((n.name, n.job), ('Bob', 'Builder')) + del n.job + self.assertEqual(str(n), "Namespace(name='Bob')") + self.assertTrue(hasattr(n, 'name')) + self.assertTrue(not hasattr(n, 'job')) + +# +# +# + +def sqr(x, wait=0.0): + time.sleep(wait) + return x*x + +class _TestPool(BaseTestCase): + + def test_apply(self): + papply = self.pool.apply + self.assertEqual(papply(sqr, (5,)), sqr(5)) + self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3)) + + def test_map(self): + pmap = self.pool.map + self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10))) + self.assertEqual(pmap(sqr, range(100), chunksize=20), + map(sqr, range(100))) + + def test_async(self): + res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) + get = TimingWrapper(res.get) + self.assertEqual(get(), 49) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) + + def test_async_timeout(self): + res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) + get = TimingWrapper(res.get) + self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2) + self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) + + def test_imap(self): + it = self.pool.imap(sqr, range(10)) + self.assertEqual(list(it), map(sqr, range(10))) + + it = self.pool.imap(sqr, range(10)) + for i in range(10): + self.assertEqual(it.next(), i*i) + self.assertRaises(StopIteration, it.next) + + it = self.pool.imap(sqr, range(1000), chunksize=100) + for i in range(1000): + self.assertEqual(it.next(), i*i) + self.assertRaises(StopIteration, it.next) + + def test_imap_unordered(self): + it = self.pool.imap_unordered(sqr, range(1000)) + self.assertEqual(sorted(it), map(sqr, range(1000))) + + it = self.pool.imap_unordered(sqr, range(1000), chunksize=53) + self.assertEqual(sorted(it), map(sqr, range(1000))) + + def test_make_pool(self): + p = multiprocessing.Pool(3) + self.assertEqual(3, len(p._pool)) + p.close() + p.join() + + def test_terminate(self): + if self.TYPE == 'manager': + # On Unix a forked process increfs each shared object to + # which its parent process held a reference. If the + # forked process gets terminated then there is likely to + # be a reference leak. So to prevent + # _TestZZZNumberOfObjects from failing we skip this test + # when using a manager. 
+#
+#
+#
+
+def sqr(x, wait=0.0):
+    time.sleep(wait)
+    return x*x
+
+class _TestPool(BaseTestCase):
+
+    def test_apply(self):
+        papply = self.pool.apply
+        self.assertEqual(papply(sqr, (5,)), sqr(5))
+        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
+
+    def test_map(self):
+        pmap = self.pool.map
+        self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
+        self.assertEqual(pmap(sqr, range(100), chunksize=20),
+                         map(sqr, range(100)))
+
+    def test_async(self):
+        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
+        get = TimingWrapper(res.get)
+        self.assertEqual(get(), 49)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
+
+    def test_async_timeout(self):
+        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
+        get = TimingWrapper(res.get)
+        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
+        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
+
+    def test_imap(self):
+        it = self.pool.imap(sqr, range(10))
+        self.assertEqual(list(it), map(sqr, range(10)))
+
+        it = self.pool.imap(sqr, range(10))
+        for i in range(10):
+            self.assertEqual(it.next(), i*i)
+        self.assertRaises(StopIteration, it.next)
+
+        it = self.pool.imap(sqr, range(1000), chunksize=100)
+        for i in range(1000):
+            self.assertEqual(it.next(), i*i)
+        self.assertRaises(StopIteration, it.next)
+
+    def test_imap_unordered(self):
+        it = self.pool.imap_unordered(sqr, range(1000))
+        self.assertEqual(sorted(it), map(sqr, range(1000)))
+
+        it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
+        self.assertEqual(sorted(it), map(sqr, range(1000)))
+
+    def test_make_pool(self):
+        p = multiprocessing.Pool(3)
+        self.assertEqual(3, len(p._pool))
+        p.close()
+        p.join()
+
+    def test_terminate(self):
+        if self.TYPE == 'manager':
+            # On Unix a forked process increfs each shared object to
+            # which its parent process held a reference.  If the
+            # forked process gets terminated then there is likely to
+            # be a reference leak.  So to prevent
+            # _TestZZZNumberOfObjects from failing we skip this test
+            # when using a manager.
+            return
+
+        result = self.pool.map_async(
+            time.sleep, [0.1 for i in range(10000)], chunksize=1
+            )
+        self.pool.terminate()
+        join = TimingWrapper(self.pool.join)
+        join()
+        self.assertTrue(join.elapsed < 0.2)
+
+#
+# Test that manager has expected number of shared objects left
+#
+
+class _TestZZZNumberOfObjects(BaseTestCase):
+    # Because test cases are sorted alphabetically, this one will get
+    # run after all the other tests for the manager.  It tests that
+    # there have been no "reference leaks" for the manager's shared
+    # objects.  Note the comment in _TestPool.test_terminate().
+    ALLOWED_TYPES = ('manager',)
+
+    def test_number_of_objects(self):
+        EXPECTED_NUMBER = 1                # the pool object is still alive
+        multiprocessing.active_children()  # discard dead process objs
+        gc.collect()                       # do garbage collection
+        refs = self.manager._number_of_objects()
+        if refs != EXPECTED_NUMBER:
+            print self.manager._debugInfo()
+
+        self.assertEqual(refs, EXPECTED_NUMBER)
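test_async_timeout above is the behaviour most callers care about: AsyncResult.get(timeout) raises multiprocessing.TimeoutError when the worker has not finished, and terminate() stops the workers without waiting for pending tasks. A minimal sketch outside the harness:

    import multiprocessing
    import time

    if __name__ == '__main__':
        pool = multiprocessing.Pool(2)
        res = pool.apply_async(time.sleep, (10,))
        try:
            res.get(timeout=1)       # worker is still sleeping
        except multiprocessing.TimeoutError:
            print 'result not ready within 1s'
        pool.terminate()             # stop workers without draining tasks
        pool.join()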
+#
+# Test of creating a customized manager class
+#
+
+from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
+
+class FooBar(object):
+    def f(self):
+        return 'f()'
+    def g(self):
+        raise ValueError
+    def _h(self):
+        return '_h()'
+
+def baz():
+    for i in xrange(10):
+        yield i*i
+
+class IteratorProxy(BaseProxy):
+    _exposed_ = ('next', '__next__')
+    def __iter__(self):
+        return self
+    def next(self):
+        return self._callmethod('next')
+    def __next__(self):
+        return self._callmethod('__next__')
+
+class MyManager(BaseManager):
+    pass
+
+MyManager.register('Foo', callable=FooBar)
+MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
+MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
+
+
+class _TestMyManager(BaseTestCase):
+
+    ALLOWED_TYPES = ('manager',)
+
+    def test_mymanager(self):
+        manager = MyManager()
+        manager.start()
+
+        foo = manager.Foo()
+        bar = manager.Bar()
+        baz = manager.baz()
+
+        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
+        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
+
+        self.assertEqual(foo_methods, ['f', 'g'])
+        self.assertEqual(bar_methods, ['f', '_h'])
+
+        self.assertEqual(foo.f(), 'f()')
+        self.assertRaises(ValueError, foo.g)
+        self.assertEqual(foo._callmethod('f'), 'f()')
+        self.assertRaises(RemoteError, foo._callmethod, '_h')
+
+        self.assertEqual(bar.f(), 'f()')
+        self.assertEqual(bar._h(), '_h()')
+        self.assertEqual(bar._callmethod('f'), 'f()')
+        self.assertEqual(bar._callmethod('_h'), '_h()')
+
+        self.assertEqual(list(baz), [i*i for i in range(10)])
+
+        manager.shutdown()
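The pattern tested here -- subclass BaseManager, then register() a typeid with a callable, optionally narrowing the exposed methods or supplying a custom proxytype -- is the general recipe for serving your own classes. A hedged standalone sketch (Counter and CounterManager are illustrative names; by default every public method of the target object is exposed on the proxy):

    from multiprocessing.managers import BaseManager

    class Counter(object):
        def __init__(self):
            self.n = 0
        def incr(self):
            self.n += 1
            return self.n

    class CounterManager(BaseManager):
        pass

    # the callable runs in the manager's server process
    CounterManager.register('Counter', callable=Counter)

    if __name__ == '__main__':
        m = CounterManager()
        m.start()
        c = m.Counter()      # a proxy; the real Counter lives in the server
        assert c.incr() == 1
        m.shutdown()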
+#
+# Test of connecting to a remote server and using xmlrpclib for serialization
+#
+
+_queue = Queue.Queue()
+def get_queue():
+    return _queue
+
+class QueueManager(BaseManager):
+    '''manager class used by server process'''
+QueueManager.register('get_queue', callable=get_queue)
+
+class QueueManager2(BaseManager):
+    '''manager class which specifies the same interface as QueueManager'''
+QueueManager2.register('get_queue')
+
+
+SERIALIZER = 'xmlrpclib'
+
+class _TestRemoteManager(BaseTestCase):
+
+    ALLOWED_TYPES = ('manager',)
+
+    def _putter(self, address, authkey):
+        manager = QueueManager2(
+            address=address, authkey=authkey, serializer=SERIALIZER
+            )
+        manager.connect()
+        queue = manager.get_queue()
+        queue.put(('hello world', None, True, 2.25))
+
+    def test_remote(self):
+        authkey = os.urandom(32)
+
+        manager = QueueManager(
+            address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
+            )
+        manager.start()
+
+        p = self.Process(target=self._putter, args=(manager.address, authkey))
+        p.start()
+
+        manager2 = QueueManager2(
+            address=manager.address, authkey=authkey, serializer=SERIALIZER
+            )
+        manager2.connect()
+        queue = manager2.get_queue()
+
+        # Note that xmlrpclib will deserialize object as a list not a tuple
+        self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
+
+        # Because we are using xmlrpclib for serialization instead of
+        # pickle this will cause a serialization error.
+        self.assertRaises(Exception, queue.put, time.sleep)
+
+        # Make queue finalizer run before the server is stopped
+        del queue
+        manager.shutdown()
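_TestRemoteManager shows the split between the serving side (register a typeid with a callable, then start()) and the connecting side (register the bare typeid, then connect() against the same address and authkey). A sketch of the same shape using the default pickle serializer; the names are illustrative, and the lambda callable assumes a fork-based platform where the registry is inherited by the server process:

    import Queue
    from multiprocessing.managers import BaseManager

    _q = Queue.Queue()

    class ServerQM(BaseManager):
        pass
    ServerQM.register('get_queue', callable=lambda: _q)

    class ClientQM(BaseManager):
        pass
    ClientQM.register('get_queue')        # same typeid, no callable

    if __name__ == '__main__':
        server = ServerQM(address=('localhost', 0), authkey='secret')
        server.start()

        client = ClientQM(address=server.address, authkey='secret')
        client.connect()
        client.get_queue().put('hello')

        # the owning manager can hand out a proxy to the same queue
        assert server.get_queue().get() == 'hello'
        server.shutdown()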
+#
+#
+#
+
+SENTINEL = latin('')
+
+class _TestConnection(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def _echo(self, conn):
+        for msg in iter(conn.recv_bytes, SENTINEL):
+            conn.send_bytes(msg)
+        conn.close()
+
+    def test_connection(self):
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._echo, args=(child_conn,))
+        p.set_daemon(True)
+        p.start()
+
+        seq = [1, 2.25, None]
+        msg = latin('hello world')
+        longmsg = msg * 10
+        arr = array.array('i', range(4))
+
+        if self.TYPE == 'processes':
+            self.assertEqual(type(conn.fileno()), int)
+
+        self.assertEqual(conn.send(seq), None)
+        self.assertEqual(conn.recv(), seq)
+
+        self.assertEqual(conn.send_bytes(msg), None)
+        self.assertEqual(conn.recv_bytes(), msg)
+
+        if self.TYPE == 'processes':
+            buffer = array.array('i', [0]*10)
+            expected = list(arr) + [0] * (10 - len(arr))
+            self.assertEqual(conn.send_bytes(arr), None)
+            self.assertEqual(conn.recv_bytes_into(buffer),
+                             len(arr) * buffer.itemsize)
+            self.assertEqual(list(buffer), expected)
+
+            buffer = array.array('i', [0]*10)
+            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
+            self.assertEqual(conn.send_bytes(arr), None)
+            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
+                             len(arr) * buffer.itemsize)
+            self.assertEqual(list(buffer), expected)
+
+            buffer = bytearray(latin(' ' * 40))
+            self.assertEqual(conn.send_bytes(longmsg), None)
+            try:
+                res = conn.recv_bytes_into(buffer)
+            except multiprocessing.BufferTooShort, e:
+                self.assertEqual(e.args, (longmsg,))
+            else:
+                self.fail('expected BufferTooShort, got %s' % res)
+
+        poll = TimingWrapper(conn.poll)
+
+        self.assertEqual(poll(), False)
+        self.assertTimingAlmostEqual(poll.elapsed, 0)
+
+        self.assertEqual(poll(TIMEOUT1), False)
+        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
+
+        conn.send(None)
+
+        self.assertEqual(poll(TIMEOUT1), True)
+        self.assertTimingAlmostEqual(poll.elapsed, 0)
+
+        self.assertEqual(conn.recv(), None)
+
+        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MB
+        conn.send_bytes(really_big_msg)
+        self.assertEqual(conn.recv_bytes(), really_big_msg)
+
+        conn.send_bytes(SENTINEL)                          # tell child to quit
+        child_conn.close()
+
+        if self.TYPE == 'processes':
+            self.assertEqual(conn.readable, True)
+            self.assertEqual(conn.writable, True)
+            self.assertRaises(EOFError, conn.recv)
+            self.assertRaises(EOFError, conn.recv_bytes)
+
+        p.join()
+
+    def test_duplex_false(self):
+        reader, writer = self.Pipe(duplex=False)
+        self.assertEqual(writer.send(1), None)
+        self.assertEqual(reader.recv(), 1)
+        if self.TYPE == 'processes':
+            self.assertEqual(reader.readable, True)
+            self.assertEqual(reader.writable, False)
+            self.assertEqual(writer.readable, False)
+            self.assertEqual(writer.writable, True)
+            self.assertRaises(IOError, reader.send, 2)
+            self.assertRaises(IOError, writer.recv)
+            self.assertRaises(IOError, writer.poll)
+
+    def test_spawn_close(self):
+        # We test that a pipe connection can be closed by parent
+        # process immediately after child is spawned.  On Windows this
+        # would have sometimes failed on old versions because
+        # child_conn would be closed before the child got a chance to
+        # duplicate it.
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._echo, args=(child_conn,))
+        p.start()
+        child_conn.close()    # this might complete before child initializes
+
+        msg = latin('hello')
+        conn.send_bytes(msg)
+        self.assertEqual(conn.recv_bytes(), msg)
+
+        conn.send_bytes(SENTINEL)
+        conn.close()
+        p.join()
+
+    def test_sendbytes(self):
+        if self.TYPE != 'processes':
+            return
+
+        msg = latin('abcdefghijklmnopqrstuvwxyz')
+        a, b = self.Pipe()
+
+        a.send_bytes(msg)
+        self.assertEqual(b.recv_bytes(), msg)
+
+        a.send_bytes(msg, 5)
+        self.assertEqual(b.recv_bytes(), msg[5:])
+
+        a.send_bytes(msg, 7, 8)
+        self.assertEqual(b.recv_bytes(), msg[7:7+8])
+
+        a.send_bytes(msg, 26)
+        self.assertEqual(b.recv_bytes(), latin(''))
+
+        a.send_bytes(msg, 26, 0)
+        self.assertEqual(b.recv_bytes(), latin(''))
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 27)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, -1)
+
+        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
+
+
+class _TestListenerClient(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes', 'threads')
+
+    def _test(self, address):
+        conn = self.connection.Client(address)
+        conn.send('hello')
+        conn.close()
+
+    def test_listener_client(self):
+        for family in self.connection.families:
+            l = self.connection.Listener(family=family)
+            p = self.Process(target=self._test, args=(l.address,))
+            p.set_daemon(True)
+            p.start()
+            conn = l.accept()
+            self.assertEqual(conn.recv(), 'hello')
+            p.join()
+            l.close()
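Listener and Client are the test's low-level rendezvous primitives: Listener binds an address for a given family and accept() yields a Connection, while Client(address) dials in. A standalone sketch using a thread as the peer (serve is an illustrative helper):

    import threading
    from multiprocessing.connection import Listener, Client

    def serve(listener):
        conn = listener.accept()          # blocks until a client dials in
        conn.send(conn.recv().upper())
        conn.close()

    if __name__ == '__main__':
        l = Listener(('localhost', 0), family='AF_INET')
        t = threading.Thread(target=serve, args=(l,))
        t.start()

        c = Client(l.address)
        c.send('hello')
        assert c.recv() == 'HELLO'
        c.close()
        t.join()
        l.close()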
+#
+# Test of sending connection and socket objects between processes
+#
+
+class _TestPicklingConnections(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _listener(self, conn, families):
+        for fam in families:
+            l = self.connection.Listener(family=fam)
+            conn.send(l.address)
+            new_conn = l.accept()
+            conn.send(new_conn)
+
+        if self.TYPE == 'processes':
+            l = socket.socket()
+            l.bind(('localhost', 0))
+            conn.send(l.getsockname())
+            l.listen(1)
+            new_conn, addr = l.accept()
+            conn.send(new_conn)
+
+        conn.recv()
+
+    def _remote(self, conn):
+        for (address, msg) in iter(conn.recv, None):
+            client = self.connection.Client(address)
+            client.send(msg.upper())
+            client.close()
+
+        if self.TYPE == 'processes':
+            address, msg = conn.recv()
+            client = socket.socket()
+            client.connect(address)
+            client.sendall(msg.upper())
+            client.close()
+
+        conn.close()
+
+    def test_pickling(self):
+        try:
+            multiprocessing.allow_connection_pickling()
+        except ImportError:
+            return
+
+        families = self.connection.families
+
+        lconn, lconn0 = self.Pipe()
+        lp = self.Process(target=self._listener, args=(lconn0, families))
+        lp.start()
+        lconn0.close()
+
+        rconn, rconn0 = self.Pipe()
+        rp = self.Process(target=self._remote, args=(rconn0,))
+        rp.start()
+        rconn0.close()
+
+        for fam in families:
+            msg = ('This connection uses family %s' % fam).encode('ascii')
+            address = lconn.recv()
+            rconn.send((address, msg))
+            new_conn = lconn.recv()
+            self.assertEqual(new_conn.recv(), msg.upper())
+
+        rconn.send(None)
+
+        if self.TYPE == 'processes':
+            msg = latin('This connection uses a normal socket')
+            address = lconn.recv()
+            rconn.send((address, msg))
+            if hasattr(socket, 'fromfd'):
+                new_conn = lconn.recv()
+                self.assertEqual(new_conn.recv(100), msg.upper())
+            else:
+                # XXX On Windows with Py2.6 need to backport fromfd()
+                discard = lconn.recv_bytes()
+
+        lconn.send(None)
+
+        rconn.close()
+        lconn.close()
+
+        lp.join()
+        rp.join()
+
+#
+#
+#
+
+class _TestHeap(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_heap(self):
+        iterations = 5000
+        maxblocks = 50
+        blocks = []
+
+        # create and destroy lots of blocks of different sizes
+        for i in xrange(iterations):
+            size = int(random.lognormvariate(0, 1) * 1000)
+            b = multiprocessing.heap.BufferWrapper(size)
+            blocks.append(b)
+            if len(blocks) > maxblocks:
+                i = random.randrange(maxblocks)
+                del blocks[i]
+
+        # get the heap object
+        heap = multiprocessing.heap.BufferWrapper._heap
+
+        # verify the state of the heap
+        all = []
+        occupied = 0
+        for L in heap._len_to_seq.values():
+            for arena, start, stop in L:
+                all.append((heap._arenas.index(arena), start, stop,
+                            stop-start, 'free'))
+        for arena, start, stop in heap._allocated_blocks:
+            all.append((heap._arenas.index(arena), start, stop,
+                        stop-start, 'occupied'))
+            occupied += (stop-start)
+
+        all.sort()
+
+        for i in range(len(all)-1):
+            (arena, start, stop) = all[i][:3]
+            (narena, nstart, nstop) = all[i+1][:3]
+            self.assertTrue((arena != narena and nstart == 0) or
+                            (stop == nstart))
+
+#
+#
+#
+
+try:
+    from ctypes import Structure, Value, copy, c_int, c_double
+except ImportError:
+    Structure = object
+    c_int = c_double = None
+
+class _Foo(Structure):
+    _fields_ = [
+        ('x', c_int),
+        ('y', c_double)
+        ]
+
+class _TestSharedCTypes(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _double(self, x, y, foo, arr, string):
+        x.value *= 2
+        y.value *= 2
+        foo.x *= 2
+        foo.y *= 2
+        string.value *= 2
+        for i in range(len(arr)):
+            arr[i] *= 2
+
+    def test_sharedctypes(self, lock=False):
+        if c_int is None:
+            return
+
+        x = Value('i', 7, lock=lock)
+        y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
+        foo = Value(_Foo, 3, 2, lock=lock)
+        arr = Array('d', range(10), lock=lock)
+        string = Array('c', 20, lock=lock)
+        string.value = 'hello'
+
+        p = self.Process(target=self._double, args=(x, y, foo, arr, string))
+        p.start()
+        p.join()
+
+        self.assertEqual(x.value, 14)
+        self.assertAlmostEqual(y.value, 2.0/3.0)
+        self.assertEqual(foo.x, 6)
+        self.assertAlmostEqual(foo.y, 4.0)
+        for i in range(10):
+            self.assertAlmostEqual(arr[i], i*2)
+        self.assertEqual(string.value, latin('hellohello'))
+
+    def test_synchronize(self):
+        self.test_sharedctypes(lock=True)
+
+    def test_copy(self):
+        if c_int is None:
+            return
+
+        foo = _Foo(2, 5.0)
+        bar = copy(foo)
+        foo.x = 0
+        foo.y = 0
+        self.assertEqual(bar.x, 2)
+        self.assertAlmostEqual(bar.y, 5.0)
+
+#
+#
+#
+
+class _TestFinalize(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def _test_finalize(self, conn):
+        class Foo(object):
+            pass
+
+        a = Foo()
+        util.Finalize(a, conn.send, args=('a',))
+        del a           # triggers callback for a
+
+        b = Foo()
+        close_b = util.Finalize(b, conn.send, args=('b',))
+        close_b()       # triggers callback for b
+        close_b()       # does nothing because callback has already been called
+        del b           # does nothing because callback has already been called
+
+        c = Foo()
+        util.Finalize(c, conn.send, args=('c',))
+
+        d10 = Foo()
+        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
+
+        d01 = Foo()
+        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
+        d02 = Foo()
+        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
+        d03 = Foo()
+        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
+
+        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
+
+        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
+
+        # call multiprocessing's cleanup function then exit process without
+        # garbage collecting locals
+        util._exit_function()
+        conn.close()
+        os._exit(0)
+
+    def test_finalize(self):
+        conn, child_conn = self.Pipe()
+
+        p = self.Process(target=self._test_finalize, args=(child_conn,))
+        p.start()
+        p.join()
+
+        result = [obj for obj in iter(conn.recv, 'STOP')]
+        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
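_TestFinalize documents the ordering rule: finalizers registered with an exitpriority are run by util._exit_function() in decreasing priority order (d10 before the equal-priority d0x group, which fires in reverse registration order, before e). A tiny sketch of that rule (report and Owner are illustrative names):

    import sys
    from multiprocessing import util

    def report(tag):
        sys.stdout.write(tag + '\n')

    class Owner(object):
        pass

    keep = Owner()
    util.Finalize(keep, report, args=('high',), exitpriority=10)
    util.Finalize(None, report, args=('low',), exitpriority=-10)
    # at process shutdown util._exit_function() runs these in order of
    # decreasing exitpriority, so 'high' is reported before 'low'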
+#
+# Test that from ... import * works for each module
+#
+
+class _TestImportStar(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_import(self):
+        modules = (
+            'multiprocessing', 'multiprocessing.connection',
+            'multiprocessing.heap', 'multiprocessing.managers',
+            'multiprocessing.pool', 'multiprocessing.process',
+            'multiprocessing.reduction', 'multiprocessing.sharedctypes',
+            'multiprocessing.synchronize', 'multiprocessing.util'
+            )
+
+        for name in modules:
+            __import__(name)
+            mod = sys.modules[name]
+
+            for attr in getattr(mod, '__all__', ()):
+                self.assertTrue(
+                    hasattr(mod, attr),
+                    '%r does not have attribute %r' % (mod, attr)
+                    )
+
+#
+# Quick test that logging works -- does not test logging output
+#
+
+class _TestLogging(BaseTestCase):
+
+    ALLOWED_TYPES = ('processes',)
+
+    def test_enable_logging(self):
+        logger = multiprocessing.get_logger()
+        logger.setLevel(util.SUBWARNING)
+        self.assertTrue(logger is not None)
+        logger.debug('this will not be printed')
+        logger.info('nor will this')
+        logger.setLevel(LOG_LEVEL)
+
+    def _test_level(self, conn):
+        logger = multiprocessing.get_logger()
+        conn.send(logger.getEffectiveLevel())
+
+    def test_level(self):
+        LEVEL1 = 32
+        LEVEL2 = 37
+
+        logger = multiprocessing.get_logger()
+        root_logger = logging.getLogger()
+        root_level = root_logger.level
+
+        reader, writer = multiprocessing.Pipe(duplex=False)
+
+        logger.setLevel(LEVEL1)
+        self.Process(target=self._test_level, args=(writer,)).start()
+        self.assertEqual(LEVEL1, reader.recv())
+
+        logger.setLevel(logging.NOTSET)
+        root_logger.setLevel(LEVEL2)
+        self.Process(target=self._test_level, args=(writer,)).start()
+        self.assertEqual(LEVEL2, reader.recv())
+
+        root_logger.setLevel(root_level)
+        logger.setLevel(level=LOG_LEVEL)
+
+#
+# Functions used to create test cases from the base ones in this module
+#
+
+def get_attributes(Source, names):
+    d = {}
+    for name in names:
+        obj = getattr(Source, name)
+        if type(obj) == type(get_attributes):
+            obj = staticmethod(obj)
+        d[name] = obj
+    return d
+
+def create_test_cases(Mixin, type):
+    result = {}
+    glob = globals()
+    Type = type[0].upper() + type[1:]
+
+    for name in glob.keys():
+        if name.startswith('_Test'):
+            base = glob[name]
+            if type in base.ALLOWED_TYPES:
+                newname = 'With' + Type + name[1:]
+                class Temp(base, unittest.TestCase, Mixin):
+                    pass
+                result[newname] = Temp
+                Temp.__name__ = newname
+                Temp.__module__ = Mixin.__module__
+    return result
+
+#
+# Create test cases
+#
+
+class ProcessesMixin(object):
+    TYPE = 'processes'
+    Process = multiprocessing.Process
+    locals().update(get_attributes(multiprocessing, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'RawValue',
+        'RawArray', 'current_process', 'active_children', 'Pipe',
+        'connection', 'JoinableQueue'
+        )))
+
+testcases_processes = create_test_cases(ProcessesMixin, type='processes')
+globals().update(testcases_processes)
+
+
+class ManagerMixin(object):
+    TYPE = 'manager'
+    Process = multiprocessing.Process
+    manager = object.__new__(multiprocessing.managers.SyncManager)
+    locals().update(get_attributes(manager, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
+        'Namespace', 'JoinableQueue'
+        )))
+
+testcases_manager = create_test_cases(ManagerMixin, type='manager')
+globals().update(testcases_manager)
+
+
+class ThreadsMixin(object):
+    TYPE = 'threads'
+    Process = multiprocessing.dummy.Process
+    locals().update(get_attributes(multiprocessing.dummy, (
+        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
+        'Condition', 'Event', 'Value', 'Array', 'current_process',
+        'active_children', 'Pipe', 'connection', 'dict', 'list',
+        'Namespace', 'JoinableQueue'
+        )))
+
+testcases_threads = create_test_cases(ThreadsMixin, type='threads')
+globals().update(testcases_threads)
+
+#
+#
+#
+
+def test_main(run=None):
+    if run is None:
+        from test.test_support import run_unittest as run
+
+    util.get_temp_dir()     # creates temp directory for use by all processes
+
+    multiprocessing.get_logger().setLevel(LOG_LEVEL)
+
+    ProcessesMixin.pool = multiprocessing.Pool(4)
+    ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
+    ManagerMixin.manager.__init__()
+    ManagerMixin.manager.start()
+    ManagerMixin.pool = ManagerMixin.manager.Pool(4)
+
+    testcases = (
+        sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
+        sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
+        sorted(testcases_manager.values(), key=lambda tc:tc.__name__)
+        )
+
+    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
+    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
+    run(suite)
+
+    ThreadsMixin.pool.terminate()
+    ProcessesMixin.pool.terminate()
+    ManagerMixin.pool.terminate()
+    ManagerMixin.manager.shutdown()
+
+    del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
+
+def main():
+    test_main(unittest.TextTestRunner(verbosity=2).run)
+
+if __name__ == '__main__':
+    main()
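For reference, create_test_cases() derives the concrete test-class names by joining 'With', the capitalized type, and the base name with its leading underscore removed, so the classes injected into globals() look like WithProcessesTestPool or WithManagerTestContainers:

    >>> type = 'processes'
    >>> Type = type[0].upper() + type[1:]
    >>> 'With' + Type + '_TestPool'[1:]
    'WithProcessesTestPool'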