Diffstat (limited to 'Lib/multiprocessing')
-rw-r--r--  Lib/multiprocessing/__init__.py          |  12
-rw-r--r--  Lib/multiprocessing/connection.py        |  14
-rw-r--r--  Lib/multiprocessing/dummy/__init__.py    | 286
-rw-r--r--  Lib/multiprocessing/dummy/connection.py  | 122
-rw-r--r--  Lib/multiprocessing/forking.py           |  20
-rw-r--r--  Lib/multiprocessing/heap.py              | 402
-rw-r--r--  Lib/multiprocessing/managers.py          |  50
-rw-r--r--  Lib/multiprocessing/pool.py              |  60
-rw-r--r--  Lib/multiprocessing/process.py           |  12
-rw-r--r--  Lib/multiprocessing/queues.py            |  34
-rw-r--r--  Lib/multiprocessing/reduction.py         |  16
-rw-r--r--  Lib/multiprocessing/sharedctypes.py      |  36
-rw-r--r--  Lib/multiprocessing/synchronize.py       |  14
-rw-r--r--  Lib/multiprocessing/util.py              |   2
14 files changed, 540 insertions(+), 540 deletions(-)
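Every hunk in this diff is a whitespace-only cleanup: each changed line differs from its replacement only by trailing blanks, which is why the insertion and deletion counts balance exactly at 540. As a hedged sketch (not the tool actually used to produce this commit), a change of this shape can be generated by a short script like the one below; the script name and its command-line interface are assumptions for illustration:

    import sys

    def strip_trailing_whitespace(path):
        # Drop trailing spaces/tabs from every line; rewrite the file
        # only if something actually changed.
        with open(path) as f:
            lines = f.readlines()
        cleaned = [line.rstrip() + '\n' for line in lines]
        if cleaned != lines:
            with open(path, 'w') as f:
                f.writelines(cleaned)

    if __name__ == '__main__':
        # Hypothetical usage: python strip_ws.py Lib/multiprocessing/*.py
        for name in sys.argv[1:]:
            strip_trailing_whitespace(name)

Note that `line.rstrip()` also removes the newline, so it is re-added; a file whose last line lacks a trailing newline would gain one, which is harmless for a cleanup of this kind.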
diff --git a/Lib/multiprocessing/__init__.py b/Lib/multiprocessing/__init__.py
index 1cb3222..decb2ad 100644
--- a/Lib/multiprocessing/__init__.py
+++ b/Lib/multiprocessing/__init__.py
@@ -68,10 +68,10 @@ from multiprocessing.process import Process, current_process, active_children
class ProcessError(Exception):
pass
-
+
class BufferTooShort(ProcessError):
pass
-
+
class TimeoutError(ProcessError):
pass
@@ -123,7 +123,7 @@ def cpu_count():
            num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
-
+
if num >= 1:
return num
else:
@@ -151,13 +151,13 @@ def log_to_stderr(level=None):
    '''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
-
+
def allow_connection_pickling():
'''
Install support for sending connections and sockets between processes
'''
from multiprocessing import reduction
-
+
#
# Definitions depending on native semaphores
#
@@ -263,7 +263,7 @@ if sys.platform == 'win32':
        '''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
- Useful for people embedding Python.
+ Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index 752d9ab..f5a3301 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -50,7 +50,7 @@ def arbitrary_address(family):
    '''
if family == 'AF_INET':
return ('localhost', 0)
- elif family == 'AF_UNIX':
+ elif family == 'AF_UNIX':
return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
@@ -160,7 +160,7 @@ if sys.platform != 'win32':
        c2 = _multiprocessing.Connection(fd2, readable=False)
return c1, c2
-
+
else:
from ._multiprocessing import win32
@@ -200,7 +200,7 @@ else:
        c1 = _multiprocessing.PipeConnection(h1, writable=duplex)
c2 = _multiprocessing.PipeConnection(h2, readable=duplex)
-
+
return c1, c2
#
@@ -290,14 +290,14 @@ if sys.platform == 'win32':
                )
self._handle_queue = [handle]
self._last_accepted = None
-
+
sub_debug('listener created with address=%r', self._address)
self.close = Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
-
+
def accept(self):
newhandle = win32.CreateNamedPipe(
self._address, win32.PIPE_ACCESS_DUPLEX,
@@ -320,7 +320,7 @@ if sys.platform == 'win32':
            sub_debug('closing listener with address=%r', address)
for handle in queue:
close(handle)
-
+
def PipeClient(address):
'''
Return a connection object connected to the pipe given by `address`
@@ -397,7 +397,7 @@ class ConnectionWrapper(object):
        self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
- setattr(self, attr, obj)
+ setattr(self, attr, obj)
def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)
diff --git a/Lib/multiprocessing/dummy/__init__.py b/Lib/multiprocessing/dummy/__init__.py
index cabf580..dd0f07b 100644
--- a/Lib/multiprocessing/dummy/__init__.py
+++ b/Lib/multiprocessing/dummy/__init__.py
@@ -1,143 +1,143 @@
-#
-# Support for the API of the multiprocessing package using threads
-#
-# multiprocessing/dummy/__init__.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [
- 'Process', 'current_process', 'active_children', 'freeze_support',
- 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
- 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
- ]
-
-#
-# Imports
-#
-
-import threading
-import sys
-import weakref
-import array
-import itertools
-
-from multiprocessing import TimeoutError, cpu_count
-from multiprocessing.dummy.connection import Pipe
-from threading import Lock, RLock, Semaphore, BoundedSemaphore
-from threading import Event
-from Queue import Queue
-
-#
-#
-#
-
-class DummyProcess(threading.Thread):
-
- def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
- threading.Thread.__init__(self, group, target, name, args, kwargs)
- self._pid = None
- self._children = weakref.WeakKeyDictionary()
- self._start_called = False
- self._parent = current_process()
-
- def start(self):
- assert self._parent is current_process()
- self._start_called = True
- self._parent._children[self] = None
- threading.Thread.start(self)
-
- def get_exitcode(self):
- if self._start_called and not self.is_alive():
- return 0
- else:
- return None
-
- # XXX
- if sys.version_info < (3, 0):
- is_alive = threading.Thread.is_alive.im_func
- get_name = threading.Thread.get_name.im_func
- set_name = threading.Thread.set_name.im_func
- is_daemon = threading.Thread.is_daemon.im_func
- set_daemon = threading.Thread.set_daemon.im_func
- else:
- is_alive = threading.Thread.is_alive
- get_name = threading.Thread.get_name
- set_name = threading.Thread.set_name
- is_daemon = threading.Thread.is_daemon
- set_daemon = threading.Thread.set_daemon
-
-#
-#
-#
-
-class Condition(threading._Condition):
- # XXX
- if sys.version_info < (3, 0):
- notify_all = threading._Condition.notify_all.im_func
- else:
- notify_all = threading._Condition.notify_all
-
-#
-#
-#
-
-Process = DummyProcess
-current_process = threading.current_thread
-current_process()._children = weakref.WeakKeyDictionary()
-
-def active_children():
- children = current_process()._children
- for p in list(children):
- if not p.is_alive():
- children.pop(p, None)
- return list(children)
-
-def freeze_support():
- pass
-
-#
-#
-#
-
-class Namespace(object):
- def __init__(self, **kwds):
- self.__dict__.update(kwds)
- def __repr__(self):
- items = self.__dict__.items()
- temp = []
- for name, value in items:
- if not name.startswith('_'):
- temp.append('%s=%r' % (name, value))
- temp.sort()
- return 'Namespace(%s)' % str.join(', ', temp)
-
-dict = dict
-list = list
-
-def Array(typecode, sequence, lock=True):
- return array.array(typecode, sequence)
-
-class Value(object):
- def __init__(self, typecode, value, lock=True):
- self._typecode = typecode
- self._value = value
- def _get(self):
- return self._value
- def _set(self, value):
- self._value = value
- value = property(_get, _set)
- def __repr__(self):
- return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
-
-def Manager():
- return sys.modules[__name__]
-
-def shutdown():
- pass
-
-def Pool(processes=None, initializer=None, initargs=()):
- from multiprocessing.pool import ThreadPool
- return ThreadPool(processes, initializer, initargs)
-
-JoinableQueue = Queue
+#
+# Support for the API of the multiprocessing package using threads
+#
+# multiprocessing/dummy/__init__.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [
+    'Process', 'current_process', 'active_children', 'freeze_support',
+    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
+    'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
+    ]
+
+#
+# Imports
+#
+
+import threading
+import sys
+import weakref
+import array
+import itertools
+
+from multiprocessing import TimeoutError, cpu_count
+from multiprocessing.dummy.connection import Pipe
+from threading import Lock, RLock, Semaphore, BoundedSemaphore
+from threading import Event
+from Queue import Queue
+
+#
+#
+#
+
+class DummyProcess(threading.Thread):
+
+    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
+        threading.Thread.__init__(self, group, target, name, args, kwargs)
+        self._pid = None
+        self._children = weakref.WeakKeyDictionary()
+        self._start_called = False
+        self._parent = current_process()
+
+    def start(self):
+        assert self._parent is current_process()
+        self._start_called = True
+        self._parent._children[self] = None
+        threading.Thread.start(self)
+
+    def get_exitcode(self):
+        if self._start_called and not self.is_alive():
+            return 0
+        else:
+            return None
+
+    # XXX
+    if sys.version_info < (3, 0):
+        is_alive = threading.Thread.is_alive.im_func
+        get_name = threading.Thread.get_name.im_func
+        set_name = threading.Thread.set_name.im_func
+        is_daemon = threading.Thread.is_daemon.im_func
+        set_daemon = threading.Thread.set_daemon.im_func
+    else:
+        is_alive = threading.Thread.is_alive
+        get_name = threading.Thread.get_name
+        set_name = threading.Thread.set_name
+        is_daemon = threading.Thread.is_daemon
+        set_daemon = threading.Thread.set_daemon
+
+#
+#
+#
+
+class Condition(threading._Condition):
+    # XXX
+    if sys.version_info < (3, 0):
+        notify_all = threading._Condition.notify_all.im_func
+    else:
+        notify_all = threading._Condition.notify_all
+
+#
+#
+#
+
+Process = DummyProcess
+current_process = threading.current_thread
+current_process()._children = weakref.WeakKeyDictionary()
+
+def active_children():
+    children = current_process()._children
+    for p in list(children):
+        if not p.is_alive():
+            children.pop(p, None)
+    return list(children)
+
+def freeze_support():
+    pass
+
+#
+#
+#
+
+class Namespace(object):
+    def __init__(self, **kwds):
+        self.__dict__.update(kwds)
+    def __repr__(self):
+        items = self.__dict__.items()
+        temp = []
+        for name, value in items:
+            if not name.startswith('_'):
+                temp.append('%s=%r' % (name, value))
+        temp.sort()
+        return 'Namespace(%s)' % str.join(', ', temp)
+
+dict = dict
+list = list
+
+def Array(typecode, sequence, lock=True):
+    return array.array(typecode, sequence)
+
+class Value(object):
+    def __init__(self, typecode, value, lock=True):
+        self._typecode = typecode
+        self._value = value
+    def _get(self):
+        return self._value
+    def _set(self, value):
+        self._value = value
+    value = property(_get, _set)
+    def __repr__(self):
+        return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
+
+def Manager():
+    return sys.modules[__name__]
+
+def shutdown():
+    pass
+
+def Pool(processes=None, initializer=None, initargs=()):
+    from multiprocessing.pool import ThreadPool
+    return ThreadPool(processes, initializer, initargs)
+
+JoinableQueue = Queue
diff --git a/Lib/multiprocessing/dummy/connection.py b/Lib/multiprocessing/dummy/connection.py
index dd2bcb9..4f0a680 100644
--- a/Lib/multiprocessing/dummy/connection.py
+++ b/Lib/multiprocessing/dummy/connection.py
@@ -1,61 +1,61 @@
-#
-# Analogue of `multiprocessing.connection` which uses queues instead of sockets
-#
-# multiprocessing/dummy/connection.py
-#
-# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
-#
-
-__all__ = [ 'Client', 'Listener', 'Pipe' ]
-
-from Queue import Queue
-
-
-families = [None]
-
-
-class Listener(object):
-
- def __init__(self, address=None, family=None, backlog=1):
- self._backlog_queue = Queue(backlog)
-
- def accept(self):
- return Connection(*self._backlog_queue.get())
-
- def close(self):
- self._backlog_queue = None
-
- address = property(lambda self: self._backlog_queue)
-
-
-def Client(address):
- _in, _out = Queue(), Queue()
- address.put((_out, _in))
- return Connection(_in, _out)
-
-
-def Pipe(duplex=True):
- a, b = Queue(), Queue()
- return Connection(a, b), Connection(b, a)
-
-
-class Connection(object):
-
- def __init__(self, _in, _out):
- self._out = _out
- self._in = _in
- self.send = self.send_bytes = _out.put
- self.recv = self.recv_bytes = _in.get
-
- def poll(self, timeout=0.0):
- if self._in.qsize() > 0:
- return True
- if timeout <= 0.0:
- return False
- self._in.not_empty.acquire()
- self._in.not_empty.wait(timeout)
- self._in.not_empty.release()
- return self._in.qsize() > 0
-
- def close(self):
- pass
+#
+# Analogue of `multiprocessing.connection` which uses queues instead of sockets
+#
+# multiprocessing/dummy/connection.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [ 'Client', 'Listener', 'Pipe' ]
+
+from Queue import Queue
+
+
+families = [None]
+
+
+class Listener(object):
+
+    def __init__(self, address=None, family=None, backlog=1):
+        self._backlog_queue = Queue(backlog)
+
+    def accept(self):
+        return Connection(*self._backlog_queue.get())
+
+    def close(self):
+        self._backlog_queue = None
+
+    address = property(lambda self: self._backlog_queue)
+
+
+def Client(address):
+    _in, _out = Queue(), Queue()
+    address.put((_out, _in))
+    return Connection(_in, _out)
+
+
+def Pipe(duplex=True):
+    a, b = Queue(), Queue()
+    return Connection(a, b), Connection(b, a)
+
+
+class Connection(object):
+
+    def __init__(self, _in, _out):
+        self._out = _out
+        self._in = _in
+        self.send = self.send_bytes = _out.put
+        self.recv = self.recv_bytes = _in.get
+
+    def poll(self, timeout=0.0):
+        if self._in.qsize() > 0:
+            return True
+        if timeout <= 0.0:
+            return False
+        self._in.not_empty.acquire()
+        self._in.not_empty.wait(timeout)
+        self._in.not_empty.release()
+        return self._in.qsize() > 0
+
+    def close(self):
+        pass
diff --git a/Lib/multiprocessing/forking.py b/Lib/multiprocessing/forking.py
index 2c1d3cf..6107f07 100644
--- a/Lib/multiprocessing/forking.py
+++ b/Lib/multiprocessing/forking.py
@@ -92,7 +92,7 @@ if sys.platform != 'win32':
            except OSError, e:
if self.wait(timeout=0.1) is None:
raise
-
+
@staticmethod
def thread_is_spawning():
return False
@@ -107,10 +107,10 @@ else:
    import _subprocess
import copy_reg
import time
-
+
from ._multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
-
+
try:
from cPickle import dump, load, HIGHEST_PROTOCOL
except ImportError:
@@ -217,7 +217,7 @@ else:
                    if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
-
+
return self.returncode
def poll(self):
@@ -230,7 +230,7 @@ else:
            except WindowsError:
if self.wait(timeout=0.1) is None:
raise
-
+
#
#
#
@@ -308,7 +308,7 @@ else:
        Return info about parent needed by child to unpickle process object
'''
from .util import _logger, _log_to_stderr
-
+
d = dict(
name=name,
sys_path=sys.path,
@@ -317,7 +317,7 @@ else:
            orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().get_authkey(),
)
-
+
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
@@ -336,7 +336,7 @@ else:
    #
# Make (Pipe)Connection picklable
#
-
+
def reduce_connection(conn):
if not Popen.thread_is_spawning():
raise RuntimeError(
@@ -345,7 +345,7 @@ else:
            )
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
conn.readable, conn.writable)
-
+
copy_reg.pickle(Connection, reduce_connection)
copy_reg.pickle(PipeConnection, reduce_connection)
@@ -367,7 +367,7 @@ def prepare(data):
    if 'authkey' in data:
process.current_process()._authkey = data['authkey']
-
+
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
diff --git a/Lib/multiprocessing/heap.py b/Lib/multiprocessing/heap.py
index 7e596ca..f6b3404 100644
--- a/Lib/multiprocessing/heap.py
+++ b/Lib/multiprocessing/heap.py
@@ -1,201 +1,201 @@
-#
-# Module which supports allocation of memory from an mmap
-#
-# multiprocessing/heap.py
-#
-# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
-#
-
-import bisect
-import mmap
-import tempfile
-import os
-import sys
-import threading
-import itertools
-
-import _multiprocessing
-from multiprocessing.util import Finalize, info
-from multiprocessing.forking import assert_spawning
-
-__all__ = ['BufferWrapper']
-
-#
-# Inheirtable class which wraps an mmap, and from which blocks can be allocated
-#
-
-if sys.platform == 'win32':
-
-    from ._multiprocessing import win32
-
-    class Arena(object):
-
-        _counter = itertools.count()
-
-        def __init__(self, size):
-            self.size = size
-            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
-            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
-            assert win32.GetLastError() == 0, 'tagname already in use'
-            self._state = (self.size, self.name)
-
-        def __getstate__(self):
-            assert_spawning(self)
-            return self._state
-
-        def __setstate__(self, state):
-            self.size, self.name = self._state = state
-            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
-            assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
-
-else:
-
-    class Arena(object):
-
-        def __init__(self, size):
-            self.buffer = mmap.mmap(-1, size)
-            self.size = size
-            self.name = None
-
-#
-# Class allowing allocation of chunks of memory from arenas
-#
-
-class Heap(object):
-
-    _alignment = 8
-
-    def __init__(self, size=mmap.PAGESIZE):
-        self._lastpid = os.getpid()
-        self._lock = threading.Lock()
-        self._size = size
-        self._lengths = []
-        self._len_to_seq = {}
-        self._start_to_block = {}
-        self._stop_to_block = {}
-        self._allocated_blocks = set()
-        self._arenas = []
-
-    @staticmethod
-    def _roundup(n, alignment):
-        # alignment must be a power of 2
-        mask = alignment - 1
-        return (n + mask) & ~mask
-
-    def _malloc(self, size):
-        # returns a large enough block -- it might be much larger
-        i = bisect.bisect_left(self._lengths, size)
-        if i == len(self._lengths):
-            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
-            self._size *= 2
-            info('allocating a new mmap of length %d', length)
-            arena = Arena(length)
-            self._arenas.append(arena)
-            return (arena, 0, length)
-        else:
-            length = self._lengths[i]
-            seq = self._len_to_seq[length]
-            block = seq.pop()
-            if not seq:
-                del self._len_to_seq[length], self._lengths[i]
-
-        (arena, start, stop) = block
-        del self._start_to_block[(arena, start)]
-        del self._stop_to_block[(arena, stop)]
-        return block
-
-    def _free(self, block):
-        # free location and try to merge with neighbours
-        (arena, start, stop) = block
-
-        try:
-            prev_block = self._stop_to_block[(arena, start)]
-        except KeyError:
-            pass
-        else:
-            start, _ = self._absorb(prev_block)
-
-        try:
-            next_block = self._start_to_block[(arena, stop)]
-        except KeyError:
-            pass
-        else:
-            _, stop = self._absorb(next_block)
-
-        block = (arena, start, stop)
-        length = stop - start
-
-        try:
-            self._len_to_seq[length].append(block)
-        except KeyError:
-            self._len_to_seq[length] = [block]
-            bisect.insort(self._lengths, length)
-
-        self._start_to_block[(arena, start)] = block
-        self._stop_to_block[(arena, stop)] = block
-
-    def _absorb(self, block):
-        # deregister this block so it can be merged with a neighbour
-        (arena, start, stop) = block
-        del self._start_to_block[(arena, start)]
-        del self._stop_to_block[(arena, stop)]
-
-        length = stop - start
-        seq = self._len_to_seq[length]
-        seq.remove(block)
-        if not seq:
-            del self._len_to_seq[length]
-            self._lengths.remove(length)
-
-        return start, stop
-
-    def free(self, block):
-        # free a block returned by malloc()
-        assert os.getpid() == self._lastpid
-        self._lock.acquire()
-        try:
-            self._allocated_blocks.remove(block)
-            self._free(block)
-        finally:
-            self._lock.release()
-
-    def malloc(self, size):
-        # return a block of right size (possibly rounded up)
-        assert 0 <= size < sys.maxint
-        if os.getpid() != self._lastpid:
-            self.__init__()   # reinitialize after fork
-        self._lock.acquire()
-        try:
-            size = self._roundup(max(size,1), self._alignment)
-            (arena, start, stop) = self._malloc(size)
-            new_stop = start + size
-            if new_stop < stop:
-                self._free((arena, new_stop, stop))
-            block = (arena, start, new_stop)
-            self._allocated_blocks.add(block)
-            return block
-        finally:
-            self._lock.release()
-
-#
-# Class representing a chunk of an mmap -- can be inherited
-#
-
-class BufferWrapper(object):
-
-    _heap = Heap()
-
-    def __init__(self, size):
-        assert 0 <= size < sys.maxint
-        block = BufferWrapper._heap.malloc(size)
-        self._state = (block, size)
-        Finalize(self, BufferWrapper._heap.free, args=(block,))
-
-    def get_address(self):
-        (arena, start, stop), size = self._state
-        address, length = _multiprocessing.address_of_buffer(arena.buffer)
-        assert size <= length
-        return address + start
-
-    def get_size(self):
-        return self._state[1]
+#
+# Module which supports allocation of memory from an mmap
+#
+# multiprocessing/heap.py
+#
+# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
+#
+
+import bisect
+import mmap
+import tempfile
+import os
+import sys
+import threading
+import itertools
+
+import _multiprocessing
+from multiprocessing.util import Finalize, info
+from multiprocessing.forking import assert_spawning
+
+__all__ = ['BufferWrapper']
+
+#
+# Inheirtable class which wraps an mmap, and from which blocks can be allocated
+#
+
+if sys.platform == 'win32':
+
+ from ._multiprocessing import win32
+
+ class Arena(object):
+
+ _counter = itertools.count()
+
+ def __init__(self, size):
+ self.size = size
+ self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
+ assert win32.GetLastError() == 0, 'tagname already in use'
+ self._state = (self.size, self.name)
+
+ def __getstate__(self):
+ assert_spawning(self)
+ return self._state
+
+ def __setstate__(self, state):
+ self.size, self.name = self._state = state
+ self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
+ assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
+
+else:
+
+ class Arena(object):
+
+ def __init__(self, size):
+ self.buffer = mmap.mmap(-1, size)
+ self.size = size
+ self.name = None
+
+#
+# Class allowing allocation of chunks of memory from arenas
+#
+
+class Heap(object):
+
+ _alignment = 8
+
+ def __init__(self, size=mmap.PAGESIZE):
+ self._lastpid = os.getpid()
+ self._lock = threading.Lock()
+ self._size = size
+ self._lengths = []
+ self._len_to_seq = {}
+ self._start_to_block = {}
+ self._stop_to_block = {}
+ self._allocated_blocks = set()
+ self._arenas = []
+
+ @staticmethod
+ def _roundup(n, alignment):
+ # alignment must be a power of 2
+ mask = alignment - 1
+ return (n + mask) & ~mask
+
+ def _malloc(self, size):
+ # returns a large enough block -- it might be much larger
+ i = bisect.bisect_left(self._lengths, size)
+ if i == len(self._lengths):
+ length = self._roundup(max(self._size, size), mmap.PAGESIZE)
+ self._size *= 2
+ info('allocating a new mmap of length %d', length)
+ arena = Arena(length)
+ self._arenas.append(arena)
+ return (arena, 0, length)
+ else:
+ length = self._lengths[i]
+ seq = self._len_to_seq[length]
+ block = seq.pop()
+ if not seq:
+ del self._len_to_seq[length], self._lengths[i]
+
+ (arena, start, stop) = block
+ del self._start_to_block[(arena, start)]
+ del self._stop_to_block[(arena, stop)]
+ return block
+
+ def _free(self, block):
+ # free location and try to merge with neighbours
+ (arena, start, stop) = block
+
+ try:
+ prev_block = self._stop_to_block[(arena, start)]
+ except KeyError:
+ pass
+ else:
+ start, _ = self._absorb(prev_block)
+
+ try:
+ next_block = self._start_to_block[(arena, stop)]
+ except KeyError:
+ pass
+ else:
+ _, stop = self._absorb(next_block)
+
+ block = (arena, start, stop)
+ length = stop - start
+
+ try:
+ self._len_to_seq[length].append(block)
+ except KeyError:
+ self._len_to_seq[length] = [block]
+ bisect.insort(self._lengths, length)
+
+ self._start_to_block[(arena, start)] = block
+ self._stop_to_block[(arena, stop)] = block
+
+ def _absorb(self, block):
+ # deregister this block so it can be merged with a neighbour
+ (arena, start, stop) = block
+ del self._start_to_block[(arena, start)]
+ del self._stop_to_block[(arena, stop)]
+
+ length = stop - start
+ seq = self._len_to_seq[length]
+ seq.remove(block)
+ if not seq:
+ del self._len_to_seq[length]
+ self._lengths.remove(length)
+
+ return start, stop
+
+ def free(self, block):
+ # free a block returned by malloc()
+ assert os.getpid() == self._lastpid
+ self._lock.acquire()
+ try:
+ self._allocated_blocks.remove(block)
+ self._free(block)
+ finally:
+ self._lock.release()
+
+ def malloc(self, size):
+ # return a block of right size (possibly rounded up)
+ assert 0 <= size < sys.maxint
+ if os.getpid() != self._lastpid:
+ self.__init__() # reinitialize after fork
+ self._lock.acquire()
+ try:
+ size = self._roundup(max(size,1), self._alignment)
+ (arena, start, stop) = self._malloc(size)
+ new_stop = start + size
+ if new_stop < stop:
+ self._free((arena, new_stop, stop))
+ block = (arena, start, new_stop)
+ self._allocated_blocks.add(block)
+ return block
+ finally:
+ self._lock.release()
+
+#
+# Class representing a chunk of an mmap -- can be inherited
+#
+
+class BufferWrapper(object):
+
+ _heap = Heap()
+
+ def __init__(self, size):
+ assert 0 <= size < sys.maxint
+ block = BufferWrapper._heap.malloc(size)
+ self._state = (block, size)
+ Finalize(self, BufferWrapper._heap.free, args=(block,))
+
+ def get_address(self):
+ (arena, start, stop), size = self._state
+ address, length = _multiprocessing.address_of_buffer(arena.buffer)
+ assert size <= length
+ return address + start
+
+ def get_size(self):
+ return self._state[1]
diff --git a/Lib/multiprocessing/managers.py b/Lib/multiprocessing/managers.py
index 908c193..6c1d912 100644
--- a/Lib/multiprocessing/managers.py
+++ b/Lib/multiprocessing/managers.py
@@ -40,7 +40,7 @@ try:
    bytes
except NameError:
bytes = str # XXX not needed in Py2.6 and Py3.0
-
+
#
# Register some things for pickling
#
@@ -55,7 +55,7 @@ if view_types[0] is not list:       # XXX only needed in Py3.0
        return list, (list(obj),)
for view_type in view_types:
copy_reg.pickle(view_type, rebuild_as_list)
-
+
#
# Type for identifying shared objects
#
@@ -104,7 +104,7 @@ def convert_to_error(kind, result):
        return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
-
+
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
@@ -340,7 +340,7 @@ class Server(object):
        util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
-
+
util._run_finalizers(0)
for p in active_children():
@@ -358,7 +358,7 @@ class Server(object):
            traceback.print_exc()
finally:
exit(0)
-
+
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
@@ -367,7 +367,7 @@ class Server(object):
        try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
-
+
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
@@ -456,7 +456,7 @@ class BaseManager(object):
    '''
_registry = {}
_Server = Server
-
+
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().get_authkey()
@@ -487,7 +487,7 @@ class BaseManager(object):
        conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
-
+
def start(self):
'''
Spawn a server process for this manager object
@@ -570,10 +570,10 @@ class BaseManager(object):
        Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
- try:
+ try:
return dispatch(conn, None, 'number_of_objects')
finally:
- conn.close()
+ conn.close()
def __enter__(self):
return self
@@ -612,7 +612,7 @@ class BaseManager(object):
            del BaseProxy._address_to_local[address]
except KeyError:
pass
-
+
address = property(lambda self: self._address)
@classmethod
@@ -640,7 +640,7 @@ class BaseManager(object):
        cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
-
+
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
@@ -709,9 +709,9 @@ class BaseProxy(object):
        if incref:
self._incref()
-
+
util.register_after_fork(self, BaseProxy._after_fork)
-
+
def _connect(self):
util.debug('making connection to manager')
name = current_process().get_name()
@@ -720,7 +720,7 @@ class BaseProxy(object):
        conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
-
+
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referrent and return a copy of the result
@@ -735,7 +735,7 @@ class BaseProxy(object):
        conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
-
+
if kind == '#RETURN':
return result
elif kind == '#PROXY':
@@ -793,7 +793,7 @@ class BaseProxy(object):
                   threading.current_thread().get_name())
tls.connection.close()
del tls.connection
-
+
def _after_fork(self):
self._manager = None
try:
@@ -806,7 +806,7 @@ class BaseProxy(object):
        kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
-
+
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
@@ -817,7 +817,7 @@ class BaseProxy(object):
    def __deepcopy__(self, memo):
return self._getvalue()
-
+
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
@@ -842,7 +842,7 @@ def RebuildProxy(func, token, serializer, kwds):
    If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
-
+
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
@@ -884,7 +884,7 @@ def AutoProxy(token, serializer, manager=None, authkey=None,
    Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
-
+
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
@@ -995,7 +995,7 @@ class NamespaceProxy(BaseProxy):
        if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
- return callmethod('__getattribute__', (key,))
+ return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
@@ -1007,7 +1007,7 @@ class NamespaceProxy(BaseProxy):
        callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
-
+
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
@@ -1063,10 +1063,10 @@ PoolProxy._method_to_typeid_ = {
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
-
+
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
-
+
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
diff --git a/Lib/multiprocessing/pool.py b/Lib/multiprocessing/pool.py
index 0255c86..79f0a29 100644
--- a/Lib/multiprocessing/pool.py
+++ b/Lib/multiprocessing/pool.py
@@ -58,18 +58,18 @@ def worker(inqueue, outqueue, initializer=None, initargs=()):
        except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
-
+
if task is None:
debug('worker got sentinel -- exiting')
break
-
+
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception, e:
result = (False, e)
put((job, i, result))
-
+
#
# Class representing a process pool
#
@@ -91,7 +91,7 @@ class Pool(object):
                processes = cpu_count()
except NotImplementedError:
processes = 1
-
+
self._pool = []
for i in range(processes):
w = self.Process(
@@ -102,7 +102,7 @@ class Pool(object):
            w.set_name(w.get_name().replace('Process', 'PoolWorker'))
w.set_daemon(True)
w.start()
-
+
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
@@ -132,7 +132,7 @@ class Pool(object):
        self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
-
+
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
@@ -182,7 +182,7 @@ class Pool(object):
        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
-
+
def apply_async(self, func, args=(), kwds={}, callback=None):
'''
Asynchronous equivalent of `apply()` builtin
@@ -199,12 +199,12 @@ class Pool(object):
        assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
-
+
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
-
+
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
@@ -234,13 +234,13 @@ class Pool(object):
                break
else:
debug('task handler got sentinel')
-
+
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
-
+
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
@@ -260,12 +260,12 @@ class Pool(object):
            except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
-
+
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
-
+
if task is None:
debug('result handler got sentinel')
break
@@ -321,7 +321,7 @@ class Pool(object):
        raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
-
+
def close(self):
debug('closing pool')
if self._state == RUN:
@@ -355,7 +355,7 @@ class Pool(object):
                        task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
-
+
task_handler._state = TERMINATE
taskqueue.put(None) # sentinel
@@ -363,7 +363,7 @@ class Pool(object):
        cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.is_alive() or len(cache) == 0
-
+
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
@@ -396,14 +396,14 @@ class ApplyResult(object):
        self._ready = False
self._callback = callback
cache[self._job] = self
-
+
def ready(self):
return self._ready
-
+
def successful(self):
assert self._ready
return self._success
-
+
def wait(self, timeout=None):
self._cond.acquire()
try:
@@ -438,7 +438,7 @@ class ApplyResult(object):
#
class MapResult(ApplyResult):
-
+
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
@@ -449,7 +449,7 @@ class MapResult(ApplyResult):
            self._ready = True
else:
self._number_left = length//chunksize + bool(length % chunksize)
-
+
def _set(self, i, success_result):
success, result = success_result
if success:
@@ -492,10 +492,10 @@ class IMapIterator(object):
        self._length = None
self._unsorted = {}
cache[self._job] = self
-
+
def __iter__(self):
return self
-
+
def next(self, timeout=None):
self._cond.acquire()
try:
@@ -520,7 +520,7 @@ class IMapIterator(object):
        raise value
__next__ = next # XXX
-
+
def _set(self, i, obj):
self._cond.acquire()
try:
@@ -534,12 +534,12 @@ class IMapIterator(object):
                self._cond.notify()
else:
self._unsorted[i] = obj
-
+
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
-
+
def _set_length(self, length):
self._cond.acquire()
try:
@@ -572,18 +572,18 @@ class IMapUnorderedIterator(IMapIterator):
#
class ThreadPool(Pool):
-
+
from .dummy import Process
-
+
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
-
+
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
-
+
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
diff --git a/Lib/multiprocessing/process.py b/Lib/multiprocessing/process.py
index 915d863..43d8297 100644
--- a/Lib/multiprocessing/process.py
+++ b/Lib/multiprocessing/process.py
@@ -47,7 +47,7 @@ def active_children():
    '''
_cleanup()
return list(_current_process._children)
-
+
#
#
#
@@ -69,7 +69,7 @@ class Process(object):
    The class is analagous to `threading.Thread`
'''
_Popen = None
-
+
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = _current_process._counter.next()
@@ -91,7 +91,7 @@ class Process(object):
        '''
if self._target:
self._target(*self._args, **self._kwargs)
-
+
def start(self):
'''
Start child process
@@ -114,7 +114,7 @@ class Process(object):
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
-
+
def join(self, timeout=None):
'''
Wait until child process terminates
@@ -217,11 +217,11 @@ class Process(object):
                           status, self._daemonic and ' daemon' or '')
##
-
+
def _bootstrap(self):
from . import util
global _current_process
-
+
try:
self._children = set()
self._counter = itertools.count(1)
diff --git a/Lib/multiprocessing/queues.py b/Lib/multiprocessing/queues.py
index 78cb362..ea89090 100644
--- a/Lib/multiprocessing/queues.py
+++ b/Lib/multiprocessing/queues.py
@@ -41,9 +41,9 @@ class Queue(object):
        else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
-
+
self._after_fork()
-
+
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
@@ -51,12 +51,12 @@ class Queue(object):
        assert_spawning(self)
return (self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
-
+
def __setstate__(self, state):
(self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
-
+
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
@@ -69,7 +69,7 @@ class Queue(object):
        self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
-
+
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
@@ -93,7 +93,7 @@ class Queue(object):
                return res
finally:
self._rlock.release()
-
+
else:
if block:
deadline = time.time() + timeout
@@ -135,7 +135,7 @@ class Queue(object):
        assert self._closed
if self._jointhread:
self._jointhread()
-
+
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
@@ -146,7 +146,7 @@ class Queue(object):
    def _start_thread(self):
debug('Queue._start_thread()')
-
+
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
@@ -174,14 +174,14 @@ class Queue(object):
            [weakref.ref(self._thread)],
exitpriority=-5
)
-
+
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
-
+
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
@@ -191,7 +191,7 @@ class Queue(object):
            debug('... queue thread joined')
else:
debug('... queue thread already dead')
-
+
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
@@ -206,7 +206,7 @@ class Queue(object):
    def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
-
+
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
@@ -217,7 +217,7 @@ class Queue(object):
            wrelease = writelock.release
else:
wacquire = None
-
+
try:
while 1:
nacquire()
@@ -257,7 +257,7 @@ class Queue(object):
                    traceback.print_exc()
except Exception:
pass
-
+
_sentinel = object()
#
@@ -274,7 +274,7 @@ class JoinableQueue(Queue):
        Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
-
+
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
@@ -285,7 +285,7 @@ class JoinableQueue(Queue):
    def put(self, item, block=True, timeout=None):
Queue.put(self, item, block, timeout)
self._unfinished_tasks.release()
-
+
def task_done(self):
self._cond.acquire()
try:
@@ -295,7 +295,7 @@ class JoinableQueue(Queue):
            self._cond.notify_all()
finally:
self._cond.release()
-
+
def join(self):
self._cond.acquire()
try:
diff --git a/Lib/multiprocessing/reduction.py b/Lib/multiprocessing/reduction.py
index 0d6cf4f..17778ef 100644
--- a/Lib/multiprocessing/reduction.py
+++ b/Lib/multiprocessing/reduction.py
@@ -36,7 +36,7 @@ if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
if sys.platform == 'win32':
import _subprocess
from ._multiprocessing import win32
-
+
def send_handle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
@@ -46,14 +46,14 @@ if sys.platform == 'win32':
            conn.send(new_handle)
finally:
close(process_handle)
-
+
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid):
_multiprocessing.sendfd(conn.fileno(), handle)
-
+
def recv_handle(conn):
return _multiprocessing.recvfd(conn.fileno())
@@ -93,7 +93,7 @@ def _get_listener():
def _serve():
from .util import is_exiting, sub_warning
-
+
while 1:
try:
conn = _listener.accept()
@@ -109,7 +109,7 @@ def _serve():
                'thread for sharing handles raised exception :\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
-
+
#
# Functions to be used for pickling/unpickling objects with handles
#
@@ -176,15 +176,15 @@ copy_reg.pickle(socket.socket, reduce_socket)
#
if sys.platform == 'win32':
-
+
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
-
+
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _multiprocessing.PipeConnection(
handle, readable=readable, writable=writable
)
-
+
copy_reg.pickle(_multiprocessing.PipeConnection, reduce_pipe_connection)
diff --git a/Lib/multiprocessing/sharedctypes.py b/Lib/multiprocessing/sharedctypes.py
index 6877249..808d312 100644
--- a/Lib/multiprocessing/sharedctypes.py
+++ b/Lib/multiprocessing/sharedctypes.py
@@ -92,10 +92,10 @@ def copy(obj):
    new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
-
+
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
-
+
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
@@ -123,7 +123,7 @@ def reduce_ctype(obj):
        return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
else:
return rebuild_ctype, (type(obj), obj._wrapper, None)
-
+
def rebuild_ctype(type_, wrapper, length):
if length is not None:
type_ = type_ * length
@@ -170,7 +170,7 @@ class_cache = weakref.WeakKeyDictionary()
#
class SynchronizedBase(object):
-
+
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
@@ -180,55 +180,55 @@ class SynchronizedBase(object):
    def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
-
+
def get_obj(self):
return self._obj
-
+
def get_lock(self):
return self._lock
-
+
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
-
-
+
+
class Synchronized(SynchronizedBase):
value = make_property('value')
-
-
+
+
class SynchronizedArray(SynchronizedBase):
-
+
def __len__(self):
return len(self._obj)
-
+
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
-
+
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
-
+
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
-
+
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
-
-
+
+
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw')
diff --git a/Lib/multiprocessing/synchronize.py b/Lib/multiprocessing/synchronize.py
index d642032..6a7189a 100644
--- a/Lib/multiprocessing/synchronize.py
+++ b/Lib/multiprocessing/synchronize.py
@@ -38,7 +38,7 @@ class SemLock(object):
        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
debug('created semlock with handle %s' % sl.handle)
self._make_methods()
-
+
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
@@ -129,7 +129,7 @@ class RLock(SemLock):
    def __init__(self):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
-
+
def __repr__(self):
try:
if self._semlock._is_mine():
@@ -210,17 +210,17 @@ class Condition(object):
    def notify(self):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
-
+
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
-
+
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
-
+
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
@@ -233,7 +233,7 @@ class Condition(object):
        while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
-
+
sleepers = 0
while self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
@@ -266,7 +266,7 @@ class Event(object):
            return False
finally:
self._cond.release()
-
+
def set(self):
self._cond.acquire()
try:
diff --git a/Lib/multiprocessing/util.py b/Lib/multiprocessing/util.py
index d1b190c..25ff8bd 100644
--- a/Lib/multiprocessing/util.py
+++ b/Lib/multiprocessing/util.py
@@ -83,7 +83,7 @@ def _check_logger_class():
    import logging
if hasattr(logging, 'multiprocessing'):
return
-
+
logging._acquireLock()
try:
OldLoggerClass = logging.getLoggerClass()