author     Benjamin Peterson <benjamin@python.org>  2008-06-11 16:44:04 (GMT)
committer  Benjamin Peterson <benjamin@python.org>  2008-06-11 16:44:04 (GMT)
commit     e711cafab13efc9c1fe6c5cd75826401445eb585 (patch)
tree       091a6334fdf6ccdcb93027302c5e038570ca04a4 /Lib/multiprocessing/dummy
parent     eec3d7137929611b98dd593cd2f122cd91b723b2 (diff)
download   cpython-e711cafab13efc9c1fe6c5cd75826401445eb585.zip
           cpython-e711cafab13efc9c1fe6c5cd75826401445eb585.tar.gz
           cpython-e711cafab13efc9c1fe6c5cd75826401445eb585.tar.bz2
Merged revisions 64104,64117 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/trunk

........
  r64104 | benjamin.peterson | 2008-06-10 21:40:25 -0500 (Tue, 10 Jun 2008) | 2 lines

  add the multiprocessing package to fulfill PEP 371
........
  r64117 | benjamin.peterson | 2008-06-11 07:26:31 -0500 (Wed, 11 Jun 2008) | 2 lines

  fix import of multiprocessing by juggling imports
........
Diffstat (limited to 'Lib/multiprocessing/dummy')
-rw-r--r--  Lib/multiprocessing/dummy/__init__.py    143
-rw-r--r--  Lib/multiprocessing/dummy/connection.py   61
2 files changed, 204 insertions(+), 0 deletions(-)
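
Not part of the commit itself: a minimal usage sketch of what the merged, thread-backed API looks like from user code, assuming the multiprocessing package from this merge is importable. multiprocessing.dummy.Pool just wraps multiprocessing.pool.ThreadPool, so map() runs in worker threads rather than in child processes; square() is only an illustrative helper.

    from multiprocessing.dummy import Pool

    def square(x):
        return x * x

    pool = Pool(4)                       # four worker threads, not OS processes
    print(pool.map(square, range(10)))   # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
    pool.close()
    pool.join()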
diff --git a/Lib/multiprocessing/dummy/__init__.py b/Lib/multiprocessing/dummy/__init__.py
new file mode 100644
index 0000000..841d831
--- /dev/null
+++ b/Lib/multiprocessing/dummy/__init__.py
@@ -0,0 +1,143 @@
+#
+# Support for the API of the multiprocessing package using threads
+#
+# multiprocessing/dummy/__init__.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [
+ 'Process', 'current_process', 'active_children', 'freeze_support',
+ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
+ 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
+ ]
+
+#
+# Imports
+#
+
+import threading
+import sys
+import weakref
+import array
+import itertools
+
+from multiprocessing import TimeoutError, cpu_count
+from multiprocessing.dummy.connection import Pipe
+from threading import Lock, RLock, Semaphore, BoundedSemaphore
+from threading import Event
+from queue import Queue
+
+#
+#
+#
+
+class DummyProcess(threading.Thread):
+
+ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
+ threading.Thread.__init__(self, group, target, name, args, kwargs)
+ self._pid = None
+ self._children = weakref.WeakKeyDictionary()
+ self._start_called = False
+ self._parent = current_process()
+
+ def start(self):
+ assert self._parent is current_process()
+ self._start_called = True
+ self._parent._children[self] = None
+ threading.Thread.start(self)
+
+ def get_exitcode(self):
+ if self._start_called and not self.isAlive():
+ return 0
+ else:
+ return None
+
+ # XXX
+ if sys.version_info < (3, 0):
+ is_alive = threading.Thread.isAlive.__func__
+ get_name = threading.Thread.getName.__func__
+ set_name = threading.Thread.setName.__func__
+ is_daemon = threading.Thread.isDaemon.__func__
+ set_daemon = threading.Thread.setDaemon.__func__
+ else:
+ is_alive = threading.Thread.isAlive
+ get_name = threading.Thread.getName
+ set_name = threading.Thread.setName
+ is_daemon = threading.Thread.isDaemon
+ set_daemon = threading.Thread.setDaemon
+
+#
+#
+#
+
+class Condition(threading._Condition):
+ # XXX
+ if sys.version_info < (3, 0):
+ notify_all = threading._Condition.notifyAll.__func__
+ else:
+ notify_all = threading._Condition.notifyAll
+
+#
+#
+#
+
+Process = DummyProcess
+current_process = threading.currentThread
+current_process()._children = weakref.WeakKeyDictionary()
+
+def active_children():
+ children = current_process()._children
+ for p in list(children):
+ if not p.isAlive():
+ children.pop(p, None)
+ return list(children)
+
+def freeze_support():
+ pass
+
+#
+#
+#
+
+class Namespace(object):
+ def __init__(self, **kwds):
+ self.__dict__.update(kwds)
+ def __repr__(self):
+ items = list(self.__dict__.items())
+ temp = []
+ for name, value in items:
+ if not name.startswith('_'):
+ temp.append('%s=%r' % (name, value))
+ temp.sort()
+ return 'Namespace(%s)' % str.join(', ', temp)
+
+dict = dict
+list = list
+
+def Array(typecode, sequence, lock=True):
+ return array.array(typecode, sequence)
+
+class Value(object):
+ def __init__(self, typecode, value, lock=True):
+ self._typecode = typecode
+ self._value = value
+ def _get(self):
+ return self._value
+ def _set(self, value):
+ self._value = value
+ value = property(_get, _set)
+ def __repr__(self):
+ return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
+
+def Manager():
+ return sys.modules[__name__]
+
+def shutdown():
+ pass
+
+def Pool(processes=None, initializer=None, initargs=()):
+ from multiprocessing.pool import ThreadPool
+ return ThreadPool(processes, initializer, initargs)
+
+JoinableQueue = Queue
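
A short sketch, not part of the diff, of the thread-backed Process API defined above; Process is DummyProcess, a threading.Thread subclass, so start() and join() are ordinary thread operations, and worker() is only an illustrative target. It assumes the module imports cleanly on an interpreter of this era.

    import multiprocessing.dummy as dummy

    def worker(n):
        # Each "process" is really a thread inside the current interpreter.
        print('worker', n, 'running in', dummy.current_process().name)

    procs = [dummy.Process(target=worker, args=(i,)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print('remaining children:', dummy.active_children())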
diff --git a/Lib/multiprocessing/dummy/connection.py b/Lib/multiprocessing/dummy/connection.py
new file mode 100644
index 0000000..0b8a52e
--- /dev/null
+++ b/Lib/multiprocessing/dummy/connection.py
@@ -0,0 +1,61 @@
+#
+# Analogue of `multiprocessing.connection` which uses queues instead of sockets
+#
+# multiprocessing/dummy/connection.py
+#
+# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
+#
+
+__all__ = [ 'Client', 'Listener', 'Pipe' ]
+
+from queue import Queue
+
+
+families = [None]
+
+
+class Listener(object):
+
+ def __init__(self, address=None, family=None, backlog=1):
+ self._backlog_queue = Queue(backlog)
+
+ def accept(self):
+ return Connection(*self._backlog_queue.get())
+
+ def close(self):
+ self._backlog_queue = None
+
+ address = property(lambda self: self._backlog_queue)
+
+
+def Client(address):
+ _in, _out = Queue(), Queue()
+ address.put((_out, _in))
+ return Connection(_in, _out)
+
+
+def Pipe(duplex=True):
+ a, b = Queue(), Queue()
+ return Connection(a, b), Connection(b, a)
+
+
+class Connection(object):
+
+ def __init__(self, _in, _out):
+ self._out = _out
+ self._in = _in
+ self.send = self.send_bytes = _out.put
+ self.recv = self.recv_bytes = _in.get
+
+ def poll(self, timeout=0.0):
+ if self._in.qsize() > 0:
+ return True
+ if timeout <= 0.0:
+ return False
+ self._in.not_empty.acquire()
+ self._in.not_empty.wait(timeout)
+ self._in.not_empty.release()
+ return self._in.qsize() > 0
+
+ def close(self):
+ pass
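
Again not part of the commit: a sketch exercising the queue-backed connection analogue above inside a single interpreter. Pipe() returns two Connection objects wired to each other's queues, and a Listener's address is simply its backlog queue, so Client() "connects" by putting a queue pair onto it; the payload values are illustrative.

    from multiprocessing.dummy.connection import Pipe, Listener, Client

    # Two ends of an in-process "pipe"; send/recv are Queue.put/Queue.get.
    a, b = Pipe()
    a.send('ping')
    print(b.recv())              # -> 'ping'

    # Listener/Client pair: the address is the listener's backlog queue.
    listener = Listener()
    client = Client(listener.address)
    server = listener.accept()
    client.send({'answer': 42})
    print(server.recv())         # -> {'answer': 42}
    listener.close()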