author     Antoine Pitrou <solipsis@pitrou.net>  2012-04-24 20:56:57 (GMT)
committer  Antoine Pitrou <solipsis@pitrou.net>  2012-04-24 20:56:57 (GMT)
commit     5438ed1572f5e379cac8b85ed2226101a1bfcacc
tree       bf6a9df1be40d5d3df6fbfcdee606d5a49fca273
parent     9f478c021dc499b0d23ee418c0dcc6b5076524aa
Issue #4892: multiprocessing Connections can now be transferred over multiprocessing Connections.
Patch by Richard Oudkerk (sbt).
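
As a quick illustration of the new capability, here is a minimal sketch (not part of the patch; it also assumes the companion changes to Lib/multiprocessing/connection.py from the same issue): one end of a Pipe is sent through another Pipe to a child process, which then uses it directly.

    from multiprocessing import Pipe, Process

    def worker(outer):
        # The inner Connection object itself arrives over another Connection.
        inner = outer.recv()
        inner.send('hello through a forwarded connection')

    if __name__ == '__main__':
        outer_a, outer_b = Pipe()
        inner_a, inner_b = Pipe()
        p = Process(target=worker, args=(outer_b,))
        p.start()
        outer_a.send(inner_b)    # a Connection transferred over a Connection
        print(inner_a.recv())    # 'hello through a forwarded connection'
        p.join()

Behind the scenes this relies on the reducers below, which duplicate the underlying handle or file descriptor and, where needed, hand it over through the per-process ResourceSharer.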
Diffstat (limited to 'Lib/multiprocessing/reduction.py')
-rw-r--r--  Lib/multiprocessing/reduction.py | 315
1 file changed, 178 insertions(+), 137 deletions(-)
diff --git a/Lib/multiprocessing/reduction.py b/Lib/multiprocessing/reduction.py
index c80de59..ce38fe3 100644
--- a/Lib/multiprocessing/reduction.py
+++ b/Lib/multiprocessing/reduction.py
@@ -33,7 +33,7 @@
# SUCH DAMAGE.
#
-__all__ = []
+__all__ = ['reduce_socket', 'reduce_connection', 'send_handle', 'recv_handle']
import os
import sys
@@ -42,9 +42,8 @@ import threading
import struct
from multiprocessing import current_process
-from multiprocessing.forking import Popen, duplicate, close, ForkingPickler
from multiprocessing.util import register_after_fork, debug, sub_debug
-from multiprocessing.connection import Client, Listener, Connection
+from multiprocessing.util import is_exiting, sub_warning
#
@@ -60,22 +59,91 @@ if not(sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and
#
if sys.platform == 'win32':
+    # Windows
+    __all__ += ['reduce_pipe_connection']
    import _winapi
    def send_handle(conn, handle, destination_pid):
-        process_handle = _winapi.OpenProcess(
-            _winapi.PROCESS_ALL_ACCESS, False, destination_pid
-            )
-        try:
-            new_handle = duplicate(handle, process_handle)
-            conn.send(new_handle)
-        finally:
-            close(process_handle)
+        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
+        conn.send(dh)
    def recv_handle(conn):
-        return conn.recv()
+        return conn.recv().detach()
+
+    class DupHandle(object):
+        def __init__(self, handle, access, pid=None):
+            # duplicate handle for process with given pid
+            if pid is None:
+                pid = os.getpid()
+            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
+            try:
+                self._handle = _winapi.DuplicateHandle(
+                    _winapi.GetCurrentProcess(),
+                    handle, proc, access, False, 0)
+            finally:
+                _winapi.CloseHandle(proc)
+            self._access = access
+            self._pid = pid
+
+        def detach(self):
+            # retrieve handle from process which currently owns it
+            if self._pid == os.getpid():
+                return self._handle
+            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
+                                       self._pid)
+            try:
+                return _winapi.DuplicateHandle(
+                    proc, self._handle, _winapi.GetCurrentProcess(),
+                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
+            finally:
+                _winapi.CloseHandle(proc)
+
+    class DupSocket(object):
+        def __init__(self, sock):
+            new_sock = sock.dup()
+            def send(conn, pid):
+                share = new_sock.share(pid)
+                conn.send_bytes(share)
+            self._id = resource_sharer.register(send, new_sock.close)
+
+        def detach(self):
+            conn = resource_sharer.get_connection(self._id)
+            try:
+                share = conn.recv_bytes()
+                return socket.fromshare(share)
+            finally:
+                conn.close()
+
+    def reduce_socket(s):
+        return rebuild_socket, (DupSocket(s),)
+
+    def rebuild_socket(ds):
+        return ds.detach()
+
+    def reduce_connection(conn):
+        handle = conn.fileno()
+        with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
+            ds = DupSocket(s)
+            return rebuild_connection, (ds, conn.readable, conn.writable)
+
+    def rebuild_connection(ds, readable, writable):
+        from .connection import Connection
+        sock = ds.detach()
+        return Connection(sock.detach(), readable, writable)
+
+    def reduce_pipe_connection(conn):
+        access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
+                  (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
+        dh = DupHandle(conn.fileno(), access)
+        return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
+
+    def rebuild_pipe_connection(dh, readable, writable):
+        from .connection import PipeConnection
+        handle = dh.detach()
+        return PipeConnection(handle, readable, writable)
else:
+    # Unix
    def send_handle(conn, handle, destination_pid):
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.sendmsg([b'x'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
@@ -94,136 +162,109 @@ else:
                pass
            raise RuntimeError('Invalid data received')
+    class DupFd(object):
+        def __init__(self, fd):
+            new_fd = os.dup(fd)
+            def send(conn, pid):
+                send_handle(conn, new_fd, pid)
+            def close():
+                os.close(new_fd)
+            self._id = resource_sharer.register(send, close)
+
+        def detach(self):
+            conn = resource_sharer.get_connection(self._id)
+            try:
+                return recv_handle(conn)
+            finally:
+                conn.close()
-#
-# Support for a per-process server thread which caches pickled handles
-#
-
-_cache = set()
-
-def _reset(obj):
-    global _lock, _listener, _cache
-    for h in _cache:
-        close(h)
-    _cache.clear()
-    _lock = threading.Lock()
-    _listener = None
-
-_reset(None)
-register_after_fork(_reset, _reset)
-
-def _get_listener():
-    global _listener
-
-    if _listener is None:
-        _lock.acquire()
-        try:
-            if _listener is None:
-                debug('starting listener and thread for sending handles')
-                _listener = Listener(authkey=current_process().authkey)
-                t = threading.Thread(target=_serve)
-                t.daemon = True
-                t.start()
-        finally:
-            _lock.release()
-
-    return _listener
-
-def _serve():
-    from .util import is_exiting, sub_warning
-
-    while 1:
-        try:
-            conn = _listener.accept()
-            handle_wanted, destination_pid = conn.recv()
-            _cache.remove(handle_wanted)
-            send_handle(conn, handle_wanted, destination_pid)
-            close(handle_wanted)
-            conn.close()
-        except:
-            if not is_exiting():
-                import traceback
-                sub_warning(
-                    'thread for sharing handles raised exception :\n' +
-                    '-'*79 + '\n' + traceback.format_exc() + '-'*79
-                    )
-
-#
-# Functions to be used for pickling/unpickling objects with handles
-#
-
-def reduce_handle(handle):
-    if Popen.thread_is_spawning():
-        return (None, Popen.duplicate_for_child(handle), True)
-    dup_handle = duplicate(handle)
-    _cache.add(dup_handle)
-    sub_debug('reducing handle %d', handle)
-    return (_get_listener().address, dup_handle, False)
-
-def rebuild_handle(pickled_data):
-    address, handle, inherited = pickled_data
-    if inherited:
-        return handle
-    sub_debug('rebuilding handle %d', handle)
-    conn = Client(address, authkey=current_process().authkey)
-    conn.send((handle, os.getpid()))
-    new_handle = recv_handle(conn)
-    conn.close()
-    return new_handle
-
-#
-# Register `Connection` with `ForkingPickler`
-#
-
-def reduce_connection(conn):
-    rh = reduce_handle(conn.fileno())
-    return rebuild_connection, (rh, conn.readable, conn.writable)
-
-def rebuild_connection(reduced_handle, readable, writable):
-    handle = rebuild_handle(reduced_handle)
-    return Connection(
-        handle, readable=readable, writable=writable
-        )
-
-ForkingPickler.register(Connection, reduce_connection)
-
-#
-# Register `socket.socket` with `ForkingPickler`
-#
-
-def fromfd(fd, family, type_, proto=0):
-    s = socket.fromfd(fd, family, type_, proto)
-    if s.__class__ is not socket.socket:
-        s = socket.socket(_sock=s)
-    return s
+    def reduce_socket(s):
+        df = DupFd(s.fileno())
+        return rebuild_socket, (df, s.family, s.type, s.proto)
-def reduce_socket(s):
-    reduced_handle = reduce_handle(s.fileno())
-    return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
+    def rebuild_socket(df, family, type, proto):
+        fd = df.detach()
+        s = socket.fromfd(fd, family, type, proto)
+        os.close(fd)
+        return s
-def rebuild_socket(reduced_handle, family, type_, proto):
-    fd = rebuild_handle(reduced_handle)
-    _sock = fromfd(fd, family, type_, proto)
-    close(fd)
-    return _sock
+    def reduce_connection(conn):
+        df = DupFd(conn.fileno())
+        return rebuild_connection, (df, conn.readable, conn.writable)
-ForkingPickler.register(socket.socket, reduce_socket)
+    def rebuild_connection(df, readable, writable):
+        from .connection import Connection
+        fd = df.detach()
+        return Connection(fd, readable, writable)
#
-# Register `_multiprocessing.PipeConnection` with `ForkingPickler`
+# Server which shares registered resources with clients
#
-if sys.platform == 'win32':
-    from multiprocessing.connection import PipeConnection
-
-    def reduce_pipe_connection(conn):
-        rh = reduce_handle(conn.fileno())
-        return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
-
-    def rebuild_pipe_connection(reduced_handle, readable, writable):
-        handle = rebuild_handle(reduced_handle)
-        return PipeConnection(
-            handle, readable=readable, writable=writable
-            )
-
-    ForkingPickler.register(PipeConnection, reduce_pipe_connection)
+class ResourceSharer(object):
+    def __init__(self):
+        self._key = 0
+        self._cache = {}
+        self._old_locks = []
+        self._lock = threading.Lock()
+        self._listener = None
+        self._address = None
+        register_after_fork(self, ResourceSharer._afterfork)
+
+    def register(self, send, close):
+        with self._lock:
+            if self._address is None:
+                self._start()
+            self._key += 1
+            self._cache[self._key] = (send, close)
+            return (self._address, self._key)
+
+    @staticmethod
+    def get_connection(ident):
+        from .connection import Client
+        address, key = ident
+        c = Client(address, authkey=current_process().authkey)
+        c.send((key, os.getpid()))
+        return c
+
+    def _afterfork(self):
+        for key, (send, close) in self._cache.items():
+            close()
+        self._cache.clear()
+        # If self._lock was locked at the time of the fork, it may be broken
+        # -- see issue 6721. Replace it without letting it be gc'ed.
+        self._old_locks.append(self._lock)
+        self._lock = threading.Lock()
+        if self._listener is not None:
+            self._listener.close()
+        self._listener = None
+        self._address = None
+
+    def _start(self):
+        from .connection import Listener
+        assert self._listener is None
+        debug('starting listener and thread for sending handles')
+        self._listener = Listener(authkey=current_process().authkey)
+        self._address = self._listener.address
+        t = threading.Thread(target=self._serve)
+        t.daemon = True
+        t.start()
+
+    def _serve(self):
+        while 1:
+            try:
+                conn = self._listener.accept()
+                key, destination_pid = conn.recv()
+                send, close = self._cache.pop(key)
+                send(conn, destination_pid)
+                close()
+                conn.close()
+            except:
+                if not is_exiting():
+                    import traceback
+                    sub_warning(
+                        'thread for sharing handles raised exception :\n' +
+                        '-'*79 + '\n' + traceback.format_exc() + '-'*79
+                        )
+
+resource_sharer = ResourceSharer()
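
For readers skimming the new ResourceSharer, the following standalone sketch (hypothetical, simplified names; it omits the authkey authentication and the after-fork cleanup of the real class) shows the handshake it implements: register() stores a pair of callbacks under a fresh key and returns (address, key); a client later connects to that address, sends (key, pid), and the serving thread answers by invoking the stored send callback on the accepted connection.

    import os
    import itertools
    import threading
    from multiprocessing.connection import Listener, Client

    _cache = {}                  # key -> (send, close) callbacks
    _keys = itertools.count(1)
    _listener = Listener()       # one listener per process

    def _serve():
        # Serve one request at a time: read (key, pid), run the callbacks.
        while True:
            with _listener.accept() as conn:
                key, destination_pid = conn.recv()
                send, close = _cache.pop(key)
                send(conn, destination_pid)
                close()

    threading.Thread(target=_serve, daemon=True).start()

    def register(send, close):
        key = next(_keys)
        _cache[key] = (send, close)
        return (_listener.address, key)

    def get_connection(ident):
        address, key = ident
        c = Client(address)
        c.send((key, os.getpid()))
        return c

    if __name__ == '__main__':
        # Register a trivial 'resource' and fetch it back through the listener.
        ident = register(lambda conn, pid: conn.send('payload for pid %d' % pid),
                         lambda: None)
        with get_connection(ident) as conn:
            print(conn.recv())

DupFd (Unix) and DupSocket (Windows) plug into this mechanism by registering a send callback that transfers the duplicated file descriptor or shared socket to the requesting process, while DupHandle duplicates a Windows handle directly without going through the sharer.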