path: root/Lib/multiprocessing
author     Antoine Pitrou <solipsis@pitrou.net>  2011-04-10 22:26:42 (GMT)
committer  Antoine Pitrou <solipsis@pitrou.net>  2011-04-10 22:26:42 (GMT)
commit     7dfc874a48516e58e830b3a455fca34c8cd834eb (patch)
tree       4fe0af4be99d43bbe9ae2192ba64605c9ede5b39 /Lib/multiprocessing
parent     04cb72f96884998beac97ef3a81dca3332e44917 (diff)
download   cpython-7dfc874a48516e58e830b3a455fca34c8cd834eb.zip
           cpython-7dfc874a48516e58e830b3a455fca34c8cd834eb.tar.gz
           cpython-7dfc874a48516e58e830b3a455fca34c8cd834eb.tar.bz2
Issue #8428: Fix a race condition in multiprocessing.Pool when terminating
worker processes: new processes would be spawned while the pool is being shut down. Patch by Charles-François Natali.
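The race is easiest to see from the caller's side: the worker-handler thread keeps calling _maintain_pool() and may respawn a worker just as terminate() is tearing the pool down, so a freshly spawned process can survive the shutdown or make join() hang. A minimal sketch of the kind of usage that could trigger it (a hypothetical reproducer, not the regression test attached to the issue):

import multiprocessing

def noop(x):
    return x

if __name__ == '__main__':
    # Repeatedly build and tear down pools.  Before this fix, terminate()
    # could run concurrently with Pool._handle_workers(), which was still
    # free to spawn replacement workers behind terminate()'s back.
    for _ in range(100):
        pool = multiprocessing.Pool(processes=4)
        pool.map_async(noop, range(10))
        pool.terminate()
        pool.join()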
Diffstat (limited to 'Lib/multiprocessing')
-rw-r--r--  Lib/multiprocessing/pool.py  9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/Lib/multiprocessing/pool.py b/Lib/multiprocessing/pool.py
index 1f366f0..862a60e 100644
--- a/Lib/multiprocessing/pool.py
+++ b/Lib/multiprocessing/pool.py
@@ -295,6 +295,8 @@ class Pool(object):
         while pool._worker_handler._state == RUN and pool._state == RUN:
             pool._maintain_pool()
             time.sleep(0.1)
+        # send sentinel to stop workers
+        pool._taskqueue.put(None)
         debug('worker handler exiting')
 
     @staticmethod
@@ -413,7 +415,6 @@ class Pool(object):
         if self._state == RUN:
             self._state = CLOSE
             self._worker_handler._state = CLOSE
-            self._taskqueue.put(None)
 
     def terminate(self):
         debug('terminating pool')
@@ -447,7 +448,6 @@ class Pool(object):
 
         worker_handler._state = TERMINATE
         task_handler._state = TERMINATE
-        taskqueue.put(None)                 # sentinel
 
         debug('helping task handler/workers to finish')
         cls._help_stuff_finish(inqueue, task_handler, len(pool))
@@ -457,6 +457,11 @@ class Pool(object):
         result_handler._state = TERMINATE
         outqueue.put(None)                  # sentinel
 
+        # We must wait for the worker handler to exit before terminating
+        # workers because we don't want workers to be restarted behind our back.
+        debug('joining worker handler')
+        worker_handler.join()
+
         # Terminate workers which haven't already finished.
         if pool and hasattr(pool[0], 'terminate'):
             debug('terminating workers')
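The ordering introduced above (join the worker handler, then terminate whatever workers remain) is the heart of the fix: as long as the handler thread is alive it may respawn processes, so killing workers first just invites new ones. Below is a toy, threads-only model of that supervisor/worker relationship; the names and structure are illustrative, not the real pool.py internals.

import threading
import time

RUN, TERMINATE = 'RUN', 'TERMINATE'

class TinyPool:
    # Toy model of Pool's supervisor/worker relationship, not the real API.

    def __init__(self):
        self.state = RUN
        self.workers = []
        self.supervisor = threading.Thread(target=self._handle_workers)
        self.supervisor.start()

    def _handle_workers(self):
        # Like Pool._handle_workers: keep the pool populated while running.
        while self.state == RUN:
            self._maintain_pool()
            time.sleep(0.01)
        # Only once this loop has exited is it safe to stop the workers.

    def _maintain_pool(self):
        # Respawn "workers" (threads here; processes in the real Pool).
        self.workers = [t for t in self.workers if t.is_alive()]
        while len(self.workers) < 2:
            t = threading.Thread(target=time.sleep, args=(0.05,))
            t.start()
            self.workers.append(t)

    def terminate(self):
        self.state = TERMINATE
        # The commit's key ordering: wait for the supervisor first, so it
        # cannot restart workers behind our back while we tear them down.
        self.supervisor.join()
        for t in self.workers:
            t.join()

if __name__ == '__main__':
    TinyPool().terminate()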