author     Charles-François Natali <neologix@free.fr>  2011-07-02 12:43:11 (GMT)
committer  Charles-François Natali <neologix@free.fr>  2011-07-02 12:43:11 (GMT)
commit     723585bbaf5a4c83d187c7868790cac5ac1991e9 (patch)
tree       9084f9388ca9fb68b6a29b3637a53ee4835f1e3a /Lib
parent     7ae4448ed235e37726d4082b75765fd6b4d0535a (diff)
parent     a4a04069fd93395369b6061ed07f471e53f1c7a1 (diff)
Merge issue #12352: Fix a deadlock in multiprocessing.Heap when a block is
freed by the garbage collector while the Heap lock is held.
Diffstat (limited to 'Lib')
-rw-r--r--  Lib/multiprocessing/heap.py        39
-rw-r--r--  Lib/test/test_multiprocessing.py   24
2 files changed, 57 insertions, 6 deletions
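The hazard being fixed is easiest to see in isolation before reading the diff. The sketch below is hypothetical (Block and make_cycle are not part of multiprocessing) and assumes Python 3.4+ (PEP 442), where finalizers on objects in reference cycles run during collection; it shows why a finalizer that blocks on a non-reentrant lock can hang the process when the cyclic GC runs while that lock is already held:

import gc
import threading

heap_lock = threading.Lock()   # stands in for Heap._lock (non-reentrant)

class Block:
    """Hypothetical finalizable object standing in for BufferWrapper."""
    def __del__(self):
        # The pre-fix free() did a blocking heap_lock.acquire() here; if
        # the GC runs this finalizer while the lock is already held, that
        # acquire never returns.  A trylock merely reports the hazard:
        if not heap_lock.acquire(False):
            print("lock already held - a blocking acquire would deadlock")
        else:
            heap_lock.release()

def make_cycle():
    a, b = Block(), Block()
    a.buddy, b.buddy = b, a    # unreachable cycle: only the cyclic GC frees it

with heap_lock:                # simulate malloc() holding the heap lock
    make_cycle()
    gc.collect()               # finalizers run while the lock is held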
diff --git a/Lib/multiprocessing/heap.py b/Lib/multiprocessing/heap.py
index 1834d0a..0a25ef0 100644
--- a/Lib/multiprocessing/heap.py
+++ b/Lib/multiprocessing/heap.py
@@ -101,6 +101,8 @@ class Heap(object):
         self._stop_to_block = {}
         self._allocated_blocks = set()
         self._arenas = []
+        # list of pending blocks to free - see free() comment below
+        self._pending_free_blocks = []
 
     @staticmethod
     def _roundup(n, alignment):
@@ -175,15 +177,39 @@
         return start, stop
 
+    def _free_pending_blocks(self):
+        # Free all the blocks in the pending list - called with the lock held.
+        while True:
+            try:
+                block = self._pending_free_blocks.pop()
+            except IndexError:
+                break
+            self._allocated_blocks.remove(block)
+            self._free(block)
+
     def free(self, block):
         # free a block returned by malloc()
+        # Since free() can be called asynchronously by the GC, it could happen
+        # that it's called while self._lock is held: in that case,
+        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
+        # trylock is used instead, and if the lock can't be acquired
+        # immediately, the block is added to a list of blocks to be freed
+        # synchronously sometime later from malloc() or free(), by calling
+        # _free_pending_blocks() (appending to and retrieving from a list is
+        # not strictly thread-safe but under CPython it's atomic thanks to
+        # the GIL).
         assert os.getpid() == self._lastpid
-        self._lock.acquire()
-        try:
-            self._allocated_blocks.remove(block)
-            self._free(block)
-        finally:
-            self._lock.release()
+        if not self._lock.acquire(False):
+            # can't acquire the lock right now, add the block to the list of
+            # pending blocks to free
+            self._pending_free_blocks.append(block)
+        else:
+            # we hold the lock
+            try:
+                self._free_pending_blocks()
+                self._allocated_blocks.remove(block)
+                self._free(block)
+            finally:
+                self._lock.release()
 
     def malloc(self, size):
         # return a block of right size (possibly rounded up)
@@ -191,6 +217,7 @@
         if os.getpid() != self._lastpid:
             self.__init__()  # reinitialize after fork
         self._lock.acquire()
+        self._free_pending_blocks()
         try:
             size = self._roundup(max(size,1), self._alignment)
             (arena, start, stop) = self._malloc(size)
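The essence of the fix above is a trylock plus a pending-free list: free() never blocks on the heap lock, and whichever caller next holds the lock drains the deferred blocks. A minimal standalone sketch of that pattern follows (DeferredFreeHeap and its methods are hypothetical names, not the stdlib code):

import threading

class DeferredFreeHeap:
    """Sketch of the trylock-plus-pending-list idea from the fix above."""
    def __init__(self):
        self._lock = threading.Lock()
        self._pending = []        # blocks queued while the lock was busy
        self._allocated = set()

    def _do_free(self, block):
        self._allocated.discard(block)   # real code returns block to arenas

    def _drain_pending(self):
        # Called with self._lock held; list.pop() is atomic under the GIL.
        while True:
            try:
                block = self._pending.pop()
            except IndexError:
                break
            self._do_free(block)

    def free(self, block):
        # acquire(False) is a trylock: it never blocks, so a GC-triggered
        # call while the lock is already held cannot deadlock.
        if not self._lock.acquire(False):
            self._pending.append(block)  # defer; drained by the lock holder
        else:
            try:
                self._drain_pending()
                self._do_free(block)
            finally:
                self._lock.release()

    def malloc(self, size):
        with self._lock:
            self._drain_pending()        # reclaim anything queued meanwhile
            block = object()             # stand-in for a real arena block
            self._allocated.add(block)
            return block

list.append() and list.pop() each execute as a single GIL-protected operation on CPython, which is why the pending list needs no lock of its own.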
diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py
index b752d8d..405fbd5 100644
--- a/Lib/test/test_multiprocessing.py
+++ b/Lib/test/test_multiprocessing.py
@@ -1721,6 +1721,8 @@ class _TestHeap(BaseTestCase):
         # verify the state of the heap
         all = []
         occupied = 0
+        heap._lock.acquire()
+        self.addCleanup(heap._lock.release)
         for L in list(heap._len_to_seq.values()):
             for arena, start, stop in L:
                 all.append((heap._arenas.index(arena), start, stop,
@@ -1738,6 +1740,28 @@
             self.assertTrue((arena != narena and nstart == 0) or
                             (stop == nstart))
 
+    def test_free_from_gc(self):
+        # Check that freeing of blocks by the garbage collector doesn't
+        # deadlock (issue #12352).
+        # Make sure the GC is enabled, and set lower collection thresholds to
+        # make collections more frequent (and increase the probability of
+        # deadlock).
+        if not gc.isenabled():
+            gc.enable()
+            self.addCleanup(gc.disable)
+        thresholds = gc.get_threshold()
+        self.addCleanup(gc.set_threshold, *thresholds)
+        gc.set_threshold(10)
+
+        # perform numerous block allocations, with cyclic references to make
+        # sure objects are collected asynchronously by the gc
+        for i in range(5000):
+            a = multiprocessing.heap.BufferWrapper(1)
+            b = multiprocessing.heap.BufferWrapper(1)
+            # circular references
+            a.buddy = b
+            b.buddy = a
+
 #
 #
 #
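The test's key knob is gc.set_threshold(10): generation-0 collections then trigger after roughly ten net new allocations, so cleanup callbacks fire at many more points in the program than usual. A small standalone illustration of that knob (Node and make_cycle are hypothetical names):

import gc

gc.set_threshold(10)           # collect gen-0 after ~10 net new allocations

class Node:
    pass

def make_cycle():
    a, b = Node(), Node()
    a.other, b.other = b, a    # unreachable cycle: reclaimed only by the GC

before = gc.get_count()
for _ in range(100):
    make_cycle()
# With the low threshold, several gen-0 collections have already run inside
# the loop, so finalizers and cleanup callbacks execute at essentially
# arbitrary points - which is what drives Heap.free() from the GC here.
print(before, gc.get_count())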