diff options
| author | Jason Evans <jasone@canonware.com> | 2013-06-03 04:05:59 (GMT) |
|---|---|---|
| committer | Jason Evans <jasone@canonware.com> | 2013-06-03 04:05:59 (GMT) |
| commit | 0ed518e5dab789ad2171bb38977a8927e2a26775 (patch) | |
| tree | 839c8d334476ff176c714a82973070930cab7613 /src/chunk.c | |
| parent | 9ef9d9e8c271cdf14f664b871a8f98c827714784 (diff) | |
| parent | 765cc2b58377551c820e2f2ffc0a311ed31a386c (diff) | |
| download | jemalloc-3.4.0.zip jemalloc-3.4.0.tar.gz jemalloc-3.4.0.tar.bz2 | |
Merge branch 'dev' (tag: 3.4.0)
Diffstat (limited to 'src/chunk.c')
| -rw-r--r-- | src/chunk.c | 22 |
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/src/chunk.c b/src/chunk.c
index 044f76b..aef3fed 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -214,7 +214,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size)
 {
 	bool unzeroed;
-	extent_node_t *xnode, *node, *prev, key;
+	extent_node_t *xnode, *node, *prev, *xprev, key;
 
 	unzeroed = pages_purge(chunk, size);
 	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -226,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 	 * held.
 	 */
 	xnode = base_node_alloc();
+	/* Use xprev to implement conditional deferred deallocation of prev. */
+	xprev = NULL;
 
 	malloc_mutex_lock(&chunks_mtx);
 	key.addr = (void *)((uintptr_t)chunk + size);
@@ -242,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		node->size += size;
 		node->zeroed = (node->zeroed && (unzeroed == false));
 		extent_tree_szad_insert(chunks_szad, node);
-		if (xnode != NULL)
-			base_node_dealloc(xnode);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
 		if (xnode == NULL) {
@@ -253,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 			 * already been purged, so this is only a virtual
 			 * memory leak.
 			 */
-			malloc_mutex_unlock(&chunks_mtx);
-			return;
+			goto label_return;
 		}
 		node = xnode;
+		xnode = NULL; /* Prevent deallocation below. */
 		node->addr = chunk;
 		node->size = size;
 		node->zeroed = (unzeroed == false);
@@ -282,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(chunks_szad, node);
 
-		base_node_dealloc(prev);
+		xprev = prev;
 	}
+
+label_return:
 	malloc_mutex_unlock(&chunks_mtx);
+	/*
+	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+	 * avoid potential deadlock.
+	 */
+	if (xnode != NULL)
+		base_node_dealloc(xnode);
+	if (xprev != NULL)
+		base_node_dealloc(prev);
 }
 
 void
