summaryrefslogtreecommitdiffstats
path: root/jemalloc/src
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2010-10-18 07:04:44 (GMT)
committerJason Evans <jasone@canonware.com>2010-10-18 07:04:44 (GMT)
commit088e6a0a37e197c8db16e3d4c518a5eb762e7657 (patch)
tree68f895d4fe22cd52001384effad281ad979ce7c7 /jemalloc/src
parent8de6a0282333ca742fb2870b1e859968d3c1b11f (diff)
downloadjemalloc-088e6a0a37e197c8db16e3d4c518a5eb762e7657.zip
jemalloc-088e6a0a37e197c8db16e3d4c518a5eb762e7657.tar.gz
jemalloc-088e6a0a37e197c8db16e3d4c518a5eb762e7657.tar.bz2
Fix arena bugs.
Split arena_dissociate_bin_run() out of arena_dalloc_bin_run(), so that arena_bin_malloc_hard() can avoid dissociation when recovering from losing a race. This fixes a bug introduced by a recent attempted fix. Fix a regression in arena_ralloc_large_grow() that was introduced by recent fixes.
Diffstat (limited to 'jemalloc/src')
-rw-r--r--jemalloc/src/arena.c25
1 file changed, 19 insertions, 6 deletions
diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index 3c31e98..be26160 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -174,6 +174,8 @@ static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
+static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
@@ -1856,10 +1858,9 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
#endif
static void
-arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
- size_t npages, run_ind, past;
/* Dissociate run from bin. */
if (run == bin->runcur)
@@ -1876,6 +1877,17 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
*/
arena_run_tree_remove(&bin->runs, run_mapelm);
}
+}
+
+static void
+arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
+{
+ size_t npages, run_ind, past;
+
+ assert(run != bin->runcur);
+ assert(arena_run_tree_search(&bin->runs, &chunk->map[
+ (((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL);
malloc_mutex_unlock(&bin->lock);
/******************************/
@@ -1980,9 +1992,10 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
#endif
arena_run_reg_dalloc(run, ptr);
- if (run->nfree == bin->nregs)
+ if (run->nfree == bin->nregs) {
+ arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(arena, chunk, run, bin);
- else if (run->nfree == 1 && run != bin->runcur)
+ } else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
#ifdef JEMALLOC_STATS
@@ -2140,8 +2153,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);
- size += splitsize;
- npages = (size >> PAGE_SHIFT);
+ size = oldsize + splitsize;
+ npages = size >> PAGE_SHIFT;
/*
* Mark the extended run as dirty if either portion of the run