From 8d4203c72de878c3976adc5db1e09d7ec0618d63 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Tue, 13 Apr 2010 20:53:21 -0700
Subject: Fix arena chunk purge/dealloc race conditions.

Fix arena_chunk_dealloc() to put the new spare in a consistent state
before dropping the arena mutex to deallocate the previous spare.

Fix arena_run_dalloc() to insert a newly dirtied chunk into the
chunks_dirty list before potentially deallocating the chunk, so that
dirty page accounting is self-consistent.
---
 jemalloc/src/arena.c | 54 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 30 insertions(+), 24 deletions(-)

diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index 222ec25..e414226 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -470,10 +470,22 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
 	arena_avail_tree_t *runs_avail;
 
-	while (arena->spare != NULL) {
+	/*
+	 * Remove run from the appropriate runs_avail_* tree, so that the arena
+	 * does not use it.
+	 */
+	if ((chunk->map[arena_chunk_header_npages].bits &
+	    CHUNK_MAP_DIRTY) == 0)
+		runs_avail = &arena->runs_avail_clean;
+	else
+		runs_avail = &arena->runs_avail_dirty;
+	arena_avail_tree_remove(runs_avail,
+	    &chunk->map[arena_chunk_header_npages]);
+
+	if (arena->spare != NULL) {
 		arena_chunk_t *spare = arena->spare;
 
-		arena->spare = NULL;
+		arena->spare = chunk;
 		if (spare->dirtied) {
 			ql_remove(&chunk->arena->chunks_dirty, spare,
 			    link_dirty);
@@ -485,21 +497,8 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 #ifdef JEMALLOC_STATS
 		arena->stats.mapped -= chunksize;
 #endif
-	}
-
-	/*
-	 * Remove run from the appropriate runs_avail_* tree, so that the arena
-	 * does not use it.
-	 */
-	if ((chunk->map[arena_chunk_header_npages].bits &
-	    CHUNK_MAP_DIRTY) == 0)
-		runs_avail = &arena->runs_avail_clean;
-	else
-		runs_avail = &arena->runs_avail_dirty;
-	arena_avail_tree_remove(runs_avail,
-	    &chunk->map[arena_chunk_header_npages]);
-
-	arena->spare = chunk;
+	} else
+		arena->spare = chunk;
 }
 
 static arena_run_t *
@@ -925,6 +924,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	/* Insert into runs_avail, now that coalescing is complete. */
 	arena_avail_tree_insert(runs_avail, &chunk->map[run_ind]);
 
+	if (dirty) {
+		/*
+		 * Insert into chunks_dirty before potentially calling
+		 * arena_chunk_dealloc(), so that chunks_dirty and
+		 * arena->ndirty are consistent.
+		 */
+		if (chunk->dirtied == false) {
+			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
+			chunk->dirtied = true;
+		}
+	}
+
 	/*
 	 * Deallocate chunk if it is now completely unused. The bit
 	 * manipulation checks whether the first run is unallocated and extends
@@ -935,19 +946,14 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 		arena_chunk_dealloc(arena, chunk);
 
 	/*
-	 * It is okay to do dirty page processing even if the chunk was
+	 * It is okay to do dirty page processing here even if the chunk was
 	 * deallocated above, since in that case it is the spare. Waiting
 	 * until after possible chunk deallocation to do dirty processing
 	 * allows for an old spare to be fully deallocated, thus decreasing the
 	 * chances of spuriously crossing the dirty page purging threshold.
	 */
-	if (dirty) {
-		if (chunk->dirtied == false) {
-			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
-			chunk->dirtied = true;
-		}
+	if (dirty)
 		arena_maybe_purge(arena);
-	}
 }
 
 static void
--
cgit v0.12
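
Editor's note: the following standalone C sketch is not part of the patch above. It is a minimal, hypothetical illustration of the ordering the commit message describes for arena_chunk_dealloc(): guarded state must be made consistent before the mutex is dropped to do the slow deallocation of the previous spare, so that any thread acquiring the lock in that window sees a valid spare. The names cache_t, chunk_t, and retire_chunk(), and the use of free() as a stand-in for unmapping, are invented for illustration and do not appear in jemalloc.

/*
 * Sketch only: publish the new spare under the lock, then drop the
 * lock around the expensive teardown of the old spare.
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct chunk_s {
	int dirty;		/* placeholder payload */
} chunk_t;

typedef struct {
	pthread_mutex_t lock;
	chunk_t *spare;		/* cached fully-unused chunk, or NULL */
} cache_t;

/* Called with cache->lock held; retires `chunk' as the new spare. */
static void
retire_chunk(cache_t *cache, chunk_t *chunk)
{
	chunk_t *old_spare = cache->spare;

	/*
	 * Install the new spare first, so the cache is consistent at
	 * every point where the lock could be released below.
	 */
	cache->spare = chunk;

	if (old_spare != NULL) {
		/*
		 * Drop the lock around the slow deallocation of the old
		 * spare; concurrent threads that take the lock during
		 * this window observe the new spare, never a transient
		 * NULL or a half-updated cache.
		 */
		pthread_mutex_unlock(&cache->lock);
		free(old_spare);	/* stand-in for unmapping a chunk */
		pthread_mutex_lock(&cache->lock);
	}
}

The buggy shape the patch removes is the reverse ordering: clearing the cached pointer, dropping the lock to free the old value, and only afterwards installing the replacement, which leaves the shared state inconsistent while the lock is not held.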