path: root/src/arena.c
author     Jason Evans <jasone@canonware.com>  2016-12-04 06:48:43 (GMT)
committer  Jason Evans <jasone@canonware.com>  2016-12-04 06:48:43 (GMT)
commit     f1f76357313e7dcad7262f17a48ff0a2e005fcdc (patch)
tree       826e5a42621cbd8abc9e7473437f223faccc2b0c /src/arena.c
parent     0110fa8451af905affd77c3bea0d545fee2251b2 (diff)
parent     2d1bb8980fff829c58dabbf122224f577879a32c (diff)
Merge branch 'rc-4.4.0' (tag: 4.4.0)
Diffstat (limited to 'src/arena.c')
-rw-r--r--  src/arena.c  170
1 file changed, 126 insertions(+), 44 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index e196b13..648a8da 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -38,8 +38,8 @@ static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
+ arena_bin_t *bin);
/******************************************************************************/
@@ -55,8 +55,31 @@ arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
return (arena_mapbits_size_decode(mapbits));
}
+JEMALLOC_INLINE_C const extent_node_t *
+arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ return (&chunk->node);
+}
+
+JEMALLOC_INLINE_C int
+arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+{
+ size_t a_sn, b_sn;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
+ b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+
+ return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
JEMALLOC_INLINE_C int
-arena_run_addr_comp(const arena_chunk_map_misc_t *a,
+arena_ad_comp(const arena_chunk_map_misc_t *a,
const arena_chunk_map_misc_t *b)
{
uintptr_t a_miscelm = (uintptr_t)a;
@@ -68,9 +91,26 @@ arena_run_addr_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
+JEMALLOC_INLINE_C int
+arena_snad_comp(const arena_chunk_map_misc_t *a,
+ const arena_chunk_map_misc_t *b)
+{
+ int ret;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ ret = arena_sn_comp(a, b);
+ if (ret != 0)
+ return (ret);
+
+ ret = arena_ad_comp(a, b);
+ return (ret);
+}
+
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
- ph_link, arena_run_addr_comp)
+ ph_link, arena_snad_comp)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
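
The new comparators rely on the branchless three-way idiom ((a > b) - (a < b)), which yields -1, 0, or 1 and, unlike the naive a - b, cannot overflow; arena_snad_comp() chains two such comparisons lexicographically, serial number first and address as the tie-break, so the run heaps prefer older chunks. A self-contained illustration (cmp_size and cmp_sn_then_addr are illustrative names, not jemalloc functions):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Branchless three-way compare: -1, 0, or 1; no overflow, unlike a - b. */
    static int
    cmp_size(size_t a, size_t b)
    {
        return ((a > b) - (a < b));
    }

    /* Lexicographic (serial number, address) order, mirroring arena_snad_comp(). */
    static int
    cmp_sn_then_addr(size_t a_sn, uintptr_t a_addr, size_t b_sn, uintptr_t b_addr)
    {
        int ret = cmp_size(a_sn, b_sn);
        if (ret != 0)
            return (ret);
        return ((a_addr > b_addr) - (a_addr < b_addr));
    }

    int
    main(void)
    {
        /* An older serial number sorts first; address breaks ties. */
        assert(cmp_sn_then_addr(1, 0x2000, 2, 0x1000) < 0);
        assert(cmp_sn_then_addr(2, 0x1000, 2, 0x2000) < 0);
        assert(cmp_sn_then_addr(2, 0x1000, 2, 0x1000) == 0);
        return (0);
    }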
@@ -529,7 +569,7 @@ arena_chunk_init_spare(arena_t *arena)
static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- bool zero)
+ size_t sn, bool zero)
{
/*
@@ -538,7 +578,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
- extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+ extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(tsdn, chunk, &chunk->node));
}
@@ -548,28 +588,30 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
+ size_t sn;
malloc_mutex_unlock(tsdn, &arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
- NULL, chunksize, chunksize, zero, commit);
+ NULL, chunksize, chunksize, &sn, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- (void *)chunk, chunksize, *zero, *commit);
+ (void *)chunk, chunksize, sn, *zero, *commit);
chunk = NULL;
}
}
- if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
+ *zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
- chunksize, *zero, *commit);
+ chunksize, sn, *zero, *commit);
chunk = NULL;
}
@@ -583,13 +625,14 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t sn;
chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
- chunksize, zero, commit, true);
+ chunksize, &sn, zero, commit, true);
if (chunk != NULL) {
- if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
- chunksize, true);
+ chunksize, sn, true);
return (NULL);
}
}
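
Notice the pattern this hunk and the previous one establish for the rest of the change: allocation paths grow a size_t *sn out-parameter, and every deallocation path takes the same serial number back by value, so an sn assigned at mapping time follows the chunk through caches and trees until it is unmapped. A minimal sketch of that out-parameter plumbing (alloc_chunk, free_chunk, and sn_next are hypothetical stand-ins, not jemalloc APIs):

    #include <stddef.h>
    #include <stdlib.h>

    static size_t sn_next;  /* A monotonic per-arena counter in the real code. */

    /* Allocation stamps the chunk with a serial number via the out-parameter. */
    static void *
    alloc_chunk(size_t size, size_t *sn)
    {
        void *chunk = malloc(size);
        if (chunk != NULL)
            *sn = sn_next++;
        return (chunk);
    }

    /* Deallocation receives the same sn so a cache could key on (size, sn, addr). */
    static void
    free_chunk(void *chunk, size_t size, size_t sn)
    {
        (void)size;
        (void)sn;
        free(chunk);
    }

    int
    main(void)
    {
        size_t sn;
        void *c = alloc_chunk(4096, &sn);
        if (c != NULL)
            free_chunk(c, 4096, sn);
        return (0);
    }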
@@ -621,6 +664,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
if (chunk == NULL)
return (NULL);
+ chunk->hugepage = true;
+
/*
* Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
@@ -684,11 +729,14 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
+ size_t sn, hugepage;
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
+ sn = extent_node_sn_get(&chunk->node);
+ hugepage = chunk->hugepage;
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
@@ -701,9 +749,17 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
arena->ind);
}
+ if (!hugepage) {
+ /*
+ * Convert chunk back to the default state, so that all
+ * subsequent chunk allocations start out with chunks that can
+ * be backed by transparent huge pages.
+ */
+ pages_huge(chunk, chunksize);
+ }
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
- committed);
+ sn, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
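
The hugepage flag implements a per-chunk transparent-huge-page lifecycle: a chunk starts huge-eligible (chunk->hugepage = true in arena_chunk_init_hard() above), the first purge within it flips the whole chunk to non-huge (see arena_purge_stashed() below), and discarding the chunk restores the default so the recycled address range is huge-eligible again. On Linux, pages_huge() and pages_nohuge() plausibly reduce to madvise() calls along these lines (a hedged sketch; my_pages_* are stand-ins, not jemalloc's actual pages.c):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Advise the kernel that [addr, addr+size) may be backed by THP. */
    static int
    my_pages_huge(void *addr, size_t size)
    {
    #ifdef MADV_HUGEPAGE
        return (madvise(addr, size, MADV_HUGEPAGE));
    #else
        (void)addr; (void)size;
        return (0);
    #endif
    }

    /* Disable THP for the range, so purging a few dirty pages does not
     * leave a whole 2 MiB huge page resident. */
    static int
    my_pages_nohuge(void *addr, size_t size)
    {
    #ifdef MADV_NOHUGEPAGE
        return (madvise(addr, size, MADV_NOHUGEPAGE));
    #else
        (void)addr; (void)size;
        return (0);
    #endif
    }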
@@ -859,14 +915,14 @@ arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
- size_t csize)
+ chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
+ bool *zero, size_t csize)
{
void *ret;
bool commit = true;
ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
- alignment, zero, &commit);
+ alignment, sn, zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -883,7 +939,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool *zero)
+ size_t alignment, size_t *sn, bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
@@ -900,18 +956,19 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_nactive_add(arena, usize >> LG_PAGE);
ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
- alignment, zero, &commit, true);
+ alignment, sn, zero, &commit, true);
malloc_mutex_unlock(tsdn, &arena->lock);
if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
- usize, alignment, zero, csize);
+ usize, alignment, sn, zero, csize);
}
return (ret);
}
void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
+ size_t sn)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
@@ -924,7 +981,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
}
arena_nactive_sub(arena, usize >> LG_PAGE);
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
malloc_mutex_unlock(tsdn, &arena->lock);
}
@@ -948,7 +1005,7 @@ arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize)
+ size_t oldsize, size_t usize, size_t sn)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -967,7 +1024,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
CHUNK_CEILING(usize));
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- true);
+ sn, true);
}
malloc_mutex_unlock(tsdn, &arena->lock);
}
@@ -975,13 +1032,13 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
- bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+ size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- chunksize, zero, &commit) == NULL);
+ chunksize, sn, zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -995,7 +1052,7 @@ arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- *zero, true);
+ *sn, *zero, true);
err = true;
}
return (err);
@@ -1010,6 +1067,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+ size_t sn;
bool commit = true;
malloc_mutex_lock(tsdn, &arena->lock);
@@ -1022,16 +1080,16 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
arena_nactive_add(arena, udiff >> LG_PAGE);
err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- chunksize, zero, &commit, true) == NULL);
+ chunksize, &sn, zero, &commit, true) == NULL);
malloc_mutex_unlock(tsdn, &arena->lock);
if (err) {
err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
- &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
- cdiff);
+ &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
+ udiff, cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- *zero, true);
+ sn, *zero, true);
err = true;
}
@@ -1519,6 +1577,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
+ size_t sn;
bool zero, commit;
UNUSED void *chunk;
@@ -1536,8 +1595,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
commit = false;
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
- extent_node_size_get(chunkselm), chunksize, &zero,
- &commit, false);
+ extent_node_size_get(chunkselm), chunksize, &sn,
+ &zero, &commit, false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -1634,6 +1693,17 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
+ /*
+ * If this is the first run purged within chunk, mark
+ * the chunk as non-huge. This will prevent all use of
+ * transparent huge pages for this chunk until the chunk
+ * as a whole is deallocated.
+ */
+ if (chunk->hugepage) {
+ pages_nohuge(chunk, chunksize);
+ chunk->hugepage = false;
+ }
+
assert(pageind + npages <= chunk_npages);
assert(!arena_mapbits_decommitted_get(chunk, pageind));
assert(!arena_mapbits_decommitted_get(chunk,
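
The flag also acts as a once-only guard: however many runs are purged out of a chunk, the nohuge transition is issued a single time, when the first run goes. The guard pattern in isolation (illustrative types and names):

    struct chunk_state {
        int hugepage;   /* Mirrors chunk->hugepage above. */
    };

    /* Called for every purged run; the flag makes the expensive state
     * transition happen at most once per chunk. */
    static void
    purge_run(struct chunk_state *c)
    {
        if (c->hugepage) {
            /* pages_nohuge(chunk, chunksize) in the real code. */
            c->hugepage = 0;
        }
        /* ... purge this run's dirty pages ... */
    }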
@@ -1703,13 +1773,14 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
+ size_t sn = extent_node_sn_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
- size, zeroed, committed);
+ size, sn, zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -2315,7 +2386,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
arena_dalloc_bin_run(tsdn, arena, chunk, run,
bin);
} else
- arena_bin_lower_run(arena, chunk, run, bin);
+ arena_bin_lower_run(arena, run, bin);
}
return (ret);
}
@@ -2820,16 +2891,18 @@ arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
}
static void
-arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
+arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
{
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the lowest
- * non-full run. It is okay to NULL runcur out rather than proactively
- * keeping it pointing at the lowest non-full run.
+ * Make sure that if bin->runcur is non-NULL, it refers to the
+ * oldest/lowest non-full run. It is okay to NULL runcur out rather
+ * than proactively keeping it pointing at the oldest/lowest non-full
+ * run.
*/
- if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+ if (bin->runcur != NULL &&
+ arena_snad_comp(arena_run_to_miscelm(bin->runcur),
+ arena_run_to_miscelm(run)) > 0) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
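
The rewritten predicate preserves the old behavior when runcur is NULL (the previous pointer comparison was trivially false then) but otherwise orders runs by (serial number, address) instead of raw address, so runcur tracks the oldest, lowest non-full run. A standalone sketch of the decision (struct run_key and should_switch_runcur are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct run_key {
        size_t sn;      /* Serial number of the containing chunk. */
        uintptr_t addr; /* Address of the run. */
    };

    /* Nonzero iff `run` should displace `runcur` as the bin's current run. */
    static int
    should_switch_runcur(const struct run_key *runcur, const struct run_key *run)
    {
        if (runcur == NULL)
            return (0);                     /* Nothing to displace. */
        if (runcur->sn != run->sn)
            return (runcur->sn > run->sn);  /* Prefer the older run. */
        return (runcur->addr > run->addr);  /* Tie-break on address. */
    }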
@@ -2865,7 +2938,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, chunk, run, bin);
+ arena_bin_lower_run(arena, run, bin);
if (config_stats) {
bin->stats.ndalloc++;
@@ -3452,6 +3525,13 @@ arena_nthreads_dec(arena_t *arena, bool internal)
atomic_sub_u(&arena->nthreads[internal], 1);
}
+size_t
+arena_extent_sn_next(arena_t *arena)
+{
+
+ return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
+}
+
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
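
arena_extent_sn_next() depends on jemalloc's atomic_add_z() returning the post-add value, so subtracting 1 recovers the unique pre-increment serial number. The same counter written with C11 atomics, where atomic_fetch_add() returns the old value directly (a sketch, assuming C11 <stdatomic.h>):

    #include <stdatomic.h>
    #include <stddef.h>

    static atomic_size_t extent_sn_next;

    /* Hand out 0, 1, 2, ... exactly once each, even under concurrent callers. */
    static size_t
    extent_sn_take(void)
    {
        /*
         * atomic_fetch_add() returns the value *before* the add, so no "- 1"
         * is needed here; jemalloc's atomic_add_z() returns the sum, hence
         * the "- 1" in arena_extent_sn_next() above.
         */
        return (atomic_fetch_add(&extent_sn_next, 1));
    }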
@@ -3511,6 +3591,8 @@ arena_new(tsdn_t *tsdn, unsigned ind)
ql_new(&arena->achunks);
+ arena->extent_sn_next = 0;
+
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
@@ -3532,9 +3614,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
WITNESS_RANK_ARENA_HUGE))
return (NULL);
- extent_tree_szad_new(&arena->chunks_szad_cached);
+ extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
- extent_tree_szad_new(&arena->chunks_szad_retained);
+ extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
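
With the comparator and plumbing above in place, this final hunk switches the cached and retained extent trees from size/address (szad) to size/serial-number/address (szsnad) ordering, so recycling a chunk of a suitable size prefers the oldest candidate and younger extents stay free to be unmapped. A hedged sketch of what such a comparator looks like (struct extent_key is illustrative; the real code compares extent_node_t through accessors):

    #include <stddef.h>
    #include <stdint.h>

    struct extent_key {
        size_t size;
        size_t sn;
        uintptr_t addr;
    };

    /* Order by size, then serial number, then address. */
    static int
    extent_szsnad_comp(const struct extent_key *a, const struct extent_key *b)
    {
        int ret = (a->size > b->size) - (a->size < b->size);
        if (ret != 0)
            return (ret);
        ret = (a->sn > b->sn) - (a->sn < b->sn);
        if (ret != 0)
            return (ret);
        return ((a->addr > b->addr) - (a->addr < b->addr));
    }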