Diffstat (limited to 'src')
-rw-r--r--               src/arena.c     | 170
-rw-r--r--               src/base.c      |  24
-rw-r--r--               src/chunk.c     | 120
-rw-r--r--               src/chunk_dss.c |   3
-rw-r--r--               src/extent.c    |  68
-rw-r--r--               src/huge.c      |  16
-rw-r--r--               src/jemalloc.c  |  63
-rw-r--r--               src/pages.c     |  47
-rwxr-xr-x [-rw-r--r--]  src/stats.c     |  13
-rwxr-xr-x [-rw-r--r--]  src/tcache.c    |   6
-rwxr-xr-x [-rw-r--r--]  src/util.c      |   4
11 files changed, 358 insertions, 176 deletions
diff --git a/src/arena.c b/src/arena.c
index e196b13..648a8da 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -38,8 +38,8 @@ static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
+ arena_bin_t *bin);
/******************************************************************************/
@@ -55,8 +55,31 @@ arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
return (arena_mapbits_size_decode(mapbits));
}
+JEMALLOC_INLINE_C const extent_node_t *
+arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ return (&chunk->node);
+}
+
+JEMALLOC_INLINE_C int
+arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+{
+ size_t a_sn, b_sn;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
+ b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+
+ return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
JEMALLOC_INLINE_C int
-arena_run_addr_comp(const arena_chunk_map_misc_t *a,
+arena_ad_comp(const arena_chunk_map_misc_t *a,
const arena_chunk_map_misc_t *b)
{
uintptr_t a_miscelm = (uintptr_t)a;
@@ -68,9 +91,26 @@ arena_run_addr_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
+JEMALLOC_INLINE_C int
+arena_snad_comp(const arena_chunk_map_misc_t *a,
+ const arena_chunk_map_misc_t *b)
+{
+ int ret;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ ret = arena_sn_comp(a, b);
+ if (ret != 0)
+ return (ret);
+
+ ret = arena_ad_comp(a, b);
+ return (ret);
+}
+
/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
- ph_link, arena_run_addr_comp)
+ ph_link, arena_snad_comp)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
@@ -529,7 +569,7 @@ arena_chunk_init_spare(arena_t *arena)
static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- bool zero)
+ size_t sn, bool zero)
{
/*
@@ -538,7 +578,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
- extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+ extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(tsdn, chunk, &chunk->node));
}
@@ -548,28 +588,30 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
+ size_t sn;
malloc_mutex_unlock(tsdn, &arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
- NULL, chunksize, chunksize, zero, commit);
+ NULL, chunksize, chunksize, &sn, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- (void *)chunk, chunksize, *zero, *commit);
+ (void *)chunk, chunksize, sn, *zero, *commit);
chunk = NULL;
}
}
- if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
+ *zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
- chunksize, *zero, *commit);
+ chunksize, sn, *zero, *commit);
chunk = NULL;
}
@@ -583,13 +625,14 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t sn;
chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
- chunksize, zero, commit, true);
+ chunksize, &sn, zero, commit, true);
if (chunk != NULL) {
- if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
- chunksize, true);
+ chunksize, sn, true);
return (NULL);
}
}
@@ -621,6 +664,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
if (chunk == NULL)
return (NULL);
+ chunk->hugepage = true;
+
/*
* Initialize the map to contain one maximal free untouched run. Mark
* the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
@@ -684,11 +729,14 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
+ size_t sn, hugepage;
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
+ sn = extent_node_sn_get(&chunk->node);
+ hugepage = chunk->hugepage;
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
@@ -701,9 +749,17 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
arena->ind);
}
+ if (!hugepage) {
+ /*
+ * Convert chunk back to the default state, so that all
+ * subsequent chunk allocations start out with chunks that can
+ * be backed by transparent huge pages.
+ */
+ pages_huge(chunk, chunksize);
+ }
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
- committed);
+ sn, committed);
if (config_stats) {
arena->stats.mapped -= chunksize;
@@ -859,14 +915,14 @@ arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
- size_t csize)
+ chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
+ bool *zero, size_t csize)
{
void *ret;
bool commit = true;
ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
- alignment, zero, &commit);
+ alignment, sn, zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -883,7 +939,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool *zero)
+ size_t alignment, size_t *sn, bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
@@ -900,18 +956,19 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_nactive_add(arena, usize >> LG_PAGE);
ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
- alignment, zero, &commit, true);
+ alignment, sn, zero, &commit, true);
malloc_mutex_unlock(tsdn, &arena->lock);
if (ret == NULL) {
ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
- usize, alignment, zero, csize);
+ usize, alignment, sn, zero, csize);
}
return (ret);
}
void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
+ size_t sn)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
@@ -924,7 +981,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
}
arena_nactive_sub(arena, usize >> LG_PAGE);
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
malloc_mutex_unlock(tsdn, &arena->lock);
}
@@ -948,7 +1005,7 @@ arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize)
+ size_t oldsize, size_t usize, size_t sn)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -967,7 +1024,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
CHUNK_CEILING(usize));
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- true);
+ sn, true);
}
malloc_mutex_unlock(tsdn, &arena->lock);
}
@@ -975,13 +1032,13 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
- bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+ size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- chunksize, zero, &commit) == NULL);
+ chunksize, sn, zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -995,7 +1052,7 @@ arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- *zero, true);
+ *sn, *zero, true);
err = true;
}
return (err);
@@ -1010,6 +1067,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+ size_t sn;
bool commit = true;
malloc_mutex_lock(tsdn, &arena->lock);
@@ -1022,16 +1080,16 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
arena_nactive_add(arena, udiff >> LG_PAGE);
err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- chunksize, zero, &commit, true) == NULL);
+ chunksize, &sn, zero, &commit, true) == NULL);
malloc_mutex_unlock(tsdn, &arena->lock);
if (err) {
err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
- &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
- cdiff);
+ &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
+ udiff, cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- *zero, true);
+ sn, *zero, true);
err = true;
}
@@ -1519,6 +1577,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
+ size_t sn;
bool zero, commit;
UNUSED void *chunk;
@@ -1536,8 +1595,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
commit = false;
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
- extent_node_size_get(chunkselm), chunksize, &zero,
- &commit, false);
+ extent_node_size_get(chunkselm), chunksize, &sn,
+ &zero, &commit, false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -1634,6 +1693,17 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
+ /*
+ * If this is the first run purged within chunk, mark
+ * the chunk as non-huge. This will prevent all use of
+ * transparent huge pages for this chunk until the chunk
+ * as a whole is deallocated.
+ */
+ if (chunk->hugepage) {
+ pages_nohuge(chunk, chunksize);
+ chunk->hugepage = false;
+ }
+
assert(pageind + npages <= chunk_npages);
assert(!arena_mapbits_decommitted_get(chunk, pageind));
assert(!arena_mapbits_decommitted_get(chunk,
@@ -1703,13 +1773,14 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
+ size_t sn = extent_node_sn_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
- size, zeroed, committed);
+ size, sn, zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -2315,7 +2386,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
arena_dalloc_bin_run(tsdn, arena, chunk, run,
bin);
} else
- arena_bin_lower_run(arena, chunk, run, bin);
+ arena_bin_lower_run(arena, run, bin);
}
return (ret);
}
@@ -2820,16 +2891,18 @@ arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
}
static void
-arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
+arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
{
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the lowest
- * non-full run. It is okay to NULL runcur out rather than proactively
- * keeping it pointing at the lowest non-full run.
+ * Make sure that if bin->runcur is non-NULL, it refers to the
+ * oldest/lowest non-full run. It is okay to NULL runcur out rather
+ * than proactively keeping it pointing at the oldest/lowest non-full
+ * run.
*/
- if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+ if (bin->runcur != NULL &&
+ arena_snad_comp(arena_run_to_miscelm(bin->runcur),
+ arena_run_to_miscelm(run)) > 0) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
@@ -2865,7 +2938,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, chunk, run, bin);
+ arena_bin_lower_run(arena, run, bin);
if (config_stats) {
bin->stats.ndalloc++;
@@ -3452,6 +3525,13 @@ arena_nthreads_dec(arena_t *arena, bool internal)
atomic_sub_u(&arena->nthreads[internal], 1);
}
+size_t
+arena_extent_sn_next(arena_t *arena)
+{
+
+ return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
+}
+
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
@@ -3511,6 +3591,8 @@ arena_new(tsdn_t *tsdn, unsigned ind)
ql_new(&arena->achunks);
+ arena->extent_sn_next = 0;
+
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
@@ -3532,9 +3614,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
WITNESS_RANK_ARENA_HUGE))
return (NULL);
- extent_tree_szad_new(&arena->chunks_szad_cached);
+ extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
- extent_tree_szad_new(&arena->chunks_szad_retained);
+ extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
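Illustrative aside (not part of the patch): replacing arena_run_addr_comp with arena_snad_comp in the run heaps and in arena_bin_lower_run() makes bin->runcur prefer runs from the oldest chunk, with address order used only to break ties. A minimal standalone sketch of that two-level ordering, using a hypothetical key struct in place of arena_chunk_map_misc_t and its chunk's extent node:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for a run's chunk serial number and address. */
typedef struct {
    size_t sn;       /* extent serial number of the owning chunk */
    uintptr_t addr;  /* address of the run */
} run_key_t;

/* Same shape as arena_snad_comp(): serial number first, then address. */
static int
run_snad_comp(const run_key_t *a, const run_key_t *b)
{
    int ret = (a->sn > b->sn) - (a->sn < b->sn);

    if (ret != 0)
        return (ret);
    return ((a->addr > b->addr) - (a->addr < b->addr));
}

int
main(void)
{
    run_key_t old_high = {1, 0x40000};  /* older chunk, higher address */
    run_key_t new_low = {7, 0x10000};   /* newer chunk, lower address */

    /* Under pure address order new_low would win; under snad order old_high does. */
    printf("%d\n", run_snad_comp(&old_high, &new_low));  /* prints -1 */
    return (0);
}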
diff --git a/src/base.c b/src/base.c
index 81b0801..5681a3f 100644
--- a/src/base.c
+++ b/src/base.c
@@ -5,7 +5,8 @@
/* Data. */
static malloc_mutex_t base_mtx;
-static extent_tree_t base_avail_szad;
+static size_t base_extent_sn_next;
+static extent_tree_t base_avail_szsnad;
static extent_node_t *base_nodes;
static size_t base_allocated;
static size_t base_resident;
@@ -39,6 +40,14 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
base_nodes = node;
}
+static void
+base_extent_node_init(extent_node_t *node, void *addr, size_t size)
+{
+ size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
+
+ extent_node_init(node, NULL, addr, size, sn, true, true);
+}
+
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
@@ -68,7 +77,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
- extent_node_init(node, NULL, addr, csize, true, true);
+ base_extent_node_init(node, addr, csize);
return (node);
}
@@ -92,12 +101,12 @@ base_alloc(tsdn_t *tsdn, size_t size)
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
- extent_node_init(&key, NULL, NULL, usize, false, false);
+ extent_node_init(&key, NULL, NULL, usize, 0, false, false);
malloc_mutex_lock(tsdn, &base_mtx);
- node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+ node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
if (node != NULL) {
/* Use existing space. */
- extent_tree_szad_remove(&base_avail_szad, node);
+ extent_tree_szsnad_remove(&base_avail_szsnad, node);
} else {
/* Try to allocate more space. */
node = base_chunk_alloc(tsdn, csize);
@@ -111,7 +120,7 @@ base_alloc(tsdn_t *tsdn, size_t size)
if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize);
- extent_tree_szad_insert(&base_avail_szad, node);
+ extent_tree_szsnad_insert(&base_avail_szsnad, node);
} else
base_node_dalloc(tsdn, node);
if (config_stats) {
@@ -149,7 +158,8 @@ base_boot(void)
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
- extent_tree_szad_new(&base_avail_szad);
+ base_extent_sn_next = 0;
+ extent_tree_szsnad_new(&base_avail_szsnad);
base_nodes = NULL;
return (false);
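Side note (sketch, not from the patch): base.c now stamps every internally allocated extent with a serial number taken from base_extent_sn_next via atomic_add_z(&x, 1) - 1, i.e. the pre-increment value, just as arena_extent_sn_next() does for arena chunks. The same idiom in portable C11 atomics:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_size_t extent_sn_next;    /* stand-in for base_extent_sn_next */

/* Returns the value before the increment, like atomic_add_z(&x, 1) - 1. */
static size_t
extent_sn_alloc(void)
{
    return (atomic_fetch_add(&extent_sn_next, 1));
}

int
main(void)
{
    size_t a = extent_sn_alloc();    /* 0 */
    size_t b = extent_sn_alloc();    /* 1 */

    printf("%zu %zu\n", a, b);
    return (0);
}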
diff --git a/src/chunk.c b/src/chunk.c
index 07e26f7..c1c514a 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -50,9 +50,9 @@ const chunk_hooks_t chunk_hooks_default = {
*/
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
- bool committed);
+ chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
+ extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
+ bool zeroed, bool committed);
/******************************************************************************/
@@ -183,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
}
/*
- * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
- * fits.
+ * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
+ * best fits.
*/
static extent_node_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
extent_node_t key;
assert(size == CHUNK_CEILING(size));
- extent_node_init(&key, arena, NULL, size, false, false);
- return (extent_tree_szad_nsearch(chunks_szad, &key));
+ extent_node_init(&key, arena, NULL, size, 0, false, false);
+ return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
}
static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
- void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
- bool dalloc_node)
+ extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit, bool dalloc_node)
{
void *ret;
extent_node_t *node;
size_t alloc_size, leadsize, trailsize;
bool zeroed, committed;
+ assert(CHUNK_CEILING(size) == size);
+ assert(alignment > 0);
assert(new_addr == NULL || alignment == chunksize);
+ assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
/*
* Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because
@@ -217,7 +219,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
*/
assert(dalloc_node || new_addr != NULL);
- alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+ alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
@@ -225,12 +227,11 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
- extent_node_init(&key, arena, new_addr, alloc_size, false,
+ extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
- node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
- alloc_size);
+ node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
@@ -243,6 +244,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+ *sn = extent_node_sn_get(node);
zeroed = extent_node_zeroed_get(node);
if (zeroed)
*zero = true;
@@ -257,13 +259,13 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
/* Remove node from the tree. */
- extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_szsnad_remove(chunks_szsnad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
- extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
@@ -275,9 +277,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
- chunks_ad, cache, ret, size + trailsize, zeroed,
- committed);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
+ chunks_ad, cache, ret, size + trailsize, *sn,
+ zeroed, committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
@@ -286,22 +288,22 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (node == NULL) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
- chunks_szad, chunks_ad, cache, ret, size +
- trailsize, zeroed, committed);
+ chunks_szsnad, chunks_ad, cache, ret, size
+ + trailsize, *sn, zeroed, committed);
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
- trailsize, zeroed, committed);
- extent_tree_szad_insert(chunks_szad, node);
+ trailsize, *sn, zeroed, committed);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
- cache, ret, size, zeroed, committed);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
+ cache, ret, size, *sn, zeroed, committed);
return (NULL);
}
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -385,8 +387,8 @@ chunk_alloc_base(size_t size)
void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
- bool dalloc_node)
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit, bool dalloc_node)
{
void *ret;
@@ -396,8 +398,8 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
- new_addr, size, alignment, zero, commit, dalloc_node);
+ &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
+ new_addr, size, alignment, sn, zero, commit, dalloc_node);
if (ret == NULL)
return (NULL);
if (config_valgrind)
@@ -451,7 +453,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit)
{
void *ret;
@@ -461,8 +464,8 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
- new_addr, size, alignment, zero, commit, true);
+ &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, sn, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
@@ -472,14 +475,15 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit)
{
void *ret;
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
- alignment, zero, commit);
+ alignment, sn, zero, commit);
if (ret == NULL) {
if (chunk_hooks->alloc == chunk_alloc_default) {
/* Call directly to propagate tsdn. */
@@ -493,6 +497,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (ret == NULL)
return (NULL);
+ *sn = arena_extent_sn_next(arena);
+
if (config_valgrind && chunk_hooks->alloc !=
chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
@@ -503,8 +509,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
- void *chunk, size_t size, bool zeroed, bool committed)
+ extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
@@ -516,7 +522,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
- extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+ extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
@@ -528,15 +534,17 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
- * remove/insert from/into chunks_szad.
+ * remove/insert from/into chunks_szsnad.
*/
- extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
+ if (sn < extent_node_sn_get(node))
+ extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed);
- extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
@@ -554,10 +562,10 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
goto label_return;
}
- extent_node_init(node, arena, chunk, size, !unzeroed,
+ extent_node_init(node, arena, chunk, size, sn, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
- extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
}
@@ -571,19 +579,21 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
- * remove/insert node from/into chunks_szad.
+ * remove/insert node from/into chunks_szsnad.
*/
- extent_tree_szad_remove(chunks_szad, prev);
+ extent_tree_szsnad_remove(chunks_szsnad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
- extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_szsnad_remove(chunks_szsnad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
+ if (extent_node_sn_get(prev) < extent_node_sn_get(node))
+ extent_node_sn_set(node, extent_node_sn_get(prev));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
- extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(tsdn, arena, prev);
@@ -595,7 +605,7 @@ label_return:
void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, bool committed)
+ void *chunk, size_t size, size_t sn, bool committed)
{
assert(chunk != NULL);
@@ -603,8 +613,9 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
- &arena->chunks_ad_cached, true, chunk, size, false, committed);
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, sn, false,
+ committed);
arena_maybe_purge(tsdn, arena);
}
@@ -627,7 +638,7 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,
void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, bool zeroed, bool committed)
+ void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
bool err;
@@ -653,8 +664,9 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
- &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
+ committed);
if (config_stats)
arena->stats.retained += size;
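For orientation (illustrative sketch, not jemalloc code): with the szad trees replaced by szsnad trees, chunk_first_best_fit() now selects the smallest sufficient (quantized) size, and among equally sized extents the lowest serial number, then the lowest address; chunk_record() keeps the minimum sn when coalescing. A linear-scan stand-in for the tree's nsearch, with size quantization omitted:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    size_t size;     /* extent size (quantization omitted) */
    size_t sn;       /* serial number: lower means older */
    uintptr_t addr;  /* base address */
} extent_t;

/* Mirrors the szsnad ordering: size, then serial number, then address. */
static int
szsnad_less(const extent_t *a, const extent_t *b)
{
    if (a->size != b->size)
        return (a->size < b->size);
    if (a->sn != b->sn)
        return (a->sn < b->sn);
    return (a->addr < b->addr);
}

static const extent_t *
first_best_fit(const extent_t *extents, size_t nextents, size_t size)
{
    const extent_t *best = NULL;
    size_t i;

    for (i = 0; i < nextents; i++) {
        if (extents[i].size < size)
            continue;    /* too small to satisfy the request */
        if (best == NULL || szsnad_less(&extents[i], best))
            best = &extents[i];
    }
    return (best);
}

int
main(void)
{
    const extent_t cached[] = {
        {2 << 20, 9, 0x200000},    /* newest 2 MiB extent */
        {2 << 20, 3, 0x600000},    /* oldest 2 MiB extent: chosen */
        {4 << 20, 1, 0x800000}     /* larger than necessary */
    };
    const extent_t *e = first_best_fit(cached, 3, 2 << 20);

    printf("sn=%zu addr=%#lx\n", e->sn, (unsigned long)e->addr);
    return (0);
}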
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 85a1354..ee3f838 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -162,7 +162,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size,
- false, true);
+ arena_extent_sn_next(arena), false,
+ true);
}
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
diff --git a/src/extent.c b/src/extent.c
index 9f5146e..218156c 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -3,43 +3,46 @@
/******************************************************************************/
+/*
+ * Round down to the nearest chunk size that can actually be requested during
+ * normal huge allocation.
+ */
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
{
+ size_t ret;
+ szind_t ind;
- /*
- * Round down to the nearest chunk size that can actually be requested
- * during normal huge allocation.
- */
- return (index2size(size2index(size + 1) - 1));
+ assert(size > 0);
+
+ ind = size2index(size + 1);
+ if (ind == 0) {
+ /* Avoid underflow. */
+ return (index2size(0));
+ }
+ ret = index2size(ind - 1);
+ assert(ret <= size);
+ return (ret);
}
JEMALLOC_INLINE_C int
-extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
+extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
{
- int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
- /*
- * Compare based on quantized size rather than size, in order to sort
- * equally useful extents only by address.
- */
- ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
- if (ret == 0) {
- uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
- uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
+ return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
+}
- ret = (a_addr > b_addr) - (a_addr < b_addr);
- }
+JEMALLOC_INLINE_C int
+extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ size_t a_sn = extent_node_sn_get(a);
+ size_t b_sn = extent_node_sn_get(b);
- return (ret);
+ return ((a_sn > b_sn) - (a_sn < b_sn));
}
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
- extent_szad_comp)
-
JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
{
@@ -49,5 +52,26 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
return ((a_addr > b_addr) - (a_addr < b_addr));
}
+JEMALLOC_INLINE_C int
+extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ int ret;
+
+ ret = extent_sz_comp(a, b);
+ if (ret != 0)
+ return (ret);
+
+ ret = extent_sn_comp(a, b);
+ if (ret != 0)
+ return (ret);
+
+ ret = extent_ad_comp(a, b);
+ return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
+ extent_szsnad_comp)
+
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
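Aside (hypothetical class table, not jemalloc's): extent_quantize() floors a size to the largest huge size class not exceeding it, and the refactored version returns the smallest class outright when size2index(size + 1) is 0, avoiding the index underflow. The same floor-to-a-table behavior in miniature:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical, sorted size classes. */
static const size_t classes[] = {4096, 8192, 12288, 16384, 20480};

static size_t
quantize_floor(size_t size)
{
    size_t i, ret = classes[0];    /* smallest class; avoids underflow */

    for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
        if (classes[i] <= size)
            ret = classes[i];
    }
    return (ret);
}

int
main(void)
{
    printf("%zu %zu\n", quantize_floor(15000), quantize_floor(100));
    /* prints "12288 4096" */
    return (0);
}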
diff --git a/src/huge.c b/src/huge.c
index 62e6932..8abd8c0 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -56,6 +56,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
size_t ausize;
arena_t *iarena;
extent_node_t *node;
+ size_t sn;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@@ -68,7 +69,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
- iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : a0get();
+ iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
+ a0get();
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, NULL, true, iarena);
if (node == NULL)
@@ -82,15 +84,15 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
- arena, usize, alignment, &is_zeroed)) == NULL) {
+ arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
- extent_node_init(node, arena, ret, usize, is_zeroed, true);
+ extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
if (huge_node_set(tsdn, ret, node)) {
- arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
+ arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
@@ -245,7 +247,8 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* Zap the excess chunks. */
- arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
+ arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
+ extent_node_sn_get(node));
return (false);
}
@@ -407,7 +410,8 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
- extent_node_addr_get(node), extent_node_size_get(node));
+ extent_node_addr_get(node), extent_node_size_get(node),
+ extent_node_sn_get(node));
idalloctm(tsdn, node, NULL, true, true);
arena_decay_tick(tsdn, arena);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 38650ff..baead66 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1056,7 +1056,11 @@ malloc_conf_init(void)
if (cont) \
continue; \
}
-#define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
+#define CONF_MIN_no(um, min) false
+#define CONF_MIN_yes(um, min) ((um) < (min))
+#define CONF_MAX_no(um, max) false
+#define CONF_MAX_yes(um, max) ((um) > (max))
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
@@ -1069,15 +1073,19 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
- if ((min) != 0 && um < (min)) \
+ if (CONF_MIN_##check_min(um, \
+ (min))) \
o = (t)(min); \
- else if (um > (max)) \
+ else if (CONF_MAX_##check_max( \
+ um, (max))) \
o = (t)(max); \
else \
o = (t)um; \
} else { \
- if (((min) != 0 && um < (min)) \
- || um > (max)) { \
+ if (CONF_MIN_##check_min(um, \
+ (min)) || \
+ CONF_MAX_##check_max(um, \
+ (max))) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
@@ -1087,10 +1095,13 @@ malloc_conf_init(void)
} \
continue; \
}
-#define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
- CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
-#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
- CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
+#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
+ clip) \
+ CONF_HANDLE_T_U(unsigned, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T_U(size_t, o, n, min, max, \
+ check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
@@ -1133,7 +1144,7 @@ malloc_conf_init(void)
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
- (sizeof(size_t) << 3) - 1, true)
+ (sizeof(size_t) << 3) - 1, yes, yes, true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -1159,7 +1170,7 @@ malloc_conf_init(void)
continue;
}
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
- UINT_MAX, false)
+ UINT_MAX, yes, no, false)
if (strncmp("purge", k, klen) == 0) {
int i;
bool match = false;
@@ -1230,7 +1241,7 @@ malloc_conf_init(void)
continue;
}
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX, false)
+ 0, SIZE_T_MAX, no, no, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
@@ -1267,8 +1278,8 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
"prof_thread_active_init", true)
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
- "lg_prof_sample", 0,
- (sizeof(uint64_t) << 3) - 1, true)
+ "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
+ - 1, no, yes, true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
@@ -1284,7 +1295,14 @@ malloc_conf_init(void)
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_MATCH
+#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
+#undef CONF_MIN_no
+#undef CONF_MIN_yes
+#undef CONF_MAX_no
+#undef CONF_MAX_yes
+#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
@@ -1393,8 +1411,9 @@ malloc_init_hard_recursible(void)
ncpus = malloc_ncpus();
-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
- && !defined(_WIN32) && !defined(__native_client__))
+#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
+ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
+ !defined(__native_client__))
/* LinuxThreads' pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
@@ -1973,8 +1992,8 @@ je_realloc(void *ptr, size_t size)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
- old_rzsize, true, false);
+ JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
+ old_usize, old_rzsize, maybe, false);
witness_assert_lockless(tsdn);
return (ret);
}
@@ -2400,8 +2419,8 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
- old_usize, old_rzsize, false, zero);
+ JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
+ old_usize, old_rzsize, no, zero);
witness_assert_lockless(tsd_tsdn(tsd));
return (p);
label_oom:
@@ -2543,8 +2562,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
- old_usize, old_rzsize, false, zero);
+ JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
+ old_usize, old_rzsize, no, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
witness_assert_lockless(tsd_tsdn(tsd));
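Illustrative sketch of the token-pasting dispatch the reworked CONF_HANDLE_T_U relies on: passing no/yes as check_min/check_max selects, at preprocessing time, either a constant false or a real bounds test, so a bound that does not apply is compiled out rather than compared. Only the CONF_MIN_*/CONF_MAX_* helpers mirror the patch; the CLAMP macro below is hypothetical:

#include <stdio.h>
#include <stdbool.h>

#define CONF_MIN_no(um, min)    false
#define CONF_MIN_yes(um, min)   ((um) < (min))
#define CONF_MAX_no(um, max)    false
#define CONF_MAX_yes(um, max)   ((um) > (max))

#define CLAMP(o, um, min, max, check_min, check_max) do {    \
    if (CONF_MIN_##check_min(um, (min)))                     \
        (o) = (min);                                         \
    else if (CONF_MAX_##check_max(um, (max)))                \
        (o) = (max);                                         \
    else                                                     \
        (o) = (um);                                          \
} while (0)

int
main(void)
{
    unsigned narenas;

    /* Lower bound is checked; the upper-bound test expands to "false". */
    CLAMP(narenas, 0u, 1u, 4096u, yes, no);
    printf("%u\n", narenas);    /* prints 1 */
    return (0);
}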
diff --git a/src/pages.c b/src/pages.c
index 647952a..5f0c966 100644
--- a/src/pages.c
+++ b/src/pages.c
@@ -170,15 +170,16 @@ pages_purge(void *addr, size_t size)
#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
-#elif defined(JEMALLOC_HAVE_MADVISE)
-# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-# define JEMALLOC_MADV_PURGE MADV_DONTNEED
-# define JEMALLOC_MADV_ZEROS true
-# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
+ defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
+# if defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
+# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
# else
-# error "No madvise(2) flag defined for purging unused dirty pages."
+# error No madvise(2) flag defined for purging unused dirty pages
# endif
int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
@@ -191,6 +192,34 @@ pages_purge(void *addr, size_t size)
return (unzeroed);
}
+bool
+pages_huge(void *addr, size_t size)
+{
+
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+ return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+ return (false);
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size)
+{
+
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+ return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
+#else
+ return (false);
+#endif
+}
+
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void)
@@ -219,7 +248,7 @@ os_overcommits_proc(void)
char buf[1];
ssize_t nread;
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_open)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
#else
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
@@ -227,13 +256,13 @@ os_overcommits_proc(void)
if (fd == -1)
return (false); /* Error. */
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_read)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
nread = read(fd, &buf, sizeof(buf));
#endif
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_close)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
close(fd);
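Standalone illustration (Linux-only, not jemalloc code) of the madvise(2) calls that the new pages_huge()/pages_nohuge() wrap when JEMALLOC_THP is defined; without that define they are no-ops that return false. The arena code above uses them to re-enable transparent huge pages on a whole chunk before caching it and to disable them on first purge:

#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    size_t chunksize = 2 * 1024 * 1024;    /* assumed 2 MiB chunk */
    void *chunk = mmap(NULL, chunksize, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (chunk == MAP_FAILED)
        return (1);
    /* Equivalent of pages_huge(chunk, chunksize). */
    if (madvise(chunk, chunksize, MADV_HUGEPAGE) != 0)
        perror("MADV_HUGEPAGE");    /* kernel may have THP disabled */
    /* Equivalent of pages_nohuge(chunk, chunksize). */
    if (madvise(chunk, chunksize, MADV_NOHUGEPAGE) != 0)
        perror("MADV_NOHUGEPAGE");
    munmap(chunk, chunksize);
    return (0);
}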
diff --git a/src/stats.c b/src/stats.c
index bd8af39..1360f3b 100644..100755
--- a/src/stats.c
+++ b/src/stats.c
@@ -3,7 +3,7 @@
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
- xmallctl(n, v, &sz, NULL, 0); \
+ xmallctl(n, (void *)v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_GET(n, i, v, t) do { \
@@ -12,7 +12,7 @@
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_M4_GET(n, i, j, v, t) do { \
@@ -22,7 +22,7 @@
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
mib[4] = (j); \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
/******************************************************************************/
@@ -647,7 +647,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
bool bv2; \
if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
- je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
+ je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
@@ -692,7 +692,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
ssize_t ssv2; \
if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
- je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
+ je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
if (json) { \
malloc_cprintf(write_cb, cbopaque, \
"\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
@@ -1084,7 +1084,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
* */
epoch = 1;
u64sz = sizeof(uint64_t);
- err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+ err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
+ sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
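Caller-side note (illustrative; assumes an unprefixed jemalloc build with statistics enabled): mallctl()'s oldp and newp parameters are void *, so the added (void *) casts in the CTL_* and OPT_WRITE_* macros only make the conversions explicit. A minimal sketch of the same epoch-then-read pattern used above:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t u64sz = sizeof(uint64_t);
    size_t allocated, sz = sizeof(size_t);

    /* Refresh the cached statistics, then read one of them. */
    mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, sizeof(uint64_t));
    if (mallctl("stats.allocated", (void *)&allocated, &sz, NULL, 0) == 0)
        printf("allocated: %zu\n", allocated);
    return (0);
}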
diff --git a/src/tcache.c b/src/tcache.c
index f97aa42..21540ff 100644..100755
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -517,12 +517,12 @@ tcache_boot(tsdn_t *tsdn)
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
- if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
- else if ((1U << opt_lg_tcache_max) > large_maxclass)
+ else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass;
else
- tcache_maxclass = (1U << opt_lg_tcache_max);
+ tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
nhbins = size2index(tcache_maxclass) + 1;
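Why 1U becomes ZU(1) here (sketch): tcache_maxclass is a size_t, and on LP64 opt_lg_tcache_max can exceed 31, where shifting a 32-bit unsigned constant is undefined behavior. Widening the constant to size_t first keeps the shift inside the operand's width; ZU() is jemalloc's size_t-constant macro, approximated below with a plain cast:

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    unsigned lg = 40;                    /* hypothetical large lg_tcache_max */
    size_t maxclass = (size_t)1 << lg;   /* well-defined on LP64 */

    /* 1U << 40 would shift past the width of unsigned int. */
    printf("%zu\n", maxclass);           /* 1099511627776 */
    return (0);
}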
diff --git a/src/util.c b/src/util.c
index 7905267..dd8c236 100644..100755
--- a/src/util.c
+++ b/src/util.c
@@ -49,7 +49,7 @@ static void
wrtmessage(void *cbopaque, const char *s)
{
-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_write)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
@@ -200,7 +200,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
p++;
}
if (neg)
- ret = -ret;
+ ret = (uintmax_t)(-((intmax_t)ret));
if (p == ns) {
/* No conversion performed. */