Diffstat (limited to 'src/arena.c')
-rw-r--r--  src/arena.c  1150
1 file changed, 669 insertions, 481 deletions
diff --git a/src/arena.c b/src/arena.c
index 48e9b20..c605bcd 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -37,11 +37,12 @@ static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */
* definition.
*/
-static void arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned, bool decommitted);
-static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
+static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
+ size_t ndirty_limit);
+static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
+ bool dirty, bool cleaned, bool decommitted);
+static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
@@ -72,9 +73,9 @@ arena_run_addr_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
- rb_link, arena_run_addr_comp)
+/* Generate pairing heap functions. */
+ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
+ ph_link, arena_run_addr_comp)
static size_t
run_quantize_floor_compute(size_t size)
@@ -155,7 +156,7 @@ run_quantize_ceil_compute(size_t size)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
+#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
@@ -173,12 +174,12 @@ run_quantize_floor(size_t size)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
-run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
+run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
+#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
@@ -196,10 +197,10 @@ run_quantize_ceil(size_t size)
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
-run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
+run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
-static arena_run_tree_t *
+static arena_run_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{
@@ -214,11 +215,11 @@ arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get(chunk, pageind))));
+ arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- arena_run_tree_insert(arena_runs_avail_get(arena, ind),
- arena_miscelm_get(chunk, pageind));
+ arena_run_heap_insert(arena_runs_avail_get(arena, ind),
+ arena_miscelm_get_mutable(chunk, pageind));
}
static void
@@ -226,18 +227,19 @@ arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get(chunk, pageind))));
+ arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- arena_run_tree_remove(arena_runs_avail_get(arena, ind),
- arena_miscelm_get(chunk, pageind));
+ arena_run_heap_remove(arena_runs_avail_get(arena, ind),
+ arena_miscelm_get_mutable(chunk, pageind));
}
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -254,7 +256,8 @@ static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -589,7 +592,8 @@ arena_chunk_init_spare(arena_t *arena)
}
static bool
-arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
+arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ bool zero)
{
/*
@@ -600,62 +604,63 @@ arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true);
- return (chunk_register(chunk, &chunk->node));
+ return (chunk_register(tsdn, chunk, &chunk->node));
}
static arena_chunk_t *
-arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
- bool *zero, bool *commit)
+arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
- chunksize, chunksize, zero, commit);
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
+ NULL, chunksize, chunksize, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
- chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
- chunksize, *zero, *commit);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+ (void *)chunk, chunksize, *zero, *commit);
chunk = NULL;
}
}
- if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
+ if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
- chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
chunksize, *zero, *commit);
chunk = NULL;
}
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
return (chunk);
}
static arena_chunk_t *
-arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
+arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
+ bool *commit)
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+ chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
chunksize, zero, true);
if (chunk != NULL) {
- if (arena_chunk_register(arena, chunk, *zero)) {
- chunk_dalloc_cache(arena, &chunk_hooks, chunk,
+ if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
chunksize, true);
return (NULL);
}
*commit = true;
}
if (chunk == NULL) {
- chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
- zero, commit);
+ chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
+ &chunk_hooks, zero, commit);
}
if (config_stats && chunk != NULL) {
@@ -667,7 +672,7 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
}
static arena_chunk_t *
-arena_chunk_init_hard(arena_t *arena)
+arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
arena_chunk_t *chunk;
bool zero, commit;
@@ -677,14 +682,14 @@ arena_chunk_init_hard(arena_t *arena)
zero = false;
commit = false;
- chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
+ chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
if (chunk == NULL)
return (NULL);
/*
* Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
- * chunk.
+ * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
+ * or decommitted chunk.
*/
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
@@ -696,17 +701,18 @@ arena_chunk_init_hard(arena_t *arena)
*/
if (!zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)arena_bitselm_get(chunk, map_bias+1),
- (size_t)((uintptr_t) arena_bitselm_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
- map_bias+1)));
+ (void *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
- arena_bitselm_get(chunk, chunk_npages-1) -
- (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
+ *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -721,26 +727,73 @@ arena_chunk_init_hard(arena_t *arena)
}
static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
+arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
arena_chunk_t *chunk;
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else {
- chunk = arena_chunk_init_hard(arena);
+ chunk = arena_chunk_init_hard(tsdn, arena);
if (chunk == NULL)
return (NULL);
}
+ ql_elm_new(&chunk->node, ql_link);
+ ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
-arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+{
+ bool committed;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ chunk_deregister(chunk, &chunk->node);
+
+ committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted even if
+ * header decommit fails, since treating a partially committed
+ * chunk as committed has a high potential for causing later
+ * access of decommitted memory.
+ */
+ chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
+ arena->ind);
+ }
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
+ committed);
+
+ if (config_stats) {
+ arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
+}
+
+static void
+arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+{
+
+ assert(arena->spare != spare);
+
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ arena_chunk_discard(tsdn, arena, spare);
+}
+
+static void
+arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
+ arena_chunk_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -756,43 +809,11 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
- if (arena->spare != NULL) {
- arena_chunk_t *spare = arena->spare;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- bool committed;
-
- arena->spare = chunk;
- if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
- arena_run_dirty_remove(arena, spare, map_bias,
- chunk_npages-map_bias);
- }
-
- chunk_deregister(spare, &spare->node);
-
- committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
- 0);
- if (!committed) {
- /*
- * Decommit the header. Mark the chunk as decommitted
- * even if header decommit fails, since treating a
- * partially committed chunk as committed has a high
- * potential for causing later access of decommitted
- * memory.
- */
- chunk_hooks = chunk_hooks_get(arena);
- chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
- LG_PAGE, arena->ind);
- }
-
- chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
- chunksize, committed);
-
- if (config_stats) {
- arena->stats.mapped -= chunksize;
- arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
- }
- } else
- arena->spare = chunk;
+ ql_remove(&arena->achunks, &chunk->node, ql_link);
+ spare = arena->spare;
+ arena->spare = chunk;
+ if (spare != NULL)
+ arena_spare_discard(tsdn, arena, spare);
}
static void
@@ -835,6 +856,17 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
}
static void
+arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.hstats[index].ndalloc--;
+}
+
+static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize) - nlclasses - NBINS;
@@ -865,63 +897,64 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
}
extent_node_t *
-arena_node_alloc(arena_t *arena)
+arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
extent_node_t *node;
- malloc_mutex_lock(&arena->node_cache_mtx);
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) {
- malloc_mutex_unlock(&arena->node_cache_mtx);
- return (base_alloc(sizeof(extent_node_t)));
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ return (base_alloc(tsdn, sizeof(extent_node_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
- malloc_mutex_unlock(&arena->node_cache_mtx);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (node);
}
void
-arena_node_dalloc(arena_t *arena, extent_node_t *node)
+arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{
- malloc_mutex_lock(&arena->node_cache_mtx);
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link);
- malloc_mutex_unlock(&arena->node_cache_mtx);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}
static void *
-arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
- size_t usize, size_t alignment, bool *zero, size_t csize)
+arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
+ size_t csize)
{
void *ret;
bool commit = true;
- ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
- zero, &commit);
+ ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+ alignment, zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize;
}
arena_nactive_sub(arena, usize >> LG_PAGE);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
return (ret);
}
void *
-arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
- bool *zero)
+arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */
if (config_stats) {
@@ -930,61 +963,61 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
}
arena_nactive_add(arena, usize >> LG_PAGE);
- ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
- zero, true);
- malloc_mutex_unlock(&arena->lock);
+ ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
+ alignment, zero, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (ret == NULL) {
- ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
- alignment, zero, csize);
+ ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
+ usize, alignment, zero, csize);
}
return (ret);
}
void
-arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
csize = CHUNK_CEILING(usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize;
}
arena_nactive_sub(arena, usize >> LG_PAGE);
- chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
- malloc_mutex_unlock(&arena->lock);
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
- size_t usize)
+arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize)
{
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (oldsize < usize)
arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
else
arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
- size_t usize)
+arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0)
@@ -997,51 +1030,52 @@ arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize));
- chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ true);
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
static bool
-arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
- size_t udiff, size_t cdiff)
+arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
+ bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
- err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
- zero, &commit) == NULL);
+ err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ chunksize, zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize);
arena->stats.mapped -= cdiff;
}
arena_nactive_sub(arena, udiff >> LG_PAGE);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero,
- true);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ *zero, true);
err = true;
}
return (err);
}
bool
-arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
- size_t usize, bool *zero)
+arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, bool *zero)
{
bool err;
- chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */
if (config_stats) {
@@ -1050,17 +1084,17 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
}
arena_nactive_add(arena, udiff >> LG_PAGE);
- err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize,
- zero, true) == NULL);
- malloc_mutex_unlock(&arena->lock);
+ err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ chunksize, zero, true) == NULL);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (err) {
- err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
- chunk, oldsize, usize, zero, nchunk, udiff,
+ err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
+ &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero,
- true);
+ chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ *zero, true);
err = true;
}
@@ -1079,7 +1113,7 @@ arena_run_first_best_fit(arena_t *arena, size_t size)
ind = size2index(run_quantize_ceil(size));
for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
- arena_chunk_map_misc_t *miscelm = arena_run_tree_first(
+ arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
arena_runs_avail_get(arena, i));
if (miscelm != NULL)
return (&miscelm->run);
@@ -1100,7 +1134,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
-arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
+arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1116,9 +1150,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(arena);
+ chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) {
- run = &arena_miscelm_get(chunk, map_bias)->run;
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
return (run);
@@ -1144,7 +1178,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
}
static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
+arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1161,9 +1195,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(arena);
+ chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) {
- run = &arena_miscelm_get(chunk, map_bias)->run;
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_small(arena, run, size, binind))
run = NULL;
return (run);
@@ -1186,28 +1220,28 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
}
ssize_t
-arena_lg_dirty_mult_get(arena_t *arena)
+arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t lg_dirty_mult;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
lg_dirty_mult = arena->lg_dirty_mult;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (lg_dirty_mult);
}
bool
-arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
+arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena->lg_dirty_mult = lg_dirty_mult;
- arena_maybe_purge(arena);
- malloc_mutex_unlock(&arena->lock);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
@@ -1265,7 +1299,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena)
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += arena->decay_backlog[i] * h_steps[i];
- npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);
+ npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
}
@@ -1273,7 +1307,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena)
static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
- uint64_t nadvance;
+ uint64_t nadvance_u64;
nstime_t delta;
size_t ndirty_delta;
@@ -1282,27 +1316,31 @@ arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
nstime_copy(&delta, time);
nstime_subtract(&delta, &arena->decay_epoch);
- nadvance = nstime_divide(&delta, &arena->decay_interval);
- assert(nadvance > 0);
+ nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
+ assert(nadvance_u64 > 0);
- /* Add nadvance decay intervals to epoch. */
+ /* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &arena->decay_interval);
- nstime_imultiply(&delta, nadvance);
+ nstime_imultiply(&delta, nadvance_u64);
nstime_add(&arena->decay_epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(arena);
/* Update the backlog. */
- if (nadvance >= SMOOTHSTEP_NSTEPS) {
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
- memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
- (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
- if (nadvance > 1) {
+ size_t nadvance_z = (size_t)nadvance_u64;
+
+ assert((uint64_t)nadvance_z == nadvance_u64);
+
+ memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
- nadvance], 0, (nadvance-1) * sizeof(size_t));
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
@@ -1352,29 +1390,33 @@ static bool
arena_decay_time_valid(ssize_t decay_time)
{
- return (decay_time >= -1 && decay_time <= NSTIME_SEC_MAX);
+ if (decay_time < -1)
+ return (false);
+ if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
+ return (true);
+ return (false);
}
ssize_t
-arena_decay_time_get(arena_t *arena)
+arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t decay_time;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
decay_time = arena->decay_time;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (decay_time);
}
bool
-arena_decay_time_set(arena_t *arena, ssize_t decay_time)
+arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{
if (!arena_decay_time_valid(decay_time))
return (true);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
@@ -1384,14 +1426,14 @@ arena_decay_time_set(arena_t *arena, ssize_t decay_time)
* arbitrary change during initial arena configuration.
*/
arena_decay_init(arena, decay_time);
- arena_maybe_purge(arena);
- malloc_mutex_unlock(&arena->lock);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
static void
-arena_maybe_purge_ratio(arena_t *arena)
+arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{
assert(opt_purge == purge_mode_ratio);
@@ -1414,12 +1456,12 @@ arena_maybe_purge_ratio(arena_t *arena)
*/
if (arena->ndirty <= threshold)
return;
- arena_purge_to_limit(arena, threshold);
+ arena_purge_to_limit(tsdn, arena, threshold);
}
}
static void
-arena_maybe_purge_decay(arena_t *arena)
+arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
nstime_t time;
size_t ndirty_limit;
@@ -1429,7 +1471,7 @@ arena_maybe_purge_decay(arena_t *arena)
/* Purge all or nothing if the option is disabled. */
if (arena->decay_time <= 0) {
if (arena->decay_time == 0)
- arena_purge_to_limit(arena, 0);
+ arena_purge_to_limit(tsdn, arena, 0);
return;
}
@@ -1450,11 +1492,11 @@ arena_maybe_purge_decay(arena_t *arena)
*/
if (arena->ndirty <= ndirty_limit)
return;
- arena_purge_to_limit(arena, ndirty_limit);
+ arena_purge_to_limit(tsdn, arena, ndirty_limit);
}
void
-arena_maybe_purge(arena_t *arena)
+arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{
/* Don't recursively purge. */
@@ -1462,9 +1504,9 @@ arena_maybe_purge(arena_t *arena)
return;
if (opt_purge == purge_mode_ratio)
- arena_maybe_purge_ratio(arena);
+ arena_maybe_purge_ratio(tsdn, arena);
else
- arena_maybe_purge_decay(arena);
+ arena_maybe_purge_decay(tsdn, arena);
}
static size_t
@@ -1502,7 +1544,7 @@ arena_dirty_count(arena_t *arena)
}
static size_t
-arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1533,7 +1575,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
* dalloc_node=false argument to chunk_alloc_cache().
*/
zero = false;
- chunk = chunk_alloc_cache(arena, chunk_hooks,
+ chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
false);
@@ -1568,7 +1610,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
* prior to allocation.
*/
if (chunk == arena->spare)
- arena_chunk_alloc(arena);
+ arena_chunk_alloc(tsdn, arena);
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
@@ -1592,7 +1634,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static size_t
-arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1604,7 +1646,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
nmadvise = 0;
npurged = 0;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
@@ -1643,7 +1685,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
flag_unzeroed = 0;
flags = CHUNK_MAP_DECOMMITTED;
} else {
- flag_unzeroed = chunk_purge_wrapper(arena,
+ flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
chunk_hooks, chunk, chunksize, pageind <<
LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
flags = flag_unzeroed;
@@ -1674,7 +1716,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (config_stats)
nmadvise++;
}
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena->stats.nmadvise += nmadvise;
@@ -1685,7 +1727,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static void
-arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1705,10 +1747,10 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
- arena_node_dalloc(arena, chunkselm);
+ arena_node_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
- chunk_dalloc_wrapper(arena, chunk_hooks, addr, size,
- zeroed, committed);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
+ size, zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -1719,7 +1761,8 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
pageind) != 0);
arena_run_t *run = &miscelm->run;
qr_remove(rdelm, rd_link);
- arena_run_dalloc(arena, run, false, true, decommitted);
+ arena_run_dalloc(tsdn, arena, run, false, true,
+ decommitted);
}
}
}
@@ -1734,9 +1777,9 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
* (arena->ndirty >= ndirty_limit)
*/
static void
-arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
+arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
- chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t npurge, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
@@ -1757,14 +1800,14 @@ arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
- npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
+ npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
&purge_runs_sentinel, &purge_chunks_sentinel);
if (npurge == 0)
goto label_return;
- npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
- &purge_chunks_sentinel);
+ npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
assert(npurged == npurge);
- arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
+ arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
if (config_stats)
@@ -1775,15 +1818,159 @@ label_return:
}
void
-arena_purge(arena_t *arena, bool all)
+arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
{
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (all)
- arena_purge_to_limit(arena, 0);
+ arena_purge_to_limit(tsdn, arena, 0);
else
- arena_maybe_purge(arena);
- malloc_mutex_unlock(&arena->lock);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static void
+arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
+{
+ size_t pageind, npages;
+
+ cassert(config_prof);
+ assert(opt_prof);
+
+ /*
+ * Iterate over the allocated runs and remove profiled allocations from
+ * the sample set.
+ */
+ for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
+ if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ void *ptr = (void *)((uintptr_t)chunk + (pageind
+ << LG_PAGE));
+ size_t usize = isalloc(tsd_tsdn(tsd), ptr,
+ config_prof);
+
+ prof_free(tsd, ptr, usize);
+ npages = arena_mapbits_large_size_get(chunk,
+ pageind) >> LG_PAGE;
+ } else {
+ /* Skip small run. */
+ size_t binind = arena_mapbits_binind_get(chunk,
+ pageind);
+ arena_bin_info_t *bin_info =
+ &arena_bin_info[binind];
+ npages = bin_info->run_size >> LG_PAGE;
+ }
+ } else {
+ /* Skip unallocated run. */
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ assert(pageind + npages <= chunk_npages);
+ }
+}
+
+void
+arena_reset(tsd_t *tsd, arena_t *arena)
+{
+ unsigned i;
+ extent_node_t *node;
+
+ /*
+ * Locking in this function is unintuitive. The caller guarantees that
+ * no concurrent operations are happening in this arena, but there are
+ * still reasons that some locking is necessary:
+ *
+ * - Some of the functions in the transitive closure of calls assume
+ * appropriate locks are held, and in some cases these locks are
+ * temporarily dropped to avoid lock order reversal or deadlock due to
+ * reentry.
+ * - mallctl("epoch", ...) may concurrently refresh stats. While
+ * strictly speaking this is a "concurrent operation", disallowing
+ * stats refreshes would impose an inconvenient burden.
+ */
+
+ /* Remove large allocations from prof sample set. */
+ if (config_prof && opt_prof) {
+ ql_foreach(node, &arena->achunks, ql_link) {
+ arena_achunk_prof_reset(tsd, arena,
+ extent_node_addr_get(node));
+ }
+ }
+
+ /* Reset curruns for large size classes. */
+ if (config_stats) {
+ for (i = 0; i < nlclasses; i++)
+ arena->stats.lstats[i].curruns = 0;
+ }
+
+ /* Huge allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
+ ql_last(&arena->huge, ql_link)) {
+ void *ptr = extent_node_addr_get(node);
+ size_t usize;
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+ if (config_stats || (config_prof && opt_prof))
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ /* Remove huge allocation from prof sample set. */
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ huge_dalloc(tsd_tsdn(tsd), ptr);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ /* Cancel out unwanted effects on stats. */
+ if (config_stats)
+ arena_huge_reset_stats_cancel(arena, usize);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+
+ /* Bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->runcur = NULL;
+ arena_run_heap_new(&bin->runs);
+ if (config_stats) {
+ bin->stats.curregs = 0;
+ bin->stats.curruns = 0;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+
+ /*
+ * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
+ * chains directly correspond.
+ */
+ qr_new(&arena->runs_dirty, rd_link);
+ for (node = qr_next(&arena->chunks_cache, cc_link);
+ node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
+ qr_new(&node->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &node->rd, rd_link);
+ }
+
+ /* Arena chunks. */
+ for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
+ ql_last(&arena->achunks, ql_link)) {
+ ql_remove(&arena->achunks, node, ql_link);
+ arena_chunk_discard(tsd_tsdn(tsd), arena,
+ extent_node_addr_get(node));
+ }
+
+ /* Spare. */
+ if (arena->spare != NULL) {
+ arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
+ arena->spare = NULL;
+ }
+
+ assert(!arena->purging);
+ arena->nactive = 0;
+
+ for (i = 0; i < runs_avail_nclasses; i++)
+ arena_run_heap_new(&arena->runs_avail[i]);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
static void
@@ -1900,8 +2087,8 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
- bool decommitted)
+arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned, bool decommitted)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
@@ -1961,7 +2148,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
if (size == arena_maxrun) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE));
- arena_chunk_dalloc(arena, chunk);
+ arena_chunk_dalloc(tsdn, arena, chunk);
}
/*
@@ -1972,12 +2159,12 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
* chances of spuriously crossing the dirty page purging threshold.
*/
if (dirty)
- arena_maybe_purge(arena);
+ arena_maybe_purge(tsdn, arena);
}
static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize)
+arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2012,12 +2199,13 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
+ arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
+ 0));
}
static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize, bool dirty)
+arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2054,20 +2242,10 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
+ tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
tail_run = &tail_miscelm->run;
- arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
- 0));
-}
-
-static arena_run_t *
-arena_bin_runs_first(arena_bin_t *bin)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
- if (miscelm != NULL)
- return (&miscelm->run);
-
- return (NULL);
+ arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
+ != 0));
}
static void
@@ -2075,35 +2253,25 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
-
- arena_run_tree_insert(&bin->runs, miscelm);
-}
-
-static void
-arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
-
- assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
-
- arena_run_tree_remove(&bin->runs, miscelm);
+ arena_run_heap_insert(&bin->runs, miscelm);
}
static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
- arena_run_t *run = arena_bin_runs_first(bin);
- if (run != NULL) {
- arena_bin_runs_remove(bin, run);
- if (config_stats)
- bin->stats.reruns++;
- }
- return (run);
+ arena_chunk_map_misc_t *miscelm;
+
+ miscelm = arena_run_heap_remove_first(&bin->runs);
+ if (miscelm == NULL)
+ return (NULL);
+ if (config_stats)
+ bin->stats.reruns++;
+
+ return (&miscelm->run);
}
static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
szind_t binind;
@@ -2119,19 +2287,19 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
bin_info = &arena_bin_info[binind];
/* Allocate a new run. */
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_small(arena, bin_info->run_size, binind);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
if (run != NULL) {
/* Initialize run internals. */
run->binind = binind;
run->nfree = bin_info->nregs;
bitmap_init(run->bitmap, &bin_info->bitmap_info);
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
/********************************/
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
if (run != NULL) {
if (config_stats) {
bin->stats.nruns++;
@@ -2154,7 +2322,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
-arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
szind_t binind;
arena_bin_info_t *bin_info;
@@ -2163,7 +2331,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(arena, bin);
+ run = arena_bin_nonfull_run_get(tsdn, arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/*
* Another thread updated runcur while this one ran without the
@@ -2184,9 +2352,10 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
* were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs)
- arena_dalloc_bin_run(arena, chunk, run, bin);
- else
+ if (run->nfree == bin_info->nregs) {
+ arena_dalloc_bin_run(tsdn, arena, chunk, run,
+ bin);
+ } else
arena_bin_lower_run(arena, chunk, run, bin);
}
return (ret);
@@ -2203,7 +2372,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
-arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes)
{
unsigned i, nfill;
@@ -2211,10 +2380,10 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(arena, prof_accumbytes))
- prof_idump();
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
+ prof_idump(tsdn);
bin = &arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
arena_run_t *run;
@@ -2222,7 +2391,7 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ptr = arena_bin_malloc_hard(arena, bin);
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin);
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
@@ -2249,30 +2418,31 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i;
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
}
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
+ size_t redzone_size = bin_info->redzone_size;
+
if (zero) {
- size_t redzone_size = bin_info->redzone_size;
- memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
- redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
- redzone_size);
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
} else {
- memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
- bin_info->reg_interval);
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
}
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
+#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
@@ -2287,7 +2457,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after,
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
- JEMALLOC_N(arena_redzone_corruption_impl);
+ JEMALLOC_N(n_arena_redzone_corruption);
#endif
static void
@@ -2302,22 +2472,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != 0xa5) {
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true;
arena_redzone_corruption(ptr, size, false, i,
*byte);
if (reset)
- *byte = 0xa5;
+ *byte = JEMALLOC_ALLOC_JUNK;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != 0xa5) {
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true;
arena_redzone_corruption(ptr, size, true, i,
*byte);
if (reset)
- *byte = 0xa5;
+ *byte = JEMALLOC_ALLOC_JUNK;
}
}
}
@@ -2328,7 +2498,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
+#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
@@ -2336,14 +2506,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
- memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+ memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
- JEMALLOC_N(arena_dalloc_junk_small_impl);
+ JEMALLOC_N(n_arena_dalloc_junk_small);
#endif
void
@@ -2362,7 +2532,7 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
}
static void *
-arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
void *ret;
arena_bin_t *bin;
@@ -2373,14 +2543,14 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
bin = &arena->bins[binind];
usize = index2size(binind);
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ret = arena_bin_malloc_hard(arena, bin);
+ ret = arena_bin_malloc_hard(tsdn, arena, bin);
if (ret == NULL) {
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
return (NULL);
}
@@ -2389,9 +2559,9 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
bin->stats.nrequests++;
bin->stats.curregs++;
}
- malloc_mutex_unlock(&bin->lock);
- if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
- prof_idump();
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
+ prof_idump(tsdn);
if (!zero) {
if (config_fill) {
@@ -2411,12 +2581,12 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
memset(ret, 0, usize);
}
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
+arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
void *ret;
size_t usize;
@@ -2427,7 +2597,7 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
/* Large allocation. */
usize = index2size(binind);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_cache_oblivious) {
uint64_t r;
@@ -2440,9 +2610,9 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else
random_offset = 0;
- run = arena_run_alloc_large(arena, usize + large_pad, zero);
+ run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
if (run == NULL) {
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
miscelm = arena_run_to_miscelm(run);
@@ -2460,42 +2630,45 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
}
if (config_prof)
idump = arena_prof_accum_locked(arena, usize);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (config_prof && idump)
- prof_idump();
+ prof_idump(tsdn);
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc))
- memset(ret, 0xa5, usize);
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
}
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
- bool zero, tcache_t *tcache)
+arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero)
{
- arena = arena_choose(tsd, arena);
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL))
return (NULL);
if (likely(size <= SMALL_MAXCLASS))
- return (arena_malloc_small(tsd, arena, ind, zero));
+ return (arena_malloc_small(tsdn, arena, ind, zero));
if (likely(size <= large_maxclass))
- return (arena_malloc_large(tsd, arena, ind, zero));
- return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
+ return (arena_malloc_large(tsdn, arena, ind, zero));
+ return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
/* Only handles large allocations that require more than page alignment. */
static void *
-arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
@@ -2505,19 +2678,21 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *miscelm;
void *rpages;
+ assert(!tsdn_null(tsdn) || arena != NULL);
assert(usize == PAGE_CEILING(usize));
- arena = arena_choose(tsd, arena);
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL))
return (NULL);
alignment = PAGE_CEILING(alignment);
- alloc_size = usize + large_pad + alignment - PAGE;
+ alloc_size = usize + large_pad + alignment;
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_large(arena, alloc_size, false);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
if (run == NULL) {
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -2532,16 +2707,16 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *head_miscelm = miscelm;
arena_run_t *head_run = run;
- miscelm = arena_miscelm_get(chunk,
+ miscelm = arena_miscelm_get_mutable(chunk,
arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
LG_PAGE));
run = &miscelm->run;
- arena_run_trim_head(arena, chunk, head_run, alloc_size,
+ arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
alloc_size - leadsize);
}
if (trailsize != 0) {
- arena_run_trim_tail(arena, chunk, run, usize + large_pad +
+ arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
trailsize, usize + large_pad, false);
}
if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
@@ -2552,8 +2727,8 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
run_ind) != 0);
assert(decommitted); /* Cause of OOM. */
- arena_run_dalloc(arena, run, dirty, false, decommitted);
- malloc_mutex_unlock(&arena->lock);
+ arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
ret = arena_miscelm_to_rpages(miscelm);
@@ -2568,20 +2743,20 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (config_fill && !zero) {
if (unlikely(opt_junk_alloc))
- memset(ret, 0xa5, usize);
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
@@ -2589,7 +2764,7 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */
- ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
} else if (usize <= large_maxclass && alignment <= PAGE) {
/*
@@ -2598,26 +2773,25 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* the base of the run, so do some bit manipulation to retrieve
* the base.
*/
- ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else {
if (likely(usize <= large_maxclass)) {
- ret = arena_palloc_large(tsd, arena, usize, alignment,
+ ret = arena_palloc_large(tsdn, arena, usize, alignment,
zero);
} else if (likely(alignment <= chunksize))
- ret = huge_malloc(tsd, arena, usize, zero, tcache);
+ ret = huge_malloc(tsdn, arena, usize, zero);
else {
- ret = huge_palloc(tsd, arena, usize, alignment, zero,
- tcache);
+ ret = huge_palloc(tsdn, arena, usize, alignment, zero);
}
}
return (ret);
}
void
-arena_prof_promoted(const void *ptr, size_t size)
+arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind;
@@ -2626,8 +2800,8 @@ arena_prof_promoted(const void *ptr, size_t size)
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr, false) == LARGE_MINCLASS);
- assert(isalloc(ptr, true) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -2636,8 +2810,8 @@ arena_prof_promoted(const void *ptr, size_t size)
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(ptr, false) == LARGE_MINCLASS);
- assert(isalloc(ptr, true) == size);
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == size);
}
static void
@@ -2653,33 +2827,34 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
&chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ /*
+ * The following block's conditional is necessary because if the
+ * run only contains one region, then it never gets inserted
+ * into the non-full runs tree.
+ */
if (bin_info->nregs != 1) {
- /*
- * This block's conditional is necessary because if the
- * run only contains one region, then it never gets
- * inserted into the non-full runs tree.
- */
- arena_bin_runs_remove(bin, run);
+ arena_chunk_map_misc_t *miscelm =
+ arena_run_to_miscelm(run);
+
+ arena_run_heap_remove(&bin->runs, miscelm);
}
}
}
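
bin->runs is now a pairing heap keyed by the run's miscelm, so the removal above calls arena_run_heap_remove() directly. A counterpart insertion sketch, assuming arena_run_heap_insert() is the matching generated heap operation (this helper is illustrative only, not part of the patch):

    static void
    arena_bin_runs_insert_sketch(arena_bin_t *bin, arena_run_t *run)
    {
            arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

            /* Only runs with more than one region are ever tracked here. */
            arena_run_heap_insert(&bin->runs, miscelm);
    }
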
static void
-arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
+arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin)
{
assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
- NULL);
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(&arena->lock);
- arena_run_dalloc(arena, run, true, false, false);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
/****************************/
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats)
bin->stats.curruns--;
}
@@ -2706,8 +2881,8 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
}
static void
-arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_bits_t *bitselm, bool junked)
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{
size_t pageind, rpages_ind;
arena_run_t *run;
@@ -2717,7 +2892,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
binind = run->binind;
bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
@@ -2728,7 +2903,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(arena, chunk, run, bin);
+ arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
@@ -2739,15 +2914,15 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
}
void
-arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_bits_t *bitselm)
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
{
- arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
}
void
-arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
@@ -2755,16 +2930,16 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
bin = &arena->bins[run->binind];
- malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
+ malloc_mutex_unlock(tsdn, &bin->lock);
}
void
-arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind)
+arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind)
{
arena_chunk_map_bits_t *bitselm;
@@ -2773,35 +2948,36 @@ arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
- bitselm = arena_bitselm_get(chunk, pageind);
- arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
- arena_decay_tick(tsd, arena);
+ bitselm = arena_bitselm_get_mutable(chunk, pageind);
+ arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
+ arena_decay_tick(tsdn, arena);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
+#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && unlikely(opt_junk_free))
- memset(ptr, 0x5a, usize);
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(arena_dalloc_junk_large_impl);
+ JEMALLOC_N(n_arena_dalloc_junk_large);
#endif
static void
-arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, bool junked)
+arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, bool junked)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
@@ -2820,33 +2996,35 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
}
}
- arena_run_dalloc(arena, run, true, false, false);
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
}
void
-arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
- void *ptr)
+arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr)
{
- arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
}
void
-arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
{
- malloc_mutex_lock(&arena->lock);
- arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
- malloc_mutex_unlock(&arena->lock);
- arena_decay_tick(tsd, arena);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_decay_tick(tsdn, arena);
}
static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size)
+arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t size)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
arena_run_t *run = &miscelm->run;
assert(size < oldsize);
@@ -2855,8 +3033,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* Shrink the run, and make trailing pages available for other
* allocations.
*/
- malloc_mutex_lock(&arena->lock);
- arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
large_pad, true);
if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS;
@@ -2874,12 +3052,12 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
+arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE;
@@ -2889,7 +3067,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
large_pad);
/* Try to extend the run. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0)
goto label_fail;
@@ -2912,7 +3090,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
if (splitsize == 0)
goto label_fail;
- run = &arena_miscelm_get(chunk, pageind+npages)->run;
+ run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail;
@@ -2969,24 +3147,24 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
label_fail:
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (true);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
+#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize), 0x5a,
+ memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
old_usize - usize);
}
}
@@ -2994,7 +3172,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(arena_ralloc_junk_large_impl);
+ JEMALLOC_N(n_arena_ralloc_junk_large);
#endif
/*
@@ -3002,7 +3180,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
 * always fail if growing an object and the following run is already in use.
*/
static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
arena_chunk_t *chunk;
@@ -3017,15 +3195,16 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
arena = extent_node_arena_get(&chunk->node);
if (oldsize < usize_max) {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
- usize_min, usize_max, zero);
+ bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
+ oldsize, usize_min, usize_max, zero);
if (config_fill && !ret && !zero) {
if (unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
- isalloc(ptr, config_prof) - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize),
+ JEMALLOC_ALLOC_JUNK,
+ isalloc(tsdn, ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
- isalloc(ptr, config_prof) - oldsize);
+ isalloc(tsdn, ptr, config_prof) - oldsize);
}
}
return (ret);
@@ -3034,12 +3213,12 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, usize_max);
- arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
return (false);
}
bool
-arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero)
{
size_t usize_min, usize_max;
@@ -3069,32 +3248,32 @@ arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
} else {
if (usize_max <= SMALL_MAXCLASS)
return (true);
- if (arena_ralloc_large(ptr, oldsize, usize_min,
+ if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
usize_max, zero))
return (true);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
+ arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
return (false);
} else {
- return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
+ return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
usize_max, zero));
}
}
static void *
-arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment == 0)
- return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
- tcache, true));
+ return (arena_malloc(tsdn, arena, usize, size2index(usize),
+ zero, tcache, true));
usize = sa2u(usize, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
- return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+ return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}
void *
@@ -3112,7 +3291,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
- if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
+ if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
+ zero))
return (ptr);
/*
@@ -3120,8 +3300,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space
* and copying.
*/
- ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
- zero, tcache);
+ ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
+ alignment, zero, tcache);
if (ret == NULL)
return (NULL);
@@ -3133,7 +3313,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
} else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
zero, tcache);
@@ -3142,25 +3322,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
}
dss_prec_t
-arena_dss_prec_get(arena_t *arena)
+arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
dss_prec_t ret;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
ret = arena->dss_prec;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
}
bool
-arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
+arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena->dss_prec = dss_prec;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
@@ -3208,7 +3388,7 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
size_t *nactive, size_t *ndirty)
{
- *nthreads += arena_nthreads_get(arena);
+ *nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*decay_time = arena->decay_time;
@@ -3217,32 +3397,34 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
}
void
-arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
- ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
- size_t *ndirty)
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty)
{
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
- ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats)
{
unsigned i;
cassert(config_stats);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
astats->mapped += arena->stats.mapped;
+ astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
@@ -3268,12 +3450,12 @@ arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
@@ -3285,58 +3467,58 @@ arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
}
}
unsigned
-arena_nthreads_get(arena_t *arena)
+arena_nthreads_get(arena_t *arena, bool internal)
{
- return (atomic_read_u(&arena->nthreads));
+ return (atomic_read_u(&arena->nthreads[internal]));
}
void
-arena_nthreads_inc(arena_t *arena)
+arena_nthreads_inc(arena_t *arena, bool internal)
{
- atomic_add_u(&arena->nthreads, 1);
+ atomic_add_u(&arena->nthreads[internal], 1);
}
void
-arena_nthreads_dec(arena_t *arena)
+arena_nthreads_dec(arena_t *arena, bool internal)
{
- atomic_sub_u(&arena->nthreads, 1);
+ atomic_sub_u(&arena->nthreads[internal], 1);
}
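
arena->nthreads is now a two-element array indexed by a bool, so internal (metadata) threads and application threads are counted separately. A usage sketch (example_thread_accounting() is a hypothetical helper, not part of the patch):

    static void
    example_thread_accounting(arena_t *arena, bool internal)
    {

            arena_nthreads_inc(arena, internal);
            assert(arena_nthreads_get(arena, internal) >= 1);
            /* ... the thread allocates from the arena ... */
            arena_nthreads_dec(arena, internal);
    }
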
arena_t *
-arena_new(unsigned ind)
+arena_new(tsdn_t *tsdn, unsigned ind)
{
arena_t *arena;
size_t arena_size;
unsigned i;
- arena_bin_t *bin;
/* Compute arena size to incorporate sufficient runs_avail elements. */
- arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) *
+ arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
runs_avail_nclasses);
/*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs.
*/
if (config_stats) {
- arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) +
- QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
- nhclasses) * sizeof(malloc_huge_stats_t));
+ arena = (arena_t *)base_alloc(tsdn,
+ CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
+ sizeof(malloc_large_stats_t) + nhclasses) *
+ sizeof(malloc_huge_stats_t));
} else
- arena = (arena_t *)base_alloc(arena_size);
+ arena = (arena_t *)base_alloc(tsdn, arena_size);
if (arena == NULL)
return (NULL);
arena->ind = ind;
- arena->nthreads = 0;
- if (malloc_mutex_init(&arena->lock))
+ arena->nthreads[0] = arena->nthreads[1] = 0;
+ if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
return (NULL);
if (config_stats) {
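
The hunk breaks off inside the config_stats branch; per the comment above, that branch presumably lays the large- and huge-size-class stats arrays out directly after the arena struct within the single base_alloc() region. A sketch of that layout under those assumptions (reconstructed for illustration; not a quote of the lines this hunk omits):

            memset(&arena->stats, 0, sizeof(arena_stats_t));
            /* lstats starts at the first cacheline boundary past the arena. */
            arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena +
                CACHELINE_CEILING(arena_size));
            /* hstats follows the quantum-rounded lstats array. */
            arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena +
                CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
                sizeof(malloc_large_stats_t)));
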
@@ -3369,7 +3551,9 @@ arena_new(unsigned ind)
(uint64_t)(uintptr_t)arena;
}
- arena->dss_prec = chunk_dss_prec_get();
+ arena->dss_prec = chunk_dss_prec_get(tsdn);
+
+ ql_new(&arena->achunks);
arena->spare = NULL;
@@ -3379,7 +3563,7 @@ arena_new(unsigned ind)
arena->ndirty = 0;
for(i = 0; i < runs_avail_nclasses; i++)
- arena_run_tree_new(&arena->runs_avail[i]);
+ arena_run_heap_new(&arena->runs_avail[i]);
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
@@ -3387,28 +3571,32 @@ arena_new(unsigned ind)
arena_decay_init(arena, arena_decay_time_default_get());
ql_new(&arena->huge);
- if (malloc_mutex_init(&arena->huge_mtx))
+ if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
+ WITNESS_RANK_ARENA_HUGE))
return (NULL);
extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
- if (malloc_mutex_init(&arena->chunks_mtx))
+ if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
+ WITNESS_RANK_ARENA_CHUNKS))
return (NULL);
ql_new(&arena->node_cache);
- if (malloc_mutex_init(&arena->node_cache_mtx))
+ if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
+ WITNESS_RANK_ARENA_NODE_CACHE))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
+ arena_bin_t *bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock, "arena_bin",
+ WITNESS_RANK_ARENA_BIN))
return (NULL);
bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
+ arena_run_heap_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
@@ -3537,7 +3725,7 @@ small_run_size_init(void)
assert(small_maxrun != 0);
- small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+ small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
LG_PAGE));
if (small_run_tab == NULL)
return (true);
@@ -3564,12 +3752,12 @@ run_quantize_init(void)
run_quantize_max = chunksize + large_pad;
- run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) *
+ run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
(run_quantize_max >> LG_PAGE));
if (run_quantize_floor_tab == NULL)
return (true);
- run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) *
+ run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
(run_quantize_max >> LG_PAGE));
if (run_quantize_ceil_tab == NULL)
return (true);
@@ -3646,58 +3834,58 @@ arena_boot(void)
}
void
-arena_prefork0(arena_t *arena)
+arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{
- malloc_mutex_prefork(&arena->lock);
+ malloc_mutex_prefork(tsdn, &arena->lock);
}
void
-arena_prefork1(arena_t *arena)
+arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{
- malloc_mutex_prefork(&arena->chunks_mtx);
+ malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}
void
-arena_prefork2(arena_t *arena)
+arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{
- malloc_mutex_prefork(&arena->node_cache_mtx);
+ malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}
void
-arena_prefork3(arena_t *arena)
+arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
for (i = 0; i < NBINS; i++)
- malloc_mutex_prefork(&arena->bins[i].lock);
- malloc_mutex_prefork(&arena->huge_mtx);
+ malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
+ malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}
void
-arena_postfork_parent(arena_t *arena)
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
- malloc_mutex_postfork_parent(&arena->huge_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_parent(&arena->bins[i].lock);
- malloc_mutex_postfork_parent(&arena->node_cache_mtx);
- malloc_mutex_postfork_parent(&arena->chunks_mtx);
- malloc_mutex_postfork_parent(&arena->lock);
+ malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->lock);
}
void
-arena_postfork_child(arena_t *arena)
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
- malloc_mutex_postfork_child(&arena->huge_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_child(&arena->bins[i].lock);
- malloc_mutex_postfork_child(&arena->node_cache_mtx);
- malloc_mutex_postfork_child(&arena->chunks_mtx);
- malloc_mutex_postfork_child(&arena->lock);
+ malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->lock);
}