Diffstat (limited to 'src/chunk.c')
-rw-r--r--	src/chunk.c	186
1 file changed, 97 insertions(+), 89 deletions(-)
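
The diff below threads a tsd_t * (thread-specific data) argument through the chunk management paths: mutex lock/unlock, extent node alloc/dalloc, the dss helpers, and the gdump/prefork paths now take tsd explicitly, while hooks whose signatures are fixed by the chunk_hooks_t interface (chunk_alloc_default, chunk_dalloc_default, chunk_merge_default, chunks_rtree_node_alloc) obtain it once with tsd_fetch(). The following is a minimal sketch of that fixed-signature-hook pattern, using simplified stand-in types and prototypes rather than jemalloc's real definitions:

/*
 * Sketch only: simplified stand-ins, not jemalloc's actual definitions.
 * It illustrates the pattern this diff applies -- internal functions take
 * tsd_t * explicitly, and a hook whose signature is fixed by the
 * chunk_hooks_t interface fetches the thread-specific data once with
 * tsd_fetch() and passes it down.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s tsd_t;		/* stand-in for jemalloc's tsd_t */
typedef struct arena_s arena_t;		/* stand-in for jemalloc's arena_t */

/* Prototypes assumed per the signatures introduced in this diff. */
tsd_t	*tsd_fetch(void);
arena_t	*chunk_arena_get(tsd_t *tsd, unsigned arena_ind);
void	*chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit, int dss_prec);

void *
example_chunk_alloc(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, unsigned arena_ind)
{
	/* Fixed hook signature: no tsd parameter, so fetch it locally. */
	tsd_t *tsd = tsd_fetch();
	arena_t *arena = chunk_arena_get(tsd, arena_ind);

	return (chunk_alloc_core(tsd, arena, new_addr, size, alignment,
	    zero, commit, 0 /* placeholder for arena->dss_prec */));
}
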
diff --git a/src/chunk.c b/src/chunk.c
index 304d4e5..0ee2a1a 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
* definition.
*/
-static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
- void *chunk, size_t size, bool zeroed, bool committed);
+static void chunk_record(tsd_t *tsd, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
+ bool committed);
/******************************************************************************/
@@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
}
chunk_hooks_t
-chunk_hooks_get(arena_t *arena)
+chunk_hooks_get(tsd_t *tsd, arena_t *arena)
{
chunk_hooks_t chunk_hooks;
- malloc_mutex_lock(&arena->chunks_mtx);
+ malloc_mutex_lock(tsd, &arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
-chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+chunk_hooks_set(tsd_t *tsd, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
- malloc_mutex_lock(&arena->chunks_mtx);
+ malloc_mutex_lock(tsd, &arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
@@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
return (old_chunk_hooks);
}
static void
-chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
- bool locked)
+chunk_hooks_assure_initialized_impl(tsd_t *tsd, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER;
@@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
- chunk_hooks_get(arena);
+ chunk_hooks_get(tsd, arena);
}
}
static void
-chunk_hooks_assure_initialized_locked(arena_t *arena,
+chunk_hooks_assure_initialized_locked(tsd_t *tsd, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+ chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, true);
}
static void
-chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+chunk_hooks_assure_initialized(tsd_t *tsd, arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+ chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, false);
}
bool
-chunk_register(const void *chunk, const extent_node_t *node)
+chunk_register(tsd_t *tsd, const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
@@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
- prof_gdump();
+ prof_gdump(tsd);
}
return (false);
@@ -197,7 +199,7 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
}
static void *
-chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
@@ -219,8 +221,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- malloc_mutex_lock(&arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ malloc_mutex_lock(tsd, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
@@ -232,7 +234,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -251,7 +253,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
@@ -271,20 +273,21 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
- arena_node_dalloc(arena, node);
- malloc_mutex_unlock(&arena->chunks_mtx);
- chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
- cache, ret, size + trailsize, zeroed, committed);
+ arena_node_dalloc(tsd, arena, node);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+ chunk_record(tsd, arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize, zeroed,
+ committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
- node = arena_node_alloc(arena);
+ node = arena_node_alloc(tsd, arena);
if (node == NULL) {
- malloc_mutex_unlock(&arena->chunks_mtx);
- chunk_record(arena, chunk_hooks, chunks_szad,
- chunks_ad, cache, ret, size + trailsize,
- zeroed, committed);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+ chunk_record(tsd, arena, chunk_hooks,
+ chunks_szad, chunks_ad, cache, ret, size +
+ trailsize, zeroed, committed);
return (NULL);
}
}
@@ -296,16 +299,16 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
- malloc_mutex_unlock(&arena->chunks_mtx);
- chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
- ret, size, zeroed, committed);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+ chunk_record(tsd, arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size, zeroed, committed);
return (NULL);
}
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
- arena_node_dalloc(arena, node);
+ arena_node_dalloc(tsd, arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@@ -328,8 +331,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned.
*/
static void *
-chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit, dss_prec_t dss_prec)
+chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
@@ -340,8 +343,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
- chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
- NULL)
+ chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
return (ret);
/* mmap. */
if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
@@ -349,8 +352,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
- chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
- NULL)
+ chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
return (ret);
/* All strategies for allocation failed. */
@@ -380,8 +383,8 @@ chunk_alloc_base(size_t size)
}
void *
-chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool dalloc_node)
+chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
bool commit;
@@ -392,7 +395,7 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
assert((alignment & chunksize_mask) == 0);
commit = true;
- ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+ ret = chunk_recycle(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
&commit, dalloc_node);
if (ret == NULL)
@@ -404,11 +407,11 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
}
static arena_t *
-chunk_arena_get(unsigned arena_ind)
+chunk_arena_get(tsd_t *tsd, unsigned arena_ind)
{
arena_t *arena;
- arena = arena_get(arena_ind, false);
+ arena = arena_get(tsd, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
@@ -422,11 +425,13 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
void *ret;
+ tsd_t *tsd;
arena_t *arena;
- arena = chunk_arena_get(arena_ind);
- ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit,
- arena->dss_prec);
+ tsd = tsd_fetch();
+ arena = chunk_arena_get(tsd, arena_ind);
+ ret = chunk_alloc_core(tsd, arena, new_addr, size, alignment, zero,
+ commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
if (config_valgrind)
@@ -436,8 +441,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
}
static void *
-chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
assert(size != 0);
@@ -445,20 +450,20 @@ chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
- &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
- commit, true));
+ return (chunk_recycle(tsd, arena, chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, commit, true));
}
void *
-chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
- chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
- ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
+ ret = chunk_alloc_retained(tsd, arena, chunk_hooks, new_addr, size,
alignment, zero, commit);
if (ret == NULL) {
ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
@@ -473,7 +478,7 @@ chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
}
static void
-chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed)
{
@@ -485,8 +490,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
- malloc_mutex_lock(&arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ malloc_mutex_lock(tsd, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
@@ -511,7 +516,7 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
- node = arena_node_alloc(arena);
+ node = arena_node_alloc(tsd, arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
@@ -520,8 +525,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak.
*/
if (cache) {
- chunk_purge_wrapper(arena, chunk_hooks, chunk,
- size, 0, size);
+ chunk_purge_wrapper(tsd, arena, chunk_hooks,
+ chunk, size, 0, size);
}
goto label_return;
}
@@ -557,16 +562,16 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
- arena_node_dalloc(arena, prev);
+ arena_node_dalloc(tsd, arena, prev);
}
label_return:
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsd, &arena->chunks_mtx);
}
void
-chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
- size_t size, bool committed)
+chunk_dalloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed)
{
assert(chunk != NULL);
@@ -574,9 +579,9 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+ chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
- arena_maybe_purge(arena);
+ arena_maybe_purge(tsd, arena);
}
static bool
@@ -584,14 +589,14 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
- if (!have_dss || !chunk_in_dss(chunk))
+ if (!have_dss || !chunk_in_dss(tsd_fetch(), chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
-chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
- size_t size, bool zeroed, bool committed)
+chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool zeroed, bool committed)
{
assert(chunk != NULL);
@@ -599,7 +604,7 @@ chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
@@ -610,7 +615,7 @@ chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
- chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+ chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}
@@ -648,11 +653,11 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
}
bool
-chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
- size_t size, size_t offset, size_t length)
+chunk_purge_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t offset, size_t length)
{
- chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
@@ -673,8 +678,11 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
if (!maps_coalesce)
return (true);
- if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
- return (true);
+ if (have_dss) {
+ tsd_t *tsd = tsd_fetch();
+ if (chunk_in_dss(tsd, chunk_a) != chunk_in_dss(tsd, chunk_b))
+ return (true);
+ }
return (false);
}
@@ -683,7 +691,7 @@ static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
- return ((rtree_node_elm_t *)base_alloc(nelms *
+ return ((rtree_node_elm_t *)base_alloc(tsd_fetch(), nelms *
sizeof(rtree_node_elm_t)));
}
@@ -730,22 +738,22 @@ chunk_boot(void)
}
void
-chunk_prefork(void)
+chunk_prefork(tsd_t *tsd)
{
- chunk_dss_prefork();
+ chunk_dss_prefork(tsd);
}
void
-chunk_postfork_parent(void)
+chunk_postfork_parent(tsd_t *tsd)
{
- chunk_dss_postfork_parent();
+ chunk_dss_postfork_parent(tsd);
}
void
-chunk_postfork_child(void)
+chunk_postfork_child(tsd_t *tsd)
{
- chunk_dss_postfork_child();
+ chunk_dss_postfork_child(tsd);
}