Diffstat (limited to 'src/chunk.c')
 src/chunk.c | 346
 1 file changed, 246 insertions(+), 100 deletions(-)
diff --git a/src/chunk.c b/src/chunk.c
index 7a4ede8..cdd5311 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -18,7 +18,103 @@ size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
+static void *chunk_alloc_default(void *new_addr, size_t size,
+ size_t alignment, bool *zero, unsigned arena_ind);
+static bool chunk_dalloc_default(void *chunk, size_t size,
+ unsigned arena_ind);
+static bool chunk_commit_default(void *chunk, size_t size,
+ unsigned arena_ind);
+static bool chunk_decommit_default(void *chunk, size_t size,
+ unsigned arena_ind);
+static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
+ size_t size_b, bool committed, unsigned arena_ind);
+static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+ size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t chunk_hooks_default = {
+ chunk_alloc_default,
+ chunk_dalloc_default,
+ chunk_commit_default,
+ chunk_decommit_default,
+ chunk_purge_default,
+ chunk_split_default,
+ chunk_merge_default
+};
+
/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool committed, bool zeroed);
+
+/******************************************************************************/
+
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+ return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(arena_t *arena)
+{
+ chunk_hooks_t chunk_hooks;
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks = chunk_hooks_get_locked(arena);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+
+ return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+ chunk_hooks_t old_chunk_hooks;
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ old_chunk_hooks = arena->chunk_hooks;
+ arena->chunk_hooks = *chunk_hooks;
+ malloc_mutex_unlock(&arena->chunks_mtx);
+
+ return (old_chunk_hooks);
+}
+
+static void
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool locked)
+{
+ static const chunk_hooks_t uninitialized_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+
+ if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+ 0) {
+ *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+ chunk_hooks_get(arena);
+ }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+}
bool
chunk_register(const void *chunk, const extent_node_t *node)
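
The hook table and its locked accessors above define the surface an application can replace. The sketch below is a hypothetical hook set, not part of this commit, that backs chunks with a private anonymous mmap() and declines to unmap them, so freed chunks flow into the retained trees; it mirrors the signatures declared at the top of the file and the convention, visible in the defaults, that a bool result reports failure (true) rather than success (false). chunk_hooks_t is assumed to come from jemalloc's headers; installation would go through chunk_hooks_set() internally, or an arena chunk-hooks mallctl in released jemalloc, neither of which this diff shows.

/*
 * Illustrative application-supplied hooks (sketch only).  Leading and
 * trailing slop from the over-allocation is left mapped to keep the
 * example short; a real implementation would unmap it.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
example_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	void *map;
	uintptr_t addr;

	if (new_addr != NULL)
		return (NULL);	/* Placement requests are not supported. */
	/* Over-allocate so an aligned chunk can be carved out. */
	map = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED)
		return (NULL);
	/* alignment is a power of two (a chunksize multiple). */
	addr = ((uintptr_t)map + alignment - 1) & ~((uintptr_t)alignment - 1);
	*zero = true;	/* Fresh anonymous pages read back as zero. */
	return ((void *)addr);
}

static bool
example_dalloc(void *chunk, size_t size, unsigned arena_ind)
{

	/* Refuse to unmap; the arena then records the chunk as retained. */
	return (true);
}

static bool
example_commit(void *chunk, size_t size, unsigned arena_ind)
{

	return (false);	/* Nothing to do; this memory stays committed. */
}

static bool
example_decommit(void *chunk, size_t size, unsigned arena_ind)
{

	return (true);	/* Decline, so commit is never actually needed. */
}

static bool
example_purge(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (true);	/* Decline; pages are left untouched. */
}

static bool
example_split(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	return (false);	/* A plain mapping needs no work to split. */
}

static bool
example_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	return (false);	/* Adjacent plain mappings can always merge. */
}

static const chunk_hooks_t example_hooks = {
	example_alloc,
	example_dalloc,
	example_commit,
	example_decommit,
	example_purge,
	example_split,
	example_merge
};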
@@ -74,21 +170,26 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
assert(size == CHUNK_CEILING(size));
- extent_node_init(&key, arena, NULL, size, false);
+ extent_node_init(&key, arena, NULL, size, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key));
}
static void *
-chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool dalloc_node)
+chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
extent_node_t *node;
size_t alloc_size, leadsize, trailsize;
- bool zeroed;
+ bool committed, zeroed;
assert(new_addr == NULL || alignment == chunksize);
+ /*
+ * Cached chunks use the node linkage embedded in their headers, in
+ * which case dalloc_node is true, and new_addr is non-NULL because
+ * we're operating on a specific chunk.
+ */
assert(dalloc_node || new_addr != NULL);
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
@@ -96,9 +197,11 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
if (alloc_size < size)
return (NULL);
malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
- extent_node_init(&key, arena, new_addr, alloc_size, false);
+ extent_node_init(&key, arena, new_addr, alloc_size, false,
+ false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
@@ -115,9 +218,17 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+ committed = extent_node_committed_get(node);
zeroed = extent_node_zeroed_get(node);
if (zeroed)
*zero = true;
+ /* Split the lead. */
+ if (leadsize != 0 &&
+ chunk_hooks->split(extent_node_addr_get(node),
+ extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ return (NULL);
+ }
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
@@ -131,23 +242,40 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
node = NULL;
}
if (trailsize != 0) {
+ /* Split the trail. */
+ if (chunk_hooks->split(ret, size + trailsize, size,
+ trailsize, false, arena->ind)) {
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(arena, node);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size + trailsize, committed, zeroed);
+ return (NULL);
+ }
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
node = arena_node_alloc(arena);
if (node == NULL) {
malloc_mutex_unlock(&arena->chunks_mtx);
- chunk_record(arena, chunks_szad, chunks_ad,
- cache, ret, size, zeroed);
+ chunk_record(arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize,
+ committed, zeroed);
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
- trailsize, zeroed);
+ trailsize, committed, zeroed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
+ if (!committed && chunk_hooks->commit(ret, size, arena->ind)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
+ ret, size, committed, zeroed);
+ return (NULL);
+ }
malloc_mutex_unlock(&arena->chunks_mtx);
assert(dalloc_node || node != NULL);
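
The leadsize/trailsize handling above carves an aligned request out of a larger recycled extent: the split hook is asked to cut off the lead first and, further down, the trail. The actual computation sits in lines elided from this hunk; a minimal sketch of the arithmetic, with illustrative names, is:

/* Illustration only: lead/trail sizes for an aligned carve-out. */
static void
carve_sizes(void *extent_addr, size_t extent_size, size_t size,
    size_t alignment, size_t *leadsize, size_t *trailsize)
{
	uintptr_t base = (uintptr_t)extent_addr;
	/* First alignment-aligned address at or after base (power of two). */
	uintptr_t ret = (base + alignment - 1) & ~((uintptr_t)alignment - 1);

	*leadsize = (size_t)(ret - base);
	*trailsize = extent_size - *leadsize - size;
}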
@@ -168,20 +296,6 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
return (ret);
}
-static void *
-chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero)
-{
- void *ret;
-
- if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
- &arena->chunks_ad_dss, false, new_addr, size, alignment, zero,
- true)) != NULL)
- return (ret);
- ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
- return (ret);
-}
-
/*
* If the caller specifies (!*zero), it is still possible to receive zeroed
* memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
@@ -193,33 +307,33 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, dss_prec_t dss_prec)
{
void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
+ /* Retained. */
+ if ((ret = chunk_recycle(arena, &chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, true)) != NULL)
+ return (ret);
+
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
- chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
- NULL)
- return (ret);
- /* mmap. */
- if (!config_munmap && (ret = chunk_recycle(arena,
- &arena->chunks_szad_mmap, &arena->chunks_ad_mmap, false, new_addr,
- size, alignment, zero, true)) != NULL)
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
return (ret);
/*
- * Requesting an address is not implemented for chunk_alloc_mmap(), so
- * only call it if (new_addr == NULL).
+ * mmap. Requesting an address is not implemented for
+ * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
*/
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero))
!= NULL)
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
- chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
- NULL)
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
return (ret);
/* All strategies for allocation failed. */
@@ -248,8 +362,8 @@ chunk_alloc_base(size_t size)
}
void *
-chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
- bool *zero, bool dalloc_node)
+chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
@@ -258,8 +372,8 @@ chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = chunk_recycle(arena, &arena->chunks_szad_cache,
- &arena->chunks_ad_cache, true, new_addr, size, alignment, zero,
+ ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
dalloc_node);
if (ret == NULL)
return (NULL);
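
Callers of this and the other wrappers stack-allocate a hook table initialized with CHUNK_HOOKS_INITIALIZER (all-NULL entries); the first call that needs it swaps in arena->chunk_hooks via chunk_hooks_assure_initialized*() under chunks_mtx, so a multi-step operation reads the arena's hooks only once. A hypothetical internal caller (jemalloc-internal types, not compilable on its own) might look like:

static void *
example_cache_alloc(arena_t *arena, size_t usize, bool *zero)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	/*
	 * chunk_recycle() fills chunk_hooks from arena->chunk_hooks on
	 * first use; any later hook calls during this operation reuse
	 * the local copy without relocking.
	 */
	return (chunk_alloc_cache(arena, &chunk_hooks, NULL,
	    CHUNK_CEILING(usize), chunksize, zero, true));
}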
@@ -285,11 +399,13 @@ chunk_arena_get(unsigned arena_ind)
}
static void *
-chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
- bool *zero)
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+ unsigned arena_ind)
{
void *ret;
+ arena_t *arena;
+ arena = chunk_arena_get(arena_ind);
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
arena->dss_prec);
if (ret == NULL)
@@ -300,55 +416,45 @@ chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
return (ret);
}
-/*
- * Default arena chunk allocation routine in the absence of user override. This
- * function isn't actually used by jemalloc, but it does the right thing if the
- * application passes calls through to it during chunk allocation.
- */
void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
- unsigned arena_ind)
-{
- arena_t *arena;
-
- arena = chunk_arena_get(arena_ind);
- return (chunk_alloc_arena(arena, new_addr, size, alignment, zero));
-}
-
-void *
-chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc, void *new_addr,
+chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero)
{
void *ret;
- ret = chunk_alloc(new_addr, size, alignment, zero, arena->ind);
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ ret = chunk_hooks->alloc(new_addr, size, alignment, zero, arena->ind);
if (ret == NULL)
return (NULL);
- if (config_valgrind && chunk_alloc != chunk_alloc_default)
+ if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
return (ret);
}
-void
-chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed)
+static void
+chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool committed, bool zeroed)
{
bool unzeroed;
extent_node_t *node, *prev;
extent_node_t key;
- assert(maps_coalesce || size == chunksize);
assert(!cache || !zeroed);
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
- false);
+ false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && extent_node_addr_get(node) ==
- extent_node_addr_get(&key)) {
+ extent_node_addr_get(&key) && extent_node_committed_get(node) ==
+ committed && !chunk_hooks->merge(chunk, size,
+ extent_node_addr_get(node), extent_node_size_get(node), false,
+ arena->ind)) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
@@ -373,12 +479,13 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
* a virtual memory leak.
*/
if (cache) {
- chunk_purge_wrapper(arena, arena->chunk_purge,
- chunk, 0, size);
+ chunk_purge_wrapper(arena, chunk_hooks, chunk,
+ size, 0, size);
}
goto label_return;
}
- extent_node_init(node, arena, chunk, size, !unzeroed);
+ extent_node_init(node, arena, chunk, size, committed,
+ !unzeroed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
@@ -387,7 +494,10 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
- extent_node_size_get(prev)) == chunk) {
+ extent_node_size_get(prev)) == chunk &&
+ extent_node_committed_get(prev) == committed &&
+ !chunk_hooks->merge(extent_node_addr_get(prev),
+ extent_node_size_get(prev), chunk, size, false, arena->ind)) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
@@ -414,7 +524,8 @@ label_return:
}
void
-chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size)
{
assert(chunk != NULL);
@@ -422,57 +533,68 @@ chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (!maps_coalesce && size != chunksize) {
- chunk_dalloc_arena(arena, chunk, size, false);
- return;
- }
-
- chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
- true, chunk, size, false);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, true, false);
arena_maybe_purge(arena);
}
void
-chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size, bool zeroed)
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool zeroed)
{
+ bool committed;
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (have_dss && chunk_in_dss(chunk)) {
- chunk_record(arena, &arena->chunks_szad_dss,
- &arena->chunks_ad_dss, false, chunk, size, zeroed);
- } else if (chunk_dalloc_mmap(chunk, size)) {
- chunk_record(arena, &arena->chunks_szad_mmap,
- &arena->chunks_ad_mmap, false, chunk, size, zeroed);
- }
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ /* Try to deallocate. */
+ if (!chunk_hooks->dalloc(chunk, size, arena->ind))
+ return;
+ /* Try to decommit; purge if that fails. */
+ committed = chunk_hooks->decommit(chunk, size, arena->ind);
+ zeroed = !committed || chunk_hooks->purge(chunk, size, 0, size,
+ arena->ind);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, committed, zeroed);
}
-/*
- * Default arena chunk deallocation routine in the absence of user override.
- * This function isn't actually used by jemalloc, but it does the right thing if
- * the application passes calls through to it during chunk deallocation.
- */
-bool
+static bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{
- chunk_dalloc_arena(chunk_arena_get(arena_ind), chunk, size, false);
- return (false);
+ if (!have_dss || !chunk_in_dss(chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
}
void
-chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc, void *chunk,
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size)
{
- chunk_dalloc(chunk, size, arena->ind);
- if (config_valgrind && chunk_dalloc != chunk_dalloc_default)
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks->dalloc(chunk, size, arena->ind);
+ if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}
+static bool
+chunk_commit_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+ return (pages_commit(chunk, size));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+ return (pages_decommit(chunk, size));
+}
+
bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{
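
The default commit and decommit hooks above defer to pages_commit() and pages_decommit(), whose bodies are outside this diff. As a rough idea of what such hooks do, a POSIX sketch (an assumption, not jemalloc's implementation) could toggle access with mprotect(), returning false on success per the hook convention:

#include <stdbool.h>
#include <sys/mman.h>

static bool
example_commit_hook(void *chunk, size_t size, unsigned arena_ind)
{

	/* Re-enable access; mprotect() returns 0 on success. */
	return (mprotect(chunk, size, PROT_READ | PROT_WRITE) != 0);
}

static bool
example_decommit_hook(void *chunk, size_t size, unsigned arena_ind)
{

	/*
	 * Revoke access.  A production version would also let the OS
	 * reclaim the physical pages (e.g. with madvise()).
	 */
	return (mprotect(chunk, size, PROT_NONE) != 0);
}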
@@ -487,8 +609,8 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
length));
}
-bool
-chunk_purge_default(void *chunk, size_t offset, size_t length,
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
@@ -497,11 +619,35 @@ chunk_purge_default(void *chunk, size_t offset, size_t length,
}
bool
-chunk_purge_wrapper(arena_t *arena, chunk_purge_t *chunk_purge, void *chunk,
- size_t offset, size_t length)
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, size_t offset, size_t length)
+{
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
{
- return (chunk_purge(chunk, offset, length, arena->ind));
+ if (!maps_coalesce)
+ return (true);
+ return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
+ return (true);
+
+ return (false);
}
static rtree_node_elm_t *
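
The default purge hook's body falls between the hunks above and is not shown. For illustration, a user-supplied purge hook on Linux might hand the page range back to the OS with madvise(); this is a sketch under that assumption, and whether a false return may also promise zero-filled pages is a property of the hook contract rather than something this diff establishes:

#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>

static bool
example_purge_hook(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	/*
	 * Discard [offset, offset+length) within the chunk; madvise()
	 * returns 0 on success, so false is returned when it works.
	 */
	return (madvise((void *)((uintptr_t)chunk + offset), length,
	    MADV_DONTNEED) != 0);
}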