-rw-r--r--  include/jemalloc/internal/arena.h              |  2
-rw-r--r--  include/jemalloc/internal/chunk.h              |  2
-rw-r--r--  include/jemalloc/internal/private_symbols.txt  |  1
-rw-r--r--  src/arena.c                                    | 90
-rw-r--r--  src/chunk.c                                    | 79
-rw-r--r--  src/huge.c                                     | 20
6 files changed, 101 insertions(+), 93 deletions(-)
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index b6bfb25..68d1015 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -496,7 +496,7 @@ void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize, bool *zero);
+ extent_t *extent, size_t usize);
ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
ssize_t lg_dirty_mult);
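
The prototype change above is the core of this commit: the old interface passed the raw chunk address, the pre-expand size, and a bool *zero out-parameter, all of which the extent_t already records. A minimal before/after sketch of the call shape, using only names that appear in this diff (surrounding caller code elided):

    /* Before: address, old size, and zero state threaded by hand. */
    bool is_zeroed_chunk = false;
    if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
        &is_zeroed_chunk))
        return (true);

    /* After: the extent carries that state; callers read the merged
     * zero state back via extent_zeroed_get() once the expand succeeds. */
    if (arena_chunk_ralloc_huge_expand(tsdn, arena, extent, usize))
        return (true);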
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 52a6d56..9634975 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -75,6 +75,8 @@ bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
size_t length);
+bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, extent_t *a, extent_t *b);
bool chunk_boot(void);
void chunk_prefork(tsdn_t *tsdn);
void chunk_postfork_parent(tsdn_t *tsdn);
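
chunk_merge_wrapper() joins the chunk_*_wrapper family declared above and follows the same convention of returning true on failure. Per the implementation later in this diff, on success extent a absorbs b's range and b is released inside the wrapper. A hypothetical caller sketch, assuming a and b are already known to be contiguous and mergeable:

    if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, a, b)) {
        /* Hooks declined the merge; a and b are untouched and remain
         * two valid, separate extents. */
    } else {
        /* a now spans both ranges; b was freed inside the wrapper via
         * arena_extent_dalloc() and must not be used again. */
    }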
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index bd18e76..9b507b1 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -176,6 +176,7 @@ chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
+chunk_merge_wrapper
chunk_npages
chunk_postfork_child
chunk_postfork_parent
diff --git a/src/arena.c b/src/arena.c
index 5ea3fc8..2b92733 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -948,69 +948,71 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
malloc_mutex_unlock(tsdn, &arena->lock);
}
-static bool
-arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
- bool *zero, void *nchunk, size_t udiff, size_t cdiff)
-{
- bool err;
- bool commit = true;
-
- err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- chunksize, zero, &commit) == NULL);
- if (err) {
- /* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
- if (config_stats) {
- arena_huge_ralloc_stats_update_undo(arena, oldsize,
- usize);
- arena->stats.mapped -= cdiff;
- }
- arena_nactive_sub(arena, udiff >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
- } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
- cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- *zero, true);
- err = true;
- }
- return (err);
-}
-
bool
-arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, bool *zero)
+arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t usize)
{
bool err;
+ bool zero = false;
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
- void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
- size_t udiff = usize - oldsize;
- size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+ void *nchunk =
+ (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
+ size_t udiff = usize - extent_size_get(extent);
+ size_t cdiff = CHUNK_CEILING(usize) -
+ CHUNK_CEILING(extent_size_get(extent));
+ extent_t *trail;
malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */
if (config_stats) {
- arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ arena_huge_ralloc_stats_update(arena, extent_size_get(extent),
+ usize);
arena->stats.mapped += cdiff;
}
arena_nactive_add(arena, udiff >> LG_PAGE);
err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- chunksize, zero, true) == NULL);
+ chunksize, &zero, true) == NULL);
malloc_mutex_unlock(tsdn, &arena->lock);
+
if (err) {
- err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
- &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
- cdiff);
- } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
- cdiff, true, arena->ind)) {
+ bool commit = true;
+
+ if (chunk_alloc_wrapper(tsdn, arena, &chunk_hooks, nchunk,
+ cdiff, chunksize, &zero, &commit) == NULL)
+ goto label_revert;
+ }
+
+ trail = arena_extent_alloc(tsdn, arena);
+ if (trail == NULL) {
+ chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ zero, true);
+ goto label_revert;
+ }
+ extent_init(trail, arena, nchunk, cdiff, true, zero, true, false);
+ if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, extent, trail)) {
+ arena_extent_dalloc(tsdn, arena, trail);
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- *zero, true);
- err = true;
+ zero, true);
+ goto label_revert;
}
- return (err);
+ if (usize < extent_size_get(extent))
+ extent_size_set(extent, usize);
+
+ return (false);
+label_revert:
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update_undo(arena,
+ extent_size_get(extent), usize);
+ arena->stats.mapped -= cdiff;
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (true);
}
/*
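
The rewritten arena_chunk_ralloc_huge_expand() above derives the trail region from the extent itself. A worked example of that arithmetic, assuming the default 2 MiB chunksize (an assumption; the chunk size is not stated in this diff): growing an extent of size 3 MiB to usize = 7 MiB gives

    nchunk = CHUNK_CEILING(extent_past_get(extent))       /* base + 4 MiB */
    udiff  = 7 MiB - 3 MiB                                /* = 4 MiB */
    cdiff  = CHUNK_CEILING(7 MiB) - CHUNK_CEILING(3 MiB)  /* 8 - 4 = 4 MiB */

so the trail is a 4 MiB chunk-aligned run starting at the first chunk boundary past the live extent, and every failure path now funnels through label_revert, which undoes the optimistic stats and nactive updates in one place.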
diff --git a/src/chunk.c b/src/chunk.c
index f8d9e63..59ebd29 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -586,51 +586,26 @@ static void
chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_t *a, extent_t *b, extent_heap_t extent_heaps[NPSIZES], bool cache)
{
- rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
if (!chunk_can_coalesce(a, b))
return;
- if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
- extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
- arena->ind))
- return;
-
- /*
- * The rtree writes must happen while all the relevant elements are
- * owned, so the following code uses decomposed helper functions rather
- * than chunk_{,de}register() to do things in the right order.
- */
- extent_rtree_acquire(tsdn, a, true, false, &a_elm_a, &a_elm_b);
- extent_rtree_acquire(tsdn, b, true, false, &b_elm_a, &b_elm_b);
-
- if (a_elm_b != NULL) {
- rtree_elm_write_acquired(tsdn, &chunks_rtree, a_elm_b, NULL);
- rtree_elm_release(tsdn, &chunks_rtree, a_elm_b);
- }
- if (b_elm_b != NULL) {
- rtree_elm_write_acquired(tsdn, &chunks_rtree, b_elm_a, NULL);
- rtree_elm_release(tsdn, &chunks_rtree, b_elm_a);
- } else
- b_elm_b = b_elm_a;
-
extent_heaps_remove(extent_heaps, a);
extent_heaps_remove(extent_heaps, b);
arena_chunk_cache_maybe_remove(extent_arena_get(a), a, cache);
arena_chunk_cache_maybe_remove(extent_arena_get(b), b, cache);
- extent_size_set(a, extent_size_get(a) + extent_size_get(b));
- extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+ if (chunk_merge_wrapper(tsdn, arena, chunk_hooks, a, b)) {
+ extent_heaps_insert(extent_heaps, a);
+ extent_heaps_insert(extent_heaps, b);
+ arena_chunk_cache_maybe_insert(extent_arena_get(a), a, cache);
+ arena_chunk_cache_maybe_insert(extent_arena_get(b), b, cache);
+ return;
+ }
extent_heaps_insert(extent_heaps, a);
-
- extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
- extent_rtree_release(tsdn, a_elm_a, b_elm_b);
-
arena_chunk_cache_maybe_insert(extent_arena_get(a), a, cache);
-
- arena_extent_dalloc(tsdn, extent_arena_get(b), b);
}
static void
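
The simplified chunk_try_coalesce() above treats the merge as all-or-nothing. Its heap/cache bookkeeping around the wrapper call, spelled out (a reading of the code above, not upstream documentation):

    /* remove a, b from heaps/cache -> neither is visible to allocation
     * chunk_merge_wrapper(a, b):
     *   failure -> reinsert a and b unchanged
     *   success -> b no longer exists; reinsert only a, whose size is
     *              now extent_size(a) + extent_size(b) */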
@@ -821,6 +796,46 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
}
bool
+chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_t *a, extent_t *b)
+{
+ rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+
+ if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
+ extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
+ arena->ind))
+ return (true);
+
+ /*
+ * The rtree writes must happen while all the relevant elements are
+ * owned, so the following code uses decomposed helper functions rather
+ * than chunk_{,de}register() to do things in the right order.
+ */
+ extent_rtree_acquire(tsdn, a, true, false, &a_elm_a, &a_elm_b);
+ extent_rtree_acquire(tsdn, b, true, false, &b_elm_a, &b_elm_b);
+
+ if (a_elm_b != NULL) {
+ rtree_elm_write_acquired(tsdn, &chunks_rtree, a_elm_b, NULL);
+ rtree_elm_release(tsdn, &chunks_rtree, a_elm_b);
+ }
+ if (b_elm_b != NULL) {
+ rtree_elm_write_acquired(tsdn, &chunks_rtree, b_elm_a, NULL);
+ rtree_elm_release(tsdn, &chunks_rtree, b_elm_a);
+ } else
+ b_elm_b = b_elm_a;
+
+ extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+ extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+
+ extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
+ extent_rtree_release(tsdn, a_elm_a, b_elm_b);
+
+ arena_extent_dalloc(tsdn, extent_arena_get(b), b);
+
+ return (false);
+}
+
+bool
chunk_boot(void)
{
#ifdef _WIN32
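
chunk_merge_wrapper() above is a near-verbatim extraction of the rtree logic formerly inlined in chunk_try_coalesce(), now callable from arena.c as well. The element shuffle it performs (again a reading of the code, not upstream documentation):

    /* Each extent owns rtree elements for its first and last pages:
     *
     *   before:  [a_elm_a .. a_elm_b][b_elm_a .. b_elm_b]
     *   after:   [a_elm_a ......................... b_elm_b] -> merged a
     *
     * The interior boundary elements (a_elm_b and, when distinct from
     * b_elm_b, b_elm_a) are cleared while held; the first and last
     * elements of the combined range are then rewritten to point at a. */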
diff --git a/src/huge.c b/src/huge.c
index 48b191a..dc0d680 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -215,31 +215,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr,
size_t oldsize, size_t usize, bool zero)
{
arena_t *arena;
- bool is_zeroed_subchunk, is_zeroed_chunk;
+ bool is_zeroed_subchunk;
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
is_zeroed_subchunk = extent_zeroed_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- /*
- * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
- * update extent's zeroed field, and zero as necessary.
- */
- is_zeroed_chunk = false;
- if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
- &is_zeroed_chunk))
+ if (arena_chunk_ralloc_huge_expand(tsdn, arena, extent, usize))
return (true);
- /* Update the size of the huge allocation. */
- chunk_deregister(tsdn, extent);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- extent_size_set(extent, usize);
- extent_zeroed_set(extent, extent_zeroed_get(extent) && is_zeroed_chunk);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- chunk_reregister(tsdn, extent);
-
if (zero || (config_fill && unlikely(opt_zero))) {
+ bool is_zeroed_chunk = extent_zeroed_get(extent);
+
if (!is_zeroed_subchunk) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
CHUNK_CEILING(oldsize) - oldsize);
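
The deleted chunk_deregister()/chunk_reregister() pair is the payoff of routing the expand through chunk_merge_wrapper(): the merge updates the rtree mappings plus the extent's size and zeroed flag in place, so huge_ralloc_no_move_expand() no longer has to re-register the extent and can read the post-merge zero state directly. The two regions the zeroing logic distinguishes (inferred from the visible fragment; the rest of the hunk is truncated above):

    /* base                oldsize    CHUNK_CEILING(oldsize)        usize
     *  |---- old data ----|--- A ----|------------- B -------------|
     * A: zeroed above when !is_zeroed_subchunk
     * B: the merged trail, already zero when is_zeroed_chunk is true */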