author     Jason Evans <jasone@canonware.com>  2016-05-19 04:02:46 (GMT)
committer  Jason Evans <jasone@canonware.com>  2016-06-03 19:27:41 (GMT)
commit     de0305a7f3e443d48e012272e1b91c44d2b129d2 (patch)
tree       ba82031992078ee5dab5572bab15aeb8b31f56e3
parent     1ad060584f8ae4e0b9bc30c89ad7c1860ac3d89d (diff)
Add/use chunk_split_wrapper().
Remove redundant ptr/oldsize args from huge_*(). Refactor huge/chunk/arena code boundaries.
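
A minimal caller-side sketch of the interface change (hypothetical call
sites; the signatures and the shrink flow come from this diff, the glue
around them is illustrative):

    /*
     * huge_*() callers used to carry ptr/oldsize alongside the extent,
     * e.g.:
     *
     *     size_t sz = huge_salloc(tsdn, extent, ptr);
     *     huge_dalloc(tsdn, extent, ptr);
     *
     * The extent_t already records the allocation's address and size, so
     * those arguments drop out:
     */
    size_t sz = huge_salloc(tsdn, extent);

    /*
     * New primitive: chunk_split_wrapper() carves a size_a lead off an
     * extent (which continues to track the lead) and returns a new
     * extent_t for the size_b trail, or NULL on failure.
     * huge_ralloc_no_move_shrink() uses it to lop excess whole chunks off
     * a huge allocation and hand the trail back to the chunk cache:
     */
    extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
        extent, CHUNK_CEILING(usize), cdiff);
    if (trail != NULL) {
        arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks,
            extent_addr_get(trail), extent_size_get(trail),
            extent_committed_get(trail));
        arena_extent_dalloc(tsdn, arena, trail);
    }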
-rw-r--r--  include/jemalloc/internal/arena.h              |  38
-rw-r--r--  include/jemalloc/internal/chunk.h              |   8
-rw-r--r--  include/jemalloc/internal/huge.h               |  21
-rw-r--r--  include/jemalloc/internal/private_symbols.txt  |   3
-rw-r--r--  src/arena.c                                    | 511
-rw-r--r--  src/chunk.c                                    | 241
-rw-r--r--  src/huge.c                                     | 260
7 files changed, 563 insertions(+), 519 deletions(-)
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 68d1015..187b625 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -177,13 +177,6 @@ typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
/* Arena chunk header. */
struct arena_chunk_s {
/*
- * A pointer to the arena that owns the chunk is stored within the
- * extent structure. This field as a whole is used by chunks_rtree to
- * support both ivsalloc() and core-based debugging.
- */
- extent_t extent;
-
- /*
* Map of pages within chunk that keeps track of free/large/small. The
* first map_bias entries are omitted, since the chunk header does not
* need to be tracked in the map. This omission saves a header page
@@ -315,7 +308,7 @@ struct arena_s {
* order to avoid interactions between multiple threads that could make
* a single spare inadequate.
*/
- arena_chunk_t *spare;
+ extent_t *spare;
/* Minimum ratio (log base 2) of nactive:ndirty. */
ssize_t lg_dirty_mult;
@@ -481,22 +474,27 @@ typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t *run_quantize_floor;
extern run_quantize_t *run_quantize_ceil;
#endif
+extent_t *arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero);
+void arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
bool cache);
extent_t *arena_extent_alloc(tsdn_t *tsdn, arena_t *arena);
void arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool *zero);
+extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
+ size_t usize, size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t usize);
void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize);
+ extent_t *extent, size_t oldsize);
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize);
-bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, size_t usize);
+ extent_t *extent, size_t oldsize);
+void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent, size_t oldsize);
ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
ssize_t lg_dirty_mult);
@@ -1193,7 +1191,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
- ret = huge_prof_tctx_get(tsdn, extent, ptr);
+ ret = huge_prof_tctx_get(tsdn, extent);
return (ret);
}
@@ -1230,7 +1228,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
- huge_prof_tctx_set(tsdn, extent, ptr, tctx);
+ huge_prof_tctx_set(tsdn, extent, tctx);
}
JEMALLOC_INLINE void
@@ -1258,7 +1256,7 @@ arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
atomic_write_p(&elm->prof_tctx_pun,
(prof_tctx_t *)(uintptr_t)1U);
} else
- huge_prof_tctx_reset(tsdn, extent, ptr);
+ huge_prof_tctx_reset(tsdn, extent);
}
}
@@ -1362,7 +1360,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
ret = index2size(binind);
}
} else
- ret = huge_salloc(tsdn, extent, ptr);
+ ret = huge_salloc(tsdn, extent);
return (ret);
}
@@ -1413,7 +1411,7 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
}
}
} else
- huge_dalloc(tsdn, extent, ptr);
+ huge_dalloc(tsdn, extent);
}
JEMALLOC_ALWAYS_INLINE void
@@ -1470,7 +1468,7 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
}
}
} else
- huge_dalloc(tsdn, extent, ptr);
+ huge_dalloc(tsdn, extent);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 9634975..78cc4c2 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -55,10 +55,10 @@ chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
bool chunk_register(tsdn_t *tsdn, const extent_t *extent);
void chunk_deregister(tsdn_t *tsdn, const extent_t *extent);
void chunk_reregister(tsdn_t *tsdn, const extent_t *extent);
-void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
+extent_t *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- bool *zero, bool dalloc_extent);
-void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ bool *zero);
+extent_t *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
@@ -75,6 +75,8 @@ bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
size_t length);
+extent_t *chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t size_b);
bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_t *a, extent_t *b);
bool chunk_boot(void);
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index a385a20..bdc8f84 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -12,22 +12,19 @@
void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero);
-bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
- size_t oldsize, size_t usize_min, size_t usize_max, bool zero);
-void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
- size_t oldsize, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache);
+bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+ size_t usize_max, bool zero);
+void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr);
-size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent,
- const void *ptr);
-void huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
- prof_tctx_t *tctx);
-void huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr);
+void huge_dalloc(tsdn_t *tsdn, extent_t *extent);
+size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent);
+prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
+void huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 9b507b1..34a6816 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -12,6 +12,8 @@ arena_choose
arena_choose_hard
arena_choose_impl
arena_chunk_alloc_huge
+arena_chunk_cache_alloc
+arena_chunk_cache_dalloc
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
@@ -184,6 +186,7 @@ chunk_prefork
chunk_purge_wrapper
chunk_register
chunk_reregister
+chunk_split_wrapper
chunks_rtree
chunksize
chunksize_mask
diff --git a/src/arena.c b/src/arena.c
index 2b92733..a610ec1 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -220,6 +220,55 @@ arena_chunk_dirty_npages(const extent_t *extent)
return (extent_size_get(extent) >> LG_PAGE);
}
+static extent_t *
+arena_chunk_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero)
+{
+
+ malloc_mutex_assert_owner(tsdn, &arena->lock);
+
+ return (chunk_alloc_cache(tsdn, arena, chunk_hooks, new_addr, size,
+ alignment, zero));
+}
+
+extent_t *
+arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero)
+{
+ extent_t *extent;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ extent = arena_chunk_cache_alloc_locked(tsdn, arena, chunk_hooks,
+ new_addr, size, alignment, zero);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (extent);
+}
+
+static void
+arena_chunk_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed)
+{
+
+ malloc_mutex_assert_owner(tsdn, &arena->lock);
+
+ chunk_dalloc_cache(tsdn, arena, chunk_hooks, chunk, size, committed);
+ arena_maybe_purge(tsdn, arena);
+}
+
+void
+arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed)
+{
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_chunk_cache_dalloc_locked(tsdn, arena, chunk_hooks, chunk, size,
+ committed);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
{
@@ -492,112 +541,119 @@ arena_run_split_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
return (false);
}
-static arena_chunk_t *
+static extent_t *
arena_chunk_init_spare(arena_t *arena)
{
- arena_chunk_t *chunk;
+ extent_t *extent;
assert(arena->spare != NULL);
- chunk = arena->spare;
+ extent = arena->spare;
arena->spare = NULL;
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxrun);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxrun);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
-
- return (chunk);
-}
-
-static bool
-arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- bool zero)
-{
+ assert(arena_mapbits_allocated_get((arena_chunk_t *)
+ extent_addr_get(extent), map_bias) == 0);
+ assert(arena_mapbits_allocated_get((arena_chunk_t *)
+ extent_addr_get(extent), chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
+ extent_addr_get(extent), map_bias) == arena_maxrun);
+ assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
+ extent_addr_get(extent), chunk_npages-1) == arena_maxrun);
+ assert(arena_mapbits_dirty_get((arena_chunk_t *)
+ extent_addr_get(extent), map_bias) ==
+ arena_mapbits_dirty_get((arena_chunk_t *)extent_addr_get(extent),
+ chunk_npages-1));
- /*
- * The extent notion of "committed" doesn't directly apply to arena
- * chunks. Arbitrarily mark them as committed. The commit state of
- * runs is tracked individually, and upon chunk deallocation the entire
- * chunk is in a consistent commit state.
- */
- extent_init(&chunk->extent, arena, chunk, chunksize, true, zero, true,
- true);
- return (chunk_register(tsdn, &chunk->extent));
+ return (extent);
}
-static arena_chunk_t *
+static extent_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
- arena_chunk_t *chunk;
+ extent_t *extent;
malloc_mutex_unlock(tsdn, &arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
- NULL, chunksize, chunksize, zero, commit);
- if (chunk != NULL && !*commit) {
+ extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
+ chunksize, zero, commit);
+ if (extent != NULL && !*commit) {
/* Commit header. */
- if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, chunk,
- chunksize, 0, map_bias << LG_PAGE)) {
+ if (chunk_commit_wrapper(tsdn, arena, chunk_hooks,
+ extent_addr_get(extent), extent_size_get(extent), 0,
+ map_bias << LG_PAGE)) {
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- (void *)chunk, chunksize, *zero, *commit);
- chunk = NULL;
+ extent_addr_get(extent), extent_size_get(extent),
+ extent_zeroed_get(extent),
+ extent_committed_get(extent));
+ extent = NULL;
}
}
- if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
- if (!*commit) {
- /* Undo commit of header. */
- chunk_decommit_wrapper(tsdn, arena, chunk_hooks,
- chunk, chunksize, 0, map_bias << LG_PAGE);
+
+ if (extent != NULL) {
+ extent_slab_set(extent, true);
+
+ if (chunk_register(tsdn, extent)) {
+ if (!*commit) {
+ /* Undo commit of header. */
+ chunk_decommit_wrapper(tsdn, arena, chunk_hooks,
+ extent_addr_get(extent),
+ extent_size_get(extent), 0, map_bias <<
+ LG_PAGE);
+ }
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+ extent_addr_get(extent), extent_size_get(extent),
+ extent_zeroed_get(extent),
+ extent_committed_get(extent));
+ extent = NULL;
}
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
- chunksize, *zero, *commit);
- chunk = NULL;
}
malloc_mutex_lock(tsdn, &arena->lock);
- return (chunk);
+
+ return (extent);
}
-static arena_chunk_t *
+static extent_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
bool *commit)
{
- arena_chunk_t *chunk;
+ extent_t *extent;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
- chunksize, zero, true);
- if (chunk != NULL) {
- if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
- chunksize, true);
+ extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
+ chunksize, chunksize, zero);
+ if (extent != NULL) {
+ extent_slab_set(extent, true);
+
+ if (chunk_register(tsdn, extent)) {
+ arena_chunk_cache_dalloc_locked(tsdn, arena,
+ &chunk_hooks, extent_addr_get(extent),
+ extent_size_get(extent), true);
return (NULL);
}
*commit = true;
}
- if (chunk == NULL) {
- chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
+ if (extent == NULL) {
+ extent = arena_chunk_alloc_internal_hard(tsdn, arena,
&chunk_hooks, zero, commit);
+ if (extent == NULL)
+ return (NULL);
}
+ assert(extent_slab_get(extent));
- if (config_stats && chunk != NULL) {
- arena->stats.mapped += chunksize;
+ if (config_stats) {
+ arena->stats.mapped += extent_size_get(extent);
arena->stats.metadata_mapped += (map_bias << LG_PAGE);
}
- return (chunk);
+ return (extent);
}
-static arena_chunk_t *
+static extent_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
- arena_chunk_t *chunk;
+ extent_t *extent;
bool zero, commit;
size_t flag_unzeroed, flag_decommitted, i;
@@ -605,8 +661,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
zero = false;
commit = false;
- chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
- if (chunk == NULL)
+ extent = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
+ if (extent == NULL)
return (NULL);
/*
@@ -616,58 +672,63 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
*/
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
- flag_unzeroed | flag_decommitted);
+ arena_mapbits_unallocated_set((arena_chunk_t *)extent_addr_get(extent),
+ map_bias, arena_maxrun, flag_unzeroed | flag_decommitted);
/*
* There is no need to initialize the internal page map entries unless
* the chunk is not zeroed.
*/
if (!zero) {
- for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_internal_set(chunk, i, flag_unzeroed);
+ for (i = map_bias+1; i < chunk_npages-1; i++) {
+ arena_mapbits_internal_set((arena_chunk_t *)
+ extent_addr_get(extent), i, flag_unzeroed);
+ }
} else {
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
- assert(arena_mapbits_unzeroed_get(chunk, i) ==
- flag_unzeroed);
+ assert(arena_mapbits_unzeroed_get(
+ (arena_chunk_t *)extent_addr_get(extent), i)
+ == flag_unzeroed);
}
}
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
- flag_unzeroed);
+ arena_mapbits_unallocated_set((arena_chunk_t *)extent_addr_get(extent),
+ chunk_npages-1, arena_maxrun, flag_unzeroed);
- return (chunk);
+ return (extent);
}
-static arena_chunk_t *
+static extent_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
- arena_chunk_t *chunk;
+ extent_t *extent;
if (arena->spare != NULL)
- chunk = arena_chunk_init_spare(arena);
+ extent = arena_chunk_init_spare(arena);
else {
- chunk = arena_chunk_init_hard(tsdn, arena);
- if (chunk == NULL)
+ extent = arena_chunk_init_hard(tsdn, arena);
+ if (extent == NULL)
return (NULL);
}
- ql_elm_new(&chunk->extent, ql_link);
- ql_tail_insert(&arena->achunks, &chunk->extent, ql_link);
- arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
+ ql_elm_new(extent, ql_link);
+ ql_tail_insert(&arena->achunks, extent, ql_link);
+ arena_avail_insert(arena, (arena_chunk_t *)extent_addr_get(extent),
+ map_bias, chunk_npages-map_bias);
- return (chunk);
+ return (extent);
}
static void
-arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
{
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- chunk_deregister(tsdn, &chunk->extent);
+ chunk_deregister(tsdn, extent);
- committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
+ committed = (arena_mapbits_decommitted_get((arena_chunk_t *)
+ extent_addr_get(extent), map_bias) == 0);
if (!committed) {
/*
* Decommit the header. Mark the chunk as decommitted even if
@@ -675,37 +736,42 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
* chunk as committed has a high potential for causing later
* access of decommitted memory.
*/
- chunk_decommit_wrapper(tsdn, arena, &chunk_hooks, chunk,
- chunksize, 0, map_bias << LG_PAGE);
+ chunk_decommit_wrapper(tsdn, arena, &chunk_hooks,
+ extent_addr_get(extent), extent_size_get(extent), 0,
+ map_bias << LG_PAGE);
}
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
- committed);
+ arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks,
+ extent_addr_get(extent), extent_size_get(extent), committed);
if (config_stats) {
- arena->stats.mapped -= chunksize;
+ arena->stats.mapped -= extent_size_get(extent);
arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
}
+
+ arena_extent_dalloc(tsdn, arena, extent);
}
static void
-arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+arena_spare_discard(tsdn_t *tsdn, arena_t *arena, extent_t *spare)
{
assert(arena->spare != spare);
- if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
- arena_run_dirty_remove(arena, spare, map_bias,
- chunk_npages-map_bias);
+ if (arena_mapbits_dirty_get((arena_chunk_t *)extent_addr_get(spare),
+ map_bias) != 0) {
+ arena_run_dirty_remove(arena, (arena_chunk_t *)
+ extent_addr_get(spare), map_bias, chunk_npages-map_bias);
}
arena_chunk_discard(tsdn, arena, spare);
}
static void
-arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
{
- arena_chunk_t *spare;
+ arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
+ extent_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -721,9 +787,9 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
- ql_remove(&arena->achunks, &chunk->extent, ql_link);
+ ql_remove(&arena->achunks, extent, ql_link);
spare = arena->spare;
- arena->spare = chunk;
+ arena->spare = extent;
if (spare != NULL)
arena_spare_discard(tsdn, arena, spare);
}
@@ -779,19 +845,6 @@ arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
}
static void
-arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
-
- cassert(config_stats);
-
- arena->stats.ndalloc_huge--;
- arena->stats.allocated_huge += usize;
- arena->stats.hstats[index].ndalloc--;
- arena->stats.hstats[index].curhchunks++;
-}
-
-static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{
@@ -799,15 +852,6 @@ arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
arena_huge_malloc_stats_update(arena, usize);
}
-static void
-arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
- size_t usize)
-{
-
- arena_huge_dalloc_stats_update_undo(arena, oldsize);
- arena_huge_malloc_stats_update_undo(arena, usize);
-}
-
extent_t *
arena_extent_alloc(tsdn_t *tsdn, arena_t *arena)
{
@@ -834,17 +878,17 @@ arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
}
-static void *
+static extent_t *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
size_t csize)
{
- void *ret;
+ extent_t *extent;
bool commit = true;
- ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+ extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
alignment, zero, &commit);
- if (ret == NULL) {
+ if (extent == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
@@ -855,14 +899,14 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
malloc_mutex_unlock(tsdn, &arena->lock);
}
- return (ret);
+ return (extent);
}
-void *
+extent_t *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero)
{
- void *ret;
+ extent_t *extent;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize);
@@ -875,15 +919,15 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
}
arena_nactive_add(arena, usize >> LG_PAGE);
- ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
- alignment, zero, true);
+ extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
+ csize, alignment, zero);
malloc_mutex_unlock(tsdn, &arena->lock);
- if (ret == NULL) {
- ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
+ if (extent == NULL) {
+ extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
usize, alignment, zero, csize);
}
- return (ret);
+ return (extent);
}
void
@@ -900,14 +944,16 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
}
arena_nactive_sub(arena, usize >> LG_PAGE);
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+ arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, chunk, csize,
+ true);
malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize)
+arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldsize)
{
+ size_t usize = extent_size_get(extent);
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
@@ -923,9 +969,10 @@ arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
}
void
-arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize)
+arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldsize)
{
+ size_t usize = extent_size_get(extent);
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -936,83 +983,24 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
arena->stats.mapped -= cdiff;
}
arena_nactive_sub(arena, udiff >> LG_PAGE);
-
- if (cdiff != 0) {
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- void *nchunk = (void *)((uintptr_t)chunk +
- CHUNK_CEILING(usize));
-
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- true);
- }
malloc_mutex_unlock(tsdn, &arena->lock);
}
-bool
+void
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- size_t usize)
+ size_t oldsize)
{
- bool err;
- bool zero = false;
- chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
- void *nchunk =
- (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
- size_t udiff = usize - extent_size_get(extent);
- size_t cdiff = CHUNK_CEILING(usize) -
- CHUNK_CEILING(extent_size_get(extent));
- extent_t *trail;
+ size_t usize = extent_size_get(extent);
+ size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+ size_t udiff = usize - oldsize;
malloc_mutex_lock(tsdn, &arena->lock);
-
- /* Optimistically update stats. */
if (config_stats) {
- arena_huge_ralloc_stats_update(arena, extent_size_get(extent),
- usize);
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff;
}
arena_nactive_add(arena, udiff >> LG_PAGE);
-
- err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- chunksize, &zero, true) == NULL);
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- if (err) {
- bool commit = true;
-
- if (chunk_alloc_wrapper(tsdn, arena, &chunk_hooks, nchunk,
- cdiff, chunksize, &zero, &commit) == NULL)
- goto label_revert;
- }
-
- trail = arena_extent_alloc(tsdn, arena);
- if (trail == NULL) {
- chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- zero, true);
- goto label_revert;
- }
- extent_init(trail, arena, nchunk, cdiff, true, zero, true, false);
- if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, extent, trail)) {
- arena_extent_dalloc(tsdn, arena, trail);
- chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- zero, true);
- goto label_revert;
- }
-
- if (usize < extent_size_get(extent))
- extent_size_set(extent, usize);
-
- return (false);
-label_revert:
- /* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
- if (config_stats) {
- arena_huge_ralloc_stats_update_undo(arena,
- extent_size_get(extent), usize);
- arena->stats.mapped -= cdiff;
- }
- arena_nactive_sub(arena, udiff >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock);
- return (true);
}
/*
@@ -1053,8 +1041,8 @@ arena_run_alloc_large_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
- arena_chunk_t *chunk;
arena_run_t *run;
+ extent_t *extent;
assert(size <= arena_maxrun);
assert(size == PAGE_CEILING(size));
@@ -1067,9 +1055,10 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(tsdn, arena);
- if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ extent = arena_chunk_alloc(tsdn, arena);
+ if (extent != NULL) {
+ run = &arena_miscelm_get_mutable((arena_chunk_t *)
+ extent_addr_get(extent), map_bias)->run;
if (arena_run_split_large(tsdn, arena, iealloc(tsdn, run), run,
size, zero))
run = NULL;
@@ -1100,8 +1089,8 @@ arena_run_alloc_small_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
- arena_chunk_t *chunk;
arena_run_t *run;
+ extent_t *extent;
assert(size <= arena_maxrun);
assert(size == PAGE_CEILING(size));
@@ -1115,9 +1104,10 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(tsdn, arena);
- if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ extent = arena_chunk_alloc(tsdn, arena);
+ if (extent != NULL) {
+ run = &arena_miscelm_get_mutable(
+ (arena_chunk_t *)extent_addr_get(extent), map_bias)->run;
if (arena_run_split_small(tsdn, arena, iealloc(tsdn, run), run,
size, binind))
run = NULL;
@@ -1420,6 +1410,8 @@ void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{
+ malloc_mutex_assert_owner(tsdn, &arena->lock);
+
/* Don't recursively purge. */
if (arena->purging)
return;
@@ -1484,7 +1476,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (rdelm == &chunkselm->rd) {
extent_t *chunkselm_next;
bool zero;
- UNUSED void *chunk;
+ UNUSED extent_t *extent;
npages = extent_size_get(chunkselm) >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
@@ -1492,16 +1484,12 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
break;
chunkselm_next = qr_next(chunkselm, cc_link);
- /*
- * Allocate. chunkselm remains valid due to the
- * dalloc_extent=false argument to chunk_alloc_cache().
- */
+ /* Allocate. */
zero = false;
- chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
- extent_addr_get(chunkselm),
- extent_size_get(chunkselm), chunksize, &zero,
- false);
- assert(chunk == extent_addr_get(chunkselm));
+ extent = arena_chunk_cache_alloc_locked(tsdn, arena,
+ chunk_hooks, extent_addr_get(chunkselm),
+ extent_size_get(chunkselm), chunksize, &zero);
+ assert(extent == chunkselm);
assert(zero == extent_zeroed_get(chunkselm));
extent_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
@@ -1510,14 +1498,13 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunkselm = chunkselm_next;
} else {
extent_t *extent = iealloc(tsdn, rdelm);
- arena_chunk_t *chunk =
- (arena_chunk_t *)extent_addr_get(extent);
arena_chunk_map_misc_t *miscelm =
arena_rd_to_miscelm(rdelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
arena_run_t *run = &miscelm->run;
size_t run_size =
- arena_mapbits_unallocated_size_get(chunk, pageind);
+ arena_mapbits_unallocated_size_get((arena_chunk_t *)
+ extent_addr_get(extent), pageind);
npages = run_size >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
@@ -1525,14 +1512,16 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
break;
assert(pageind + npages <= chunk_npages);
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk, pageind+npages-1));
+ assert(arena_mapbits_dirty_get((arena_chunk_t *)
+ extent_addr_get(extent), pageind) ==
+ arena_mapbits_dirty_get((arena_chunk_t *)
+ extent_addr_get(extent), pageind+npages-1));
/*
* If purging the spare chunk's run, make it available
* prior to allocation.
*/
- if (chunk == arena->spare)
+ if (extent == arena->spare)
arena_chunk_alloc(tsdn, arena);
/* Temporarily allocate the free dirty run. */
@@ -1757,8 +1746,9 @@ arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
}
static void
-arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
+arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, extent_t *extent)
{
+ arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
size_t pageind, npages;
cassert(config_prof);
@@ -1773,10 +1763,10 @@ arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
if (arena_mapbits_large_get(chunk, pageind) != 0) {
void *ptr = (void *)((uintptr_t)chunk + (pageind
<< LG_PAGE));
- size_t usize = isalloc(tsd_tsdn(tsd),
- &chunk->extent, ptr, config_prof);
+ size_t usize = isalloc(tsd_tsdn(tsd), extent,
+ ptr, config_prof);
- prof_free(tsd, &chunk->extent, ptr, usize);
+ prof_free(tsd, extent, ptr, usize);
npages = arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
} else {
@@ -1819,8 +1809,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
/* Remove large allocations from prof sample set. */
if (config_prof && opt_prof) {
ql_foreach(extent, &arena->achunks, ql_link) {
- arena_achunk_prof_reset(tsd, arena,
- extent_addr_get(extent));
+ arena_achunk_prof_reset(tsd, arena, extent);
}
}
@@ -1845,7 +1834,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
/* Remove huge allocation from prof sample set. */
if (config_prof && opt_prof)
prof_free(tsd, extent, ptr, usize);
- huge_dalloc(tsd_tsdn(tsd), extent, ptr);
+ huge_dalloc(tsd_tsdn(tsd), extent);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
/* Cancel out unwanted effects on stats. */
if (config_stats)
@@ -1883,8 +1872,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
for (extent = ql_last(&arena->achunks, ql_link); extent != NULL; extent
= ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, extent, ql_link);
- arena_chunk_discard(tsd_tsdn(tsd), arena,
- extent_addr_get(extent));
+ arena_chunk_discard(tsd_tsdn(tsd), arena, extent);
}
/* Spare. */
@@ -2078,7 +2066,7 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
if (size == arena_maxrun) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE));
- arena_chunk_dalloc(tsdn, arena, chunk);
+ arena_chunk_dalloc(tsdn, arena, extent);
}
/*
@@ -3113,10 +3101,12 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
- } else {
- return (huge_ralloc_no_move(tsdn, extent, ptr, oldsize,
- usize_min, usize_max, zero));
+ } else if (oldsize >= chunksize && usize_max >= chunksize) {
+ return (huge_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ zero));
}
+
+ return (true);
}
static void *
@@ -3138,42 +3128,41 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
- size_t usize;
+ size_t usize, copysize;
usize = s2u(size);
if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
return (NULL);
if (likely(usize <= large_maxclass)) {
- size_t copysize;
-
/* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0,
zero))
return (ptr);
+ }
- /*
- * size and oldsize are different enough that we need to move
- * the object. In that case, fall back to allocating new space
- * and copying.
- */
- ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
- zero, tcache);
- if (ret == NULL)
- return (NULL);
+ if (oldsize >= chunksize && usize >= chunksize) {
+ return (huge_ralloc(tsdn, arena, extent, usize, alignment, zero,
+ tcache));
+ }
- /*
- * Junk/zero-filling were already done by
- * ipalloc()/arena_malloc().
- */
+ /*
+ * size and oldsize are different enough that we need to move the
+ * object. In that case, fall back to allocating new space and copying.
+ */
+ ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero,
+ tcache);
+ if (ret == NULL)
+ return (NULL);
- copysize = (usize < oldsize) ? usize : oldsize;
- memcpy(ret, ptr, copysize);
- isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
- } else {
- ret = huge_ralloc(tsdn, arena, extent, ptr, oldsize, usize,
- alignment, zero, tcache);
- }
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
+
+ copysize = (usize < oldsize) ? usize : oldsize;
+ memcpy(ret, ptr, copysize);
+ isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
return (ret);
}
diff --git a/src/chunk.c b/src/chunk.c
index 59ebd29..4efba4a 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -58,7 +58,8 @@ static void chunk_record(tsdn_t *tsdn, arena_t *arena,
static void
extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
{
- size_t psz = extent_size_quantize_floor(extent_size_get(extent));
+ size_t psz =
+ extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
pszind_t pind = psz2ind(psz);
extent_heap_insert(&extent_heaps[pind], extent);
}
@@ -66,7 +67,8 @@ extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
static void
extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
{
- size_t psz = extent_size_quantize_floor(extent_size_get(extent));
+ size_t psz =
+ extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
pszind_t pind = psz2ind(psz);
extent_heap_remove(&extent_heaps[pind], extent);
}
@@ -211,7 +213,7 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
extent_rtree_release(tsdn, elm_a, elm_b);
- if (config_prof && opt_prof) {
+ if (config_prof && opt_prof && extent_active_get(extent)) {
size_t nadd = (extent_size_get(extent) == 0) ? 1 :
extent_size_get(extent) / chunksize;
size_t cur = atomic_add_z(&curchunks, nadd);
@@ -239,7 +241,7 @@ chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
extent_rtree_release(tsdn, elm_a, elm_b);
- if (config_prof && opt_prof) {
+ if (config_prof && opt_prof && extent_active_get(extent)) {
size_t nsub = (extent_size_get(extent) == 0) ? 1 :
extent_size_get(extent) / chunksize;
assert(atomic_read_z(&curchunks) >= nsub);
@@ -293,23 +295,15 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
}
}
-static void *
+static extent_t *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit, bool dalloc_extent)
+ size_t size, size_t alignment, bool *zero, bool *commit)
{
- void *ret;
extent_t *extent;
size_t alloc_size, leadsize, trailsize;
- bool zeroed, committed;
assert(new_addr == NULL || alignment == chunksize);
- /*
- * Cached chunks use the extent linkage embedded in their headers, in
- * which case dalloc_extent is true, and new_addr is non-NULL because
- * we're operating on a specific chunk.
- */
- assert(dalloc_extent || new_addr != NULL);
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
@@ -338,99 +332,79 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
+ extent_heaps_remove(extent_heaps, extent);
+ arena_chunk_cache_maybe_remove(arena, extent, cache);
+
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
assert(new_addr == NULL || leadsize == 0);
assert(extent_size_get(extent) >= leadsize + size);
trailsize = extent_size_get(extent) - leadsize - size;
- ret = (void *)((uintptr_t)extent_addr_get(extent) + leadsize);
- zeroed = extent_zeroed_get(extent);
- if (zeroed)
+ if (extent_zeroed_get(extent))
*zero = true;
- committed = extent_committed_get(extent);
- if (committed)
+ if (extent_committed_get(extent))
*commit = true;
+
/* Split the lead. */
- if (leadsize != 0 &&
- chunk_hooks->split(extent_addr_get(extent),
- extent_size_get(extent), leadsize, size, false, arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- return (NULL);
- }
- /* Remove extent from the heap. */
- chunk_deregister(tsdn, extent);
- extent_heaps_remove(extent_heaps, extent);
- arena_chunk_cache_maybe_remove(arena, extent, cache);
if (leadsize != 0) {
- /* Insert the leading space as a smaller chunk. */
- extent_size_set(extent, leadsize);
- if (chunk_register(tsdn, extent)) {
+ extent_t *lead = extent;
+ extent = chunk_split_wrapper(tsdn, arena, chunk_hooks, lead,
+ leadsize, size + trailsize);
+ if (extent == NULL) {
chunk_leak(tsdn, arena, chunk_hooks, cache,
- extent_addr_get(extent), extent_size_get(extent));
- arena_extent_dalloc(tsdn, arena, extent);
- } else {
- extent_heaps_insert(extent_heaps, extent);
- arena_chunk_cache_maybe_insert(arena, extent, cache);
- }
- extent = NULL;
- }
- if (trailsize != 0) {
- /* Split the trail. */
- if (chunk_hooks->split(ret, size + trailsize, size,
- trailsize, false, arena->ind)) {
- if (dalloc_extent && extent != NULL)
- arena_extent_dalloc(tsdn, arena, extent);
+ extent_addr_get(lead), extent_size_get(lead));
+ arena_extent_dalloc(tsdn, arena, lead);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, extent_heaps,
- cache, ret, size + trailsize, zeroed, committed);
return (NULL);
}
- /* Insert the trailing space as a smaller chunk. */
- if (extent == NULL) {
- extent = arena_extent_alloc(tsdn, arena);
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks,
- extent_heaps, cache, ret, size + trailsize,
- zeroed, committed);
- return (NULL);
- }
- }
- extent_init(extent, arena, (void *)((uintptr_t)(ret) + size),
- trailsize, false, zeroed, committed, false);
- if (chunk_register(tsdn, extent)) {
+ extent_heaps_insert(extent_heaps, lead);
+ arena_chunk_cache_maybe_insert(arena, lead, cache);
+ }
+
+ /* Split the trail. */
+ if (trailsize != 0) {
+ extent_t *trail = chunk_split_wrapper(tsdn, arena, chunk_hooks,
+ extent, size, trailsize);
+ if (trail == NULL) {
chunk_leak(tsdn, arena, chunk_hooks, cache,
extent_addr_get(extent), extent_size_get(extent));
arena_extent_dalloc(tsdn, arena, extent);
- } else {
- extent_heaps_insert(extent_heaps, extent);
- arena_chunk_cache_maybe_insert(arena, extent, cache);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ return (NULL);
}
- extent = NULL;
+ extent_heaps_insert(extent_heaps, trail);
+ arena_chunk_cache_maybe_insert(arena, trail, cache);
}
- if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+
+ if (!extent_committed_get(extent) &&
+ chunk_hooks->commit(extent_addr_get(extent),
+ extent_size_get(extent), 0, extent_size_get(extent), arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache, ret,
- size, zeroed, committed);
+ chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache,
+ extent_addr_get(extent), extent_size_get(extent),
+ extent_zeroed_get(extent), extent_committed_get(extent));
+ arena_extent_dalloc(tsdn, arena, extent);
return (NULL);
}
+
+ extent_active_set(extent, true);
+
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- assert(dalloc_extent || extent != NULL);
- if (dalloc_extent && extent != NULL)
- arena_extent_dalloc(tsdn, arena, extent);
if (*zero) {
- if (!zeroed)
- memset(ret, 0, size);
- else if (config_debug) {
+ if (!extent_zeroed_get(extent)) {
+ memset(extent_addr_get(extent), 0,
+ extent_size_get(extent));
+ } else if (config_debug) {
size_t i;
- size_t *p = (size_t *)(uintptr_t)ret;
+ size_t *p = (size_t *)(uintptr_t)
+ extent_addr_get(extent);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
}
- return (ret);
+ return (extent);
}
/*
@@ -469,12 +443,11 @@ chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return (NULL);
}
-void *
+extent_t *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero,
- bool dalloc_extent)
+ void *new_addr, size_t size, size_t alignment, bool *zero)
{
- void *ret;
+ extent_t *extent;
bool commit;
assert(size != 0);
@@ -483,12 +456,12 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
commit = true;
- ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
- true, new_addr, size, alignment, zero, &commit, dalloc_extent);
- if (ret == NULL)
+ extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
+ true, new_addr, size, alignment, zero, &commit);
+ if (extent == NULL)
return (NULL);
assert(commit);
- return (ret);
+ return (extent);
}
static arena_t *
@@ -523,44 +496,51 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
return (ret);
}
-static void *
+static extent_t *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
- void *ret;
+ extent_t *extent;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
- false, new_addr, size, alignment, zero, commit, true);
+ extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
+ false, new_addr, size, alignment, zero, commit);
- if (config_stats && ret != NULL)
+ if (config_stats && extent != NULL)
arena->stats.retained -= size;
- return (ret);
+ return (extent);
}
-void *
+extent_t *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
- void *ret;
+ extent_t *extent;
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
- ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
+ extent = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, commit);
- if (ret == NULL) {
- ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
- commit, arena->ind);
- if (ret == NULL)
+ if (extent == NULL) {
+ void *chunk;
+
+ extent = arena_extent_alloc(tsdn, arena);
+ if (extent == NULL)
+ return (NULL);
+ chunk = chunk_hooks->alloc(new_addr, size, alignment,
+ zero, commit, arena->ind);
+ if (chunk == NULL)
return (NULL);
+ extent_init(extent, arena, chunk, size, true, zero, commit,
+ false);
}
- return (ret);
+ return (extent);
}
static bool
@@ -668,7 +648,6 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_record(tsdn, arena, chunk_hooks, arena->chunks_cached, true,
chunk, size, false, committed);
- arena_maybe_purge(tsdn, arena);
}
static bool
@@ -779,6 +758,67 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
return (false);
}
+extent_t *
+chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_t *extent, size_t size_a, size_t size_b)
+{
+ extent_t *trail;
+ rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
+
+ assert(CHUNK_CEILING(size_a) == size_a);
+ assert(CHUNK_CEILING(extent_size_get(extent)) == size_a +
+ CHUNK_CEILING(size_b));
+
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+
+ trail = arena_extent_alloc(tsdn, arena);
+ if (trail == NULL)
+ goto label_error_a;
+
+ {
+ extent_t lead;
+
+ extent_init(&lead, arena, extent_addr_get(extent), size_a,
+ extent_active_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent), extent_slab_get(extent));
+
+ if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
+ &lead_elm_b))
+ goto label_error_b;
+ }
+
+ extent_init(trail, arena, (void *)((uintptr_t)extent_addr_get(extent) +
+ size_a), CHUNK_CEILING(size_b), extent_active_get(extent),
+ extent_zeroed_get(extent), extent_committed_get(extent),
+ extent_slab_get(extent));
+ if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
+ &trail_elm_b))
+ goto label_error_c;
+
+ if (chunk_hooks->split(extent_addr_get(extent), size_a +
+ CHUNK_CEILING(size_b), size_a, CHUNK_CEILING(size_b),
+ extent_committed_get(extent), arena->ind))
+ goto label_error_d;
+
+ extent_size_set(extent, size_a);
+
+ extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
+ extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
+
+ extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
+ extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
+
+ return (trail);
+label_error_d:
+ extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
+label_error_c:
+ extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
+label_error_b:
+ arena_extent_dalloc(tsdn, arena, trail);
+label_error_a:
+ return (NULL);
+}
+
static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
bool committed, unsigned arena_ind)
@@ -801,6 +841,7 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
{
rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
arena->ind))
diff --git a/src/huge.c b/src/huge.c
index dc0d680..fe4c6e0 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -16,7 +16,6 @@ void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
- void *ret;
size_t ausize;
extent_t *extent;
bool is_zeroed;
@@ -30,12 +29,6 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return (NULL);
assert(ausize >= chunksize);
- /* Allocate an extent with which to track the chunk. */
- extent = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_t)),
- CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
- if (extent == NULL)
- return (NULL);
-
/*
* Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
@@ -43,19 +36,17 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
is_zeroed = zero;
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
- arena, usize, alignment, &is_zeroed)) == NULL) {
- idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
- true);
+ if (unlikely(arena == NULL) || (extent = arena_chunk_alloc_huge(tsdn,
+ arena, usize, alignment, &is_zeroed)) == NULL)
return (NULL);
- }
- extent_init(extent, arena, ret, usize, true, is_zeroed, true, false);
+ if (usize < extent_size_get(extent))
+ extent_size_set(extent, usize);
if (chunk_register(tsdn, extent)) {
- arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
- idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
- true);
+ arena_chunk_dalloc_huge(tsdn, arena, extent_addr_get(extent),
+ usize);
+ arena_extent_dalloc(tsdn, arena, extent);
return (NULL);
}
@@ -67,12 +58,12 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
- memset(ret, 0, usize);
+ memset(extent_addr_get(extent), 0, usize);
} else if (config_fill && unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, usize);
arena_decay_tick(tsdn, arena);
- return (ret);
+ return (extent_addr_get(extent));
}
#ifdef JEMALLOC_JET
@@ -99,11 +90,12 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
-huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,
- size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
+huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+ size_t usize_max, bool zero)
{
size_t usize, usize_next;
- arena_t *arena;
+ arena_t *arena = extent_arena_get(extent);
+ size_t oldsize = extent_size_get(extent);
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool pre_zeroed, post_zeroed;
@@ -115,20 +107,19 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,
if (oldsize == usize)
return;
- arena = extent_arena_get(extent);
pre_zeroed = extent_zeroed_get(extent);
/* Fill if necessary (shrinking). */
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize),
- JEMALLOC_FREE_JUNK, sdiff);
+ memset((void *)((uintptr_t)extent_addr_get(extent) +
+ usize), JEMALLOC_FREE_JUNK, sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
- sdiff);
+ &chunk_hooks, extent_addr_get(extent),
+ CHUNK_CEILING(oldsize), usize, sdiff);
}
} else
post_zeroed = pre_zeroed;
@@ -143,132 +134,157 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,
/* Update zeroed. */
extent_zeroed_set(extent, post_zeroed);
- arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
+ arena_chunk_ralloc_huge_similar(tsdn, arena, extent, oldsize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
if (zero || (config_fill && unlikely(opt_zero))) {
if (!pre_zeroed) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- usize - oldsize);
+ memset((void *)
+ ((uintptr_t)extent_addr_get(extent) +
+ oldsize), 0, usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK, usize - oldsize);
+ memset((void *)((uintptr_t)extent_addr_get(extent) +
+ oldsize), JEMALLOC_ALLOC_JUNK, usize - oldsize);
}
}
}
static bool
-huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, void *ptr,
- size_t oldsize, size_t usize)
+huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
{
- arena_t *arena;
- chunk_hooks_t chunk_hooks;
- size_t cdiff;
- bool pre_zeroed, post_zeroed;
-
- arena = extent_arena_get(extent);
- pre_zeroed = extent_zeroed_get(extent);
- chunk_hooks = chunk_hooks_get(tsdn, arena);
+ arena_t *arena = extent_arena_get(extent);
+ size_t oldsize = extent_size_get(extent);
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+ size_t sdiff = CHUNK_CEILING(usize) - usize;
assert(oldsize > usize);
/* Split excess chunks. */
- cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
- if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
- CHUNK_CEILING(usize), cdiff, true, arena->ind))
- return (true);
+ if (cdiff != 0) {
+ extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
+ extent, CHUNK_CEILING(usize), cdiff);
+ if (trail == NULL)
+ return (true);
- if (oldsize > usize) {
- size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
- huge_dalloc_junk(tsdn, (void *)((uintptr_t)ptr + usize),
- sdiff);
+ huge_dalloc_junk(tsdn, extent_addr_get(trail),
+ extent_size_get(trail));
+ }
+
+ arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks,
+ extent_addr_get(trail), extent_size_get(trail),
+ extent_committed_get(trail));
+
+ arena_extent_dalloc(tsdn, arena, trail);
+ }
+
+ /* Optionally fill trailing subchunk. */
+ if (sdiff != 0) {
+ bool post_zeroed;
+
+ if (config_fill && unlikely(opt_junk_free)) {
+ huge_dalloc_junk(tsdn,
+ (void *)((uintptr_t)extent_addr_get(extent) +
+ usize), sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
- usize), CHUNK_CEILING(oldsize),
- CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
- }
- } else
- post_zeroed = pre_zeroed;
+ &chunk_hooks, extent_addr_get(extent),
+ CHUNK_CEILING(usize), usize, sdiff);
- /* Update the size of the huge allocation. */
- chunk_deregister(tsdn, extent);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- extent_size_set(extent, usize);
- /* Update zeroed. */
- extent_zeroed_set(extent, post_zeroed);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- chunk_reregister(tsdn, extent);
+ if (config_fill && unlikely(opt_zero) && !post_zeroed) {
+ memset((void *)
+ ((uintptr_t)extent_addr_get(extent) +
+ usize), 0, sdiff);
+ }
+ }
+ extent_zeroed_set(extent, post_zeroed);
+ }
- /* Zap the excess chunks. */
- arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
+ arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldsize);
return (false);
}
static bool
-huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr,
- size_t oldsize, size_t usize, bool zero)
+huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+ bool zero)
{
- arena_t *arena;
- bool is_zeroed_subchunk;
-
- arena = extent_arena_get(extent);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- is_zeroed_subchunk = extent_zeroed_get(extent);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ arena_t *arena = extent_arena_get(extent);
+ size_t oldsize = extent_size_get(extent);
+ bool is_zeroed_subchunk = extent_zeroed_get(extent);
+ bool is_zeroed_chunk = false;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+ void *nchunk =
+ (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
+ extent_t *trail;
+
+ if ((trail = arena_chunk_cache_alloc(tsdn, arena, &chunk_hooks, nchunk,
+ cdiff, chunksize, &is_zeroed_chunk)) == NULL) {
+ bool commit = true;
+ if ((trail = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks,
+ nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit)) ==
+ NULL)
+ return (true);
+ }
- if (arena_chunk_ralloc_huge_expand(tsdn, arena, extent, usize))
+ if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, extent, trail)) {
+ arena_extent_dalloc(tsdn, arena, trail);
+ chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks,
+ extent_addr_get(trail), extent_size_get(trail),
+ extent_zeroed_get(trail), extent_committed_get(trail));
return (true);
+ }
if (zero || (config_fill && unlikely(opt_zero))) {
- bool is_zeroed_chunk = extent_zeroed_get(extent);
-
if (!is_zeroed_subchunk) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- CHUNK_CEILING(oldsize) - oldsize);
+ memset((void *)((uintptr_t)extent_addr_get(extent) +
+ oldsize), 0, CHUNK_CEILING(oldsize) - oldsize);
}
if (!is_zeroed_chunk) {
- memset((void *)((uintptr_t)ptr +
+ memset((void *)((uintptr_t)extent_addr_get(extent) +
CHUNK_CEILING(oldsize)), 0, usize -
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
- usize - oldsize);
+ memset((void *)((uintptr_t)extent_addr_get(extent) + oldsize),
+ JEMALLOC_ALLOC_JUNK, usize - oldsize);
}
+ if (usize < extent_size_get(extent))
+ extent_size_set(extent, usize);
+
+ arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldsize);
+
return (false);
}
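huge_ralloc_no_move_expand() now has a clear shape: try the arena chunk cache for memory starting exactly where the extent ends, fall back to a fresh mapping, then merge the trail into the extent, undoing the allocation if the merge is refused. A toy model of that control flow follows; a bump-pointer "address space" stands in for the chunk hooks, and none of these names are jemalloc's API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { size_t off; size_t size; } extent_t;

#define SPACE_SIZE ((size_t)1 << 20)	/* pretend address space */
static size_t frontier = 0;		/* next unmapped offset */

/* Stand-in for the cache-then-chunk_alloc_wrapper() fallback pair. */
static bool
alloc_at(extent_t *out, size_t at, size_t size)
{
	if (at != frontier || at + size > SPACE_SIZE)
		return false;	/* nothing contiguous available */
	out->off = at;
	out->size = size;
	frontier += size;
	return true;
}

/* Grow `extent` in place by `cdiff` bytes; returns true on failure. */
static bool
expand_in_place(extent_t *extent, size_t cdiff)
{
	extent_t trail;
	size_t nchunk = extent->off + extent->size;

	if (!alloc_at(&trail, nchunk, cdiff))
		return true;	/* caller falls back to a moving realloc */
	/* chunk_merge_wrapper() analogue: trail abuts extent, absorb it. */
	extent->size += trail.size;
	return false;
}

int
main(void)
{
	extent_t e = {0, 4096};
	frontier = 4096;	/* the extent occupies [0, 4096) */
	if (!expand_in_place(&e, 8192))
		printf("expanded in place to %zu bytes\n", e.size);
	return 0;
}
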
bool
-huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero)
+huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+ size_t usize_max, bool zero)
{
- assert(s2u(oldsize) == oldsize);
+ assert(s2u(extent_size_get(extent)) == extent_size_get(extent));
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
+ /* Both allocation sizes must be huge to avoid a move. */
+ assert(extent_size_get(extent) >= chunksize && usize_max >= chunksize);
- /* Both allocations must be huge to avoid a move. */
- if (oldsize < chunksize || usize_max < chunksize)
- return (true);
-
- if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+ if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(extent_size_get(extent))) {
/* Attempt to expand the allocation in-place. */
- if (!huge_ralloc_no_move_expand(tsdn, extent, ptr, oldsize,
- usize_max, zero)) {
+ if (!huge_ralloc_no_move_expand(tsdn, extent, usize_max,
+ zero)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
- CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
- extent, ptr, oldsize, usize_min, zero)) {
+ CHUNK_CEILING(extent_size_get(extent)) &&
+ huge_ralloc_no_move_expand(tsdn, extent, usize_min, zero)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
}
@@ -278,18 +294,18 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
* Avoid moving the allocation if the existing chunk size accommodates
* the new size.
*/
- if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
- && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
- huge_ralloc_no_move_similar(tsdn, extent, ptr, oldsize,
- usize_min, usize_max, zero);
+ if (CHUNK_CEILING(extent_size_get(extent)) >= CHUNK_CEILING(usize_min)
+ && CHUNK_CEILING(extent_size_get(extent)) <=
+ CHUNK_CEILING(usize_max)) {
+ huge_ralloc_no_move_similar(tsdn, extent, usize_min, usize_max,
+ zero);
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
}
/* Attempt to shrink the allocation in-place. */
- if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
- if (!huge_ralloc_no_move_shrink(tsdn, extent, ptr, oldsize,
- usize_max)) {
+ if (CHUNK_CEILING(extent_size_get(extent)) > CHUNK_CEILING(usize_max)) {
+ if (!huge_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
}
@@ -308,22 +324,23 @@ huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
}
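With the ptr/oldsize arguments gone, huge_ralloc_no_move() reads as a ladder of chunk-ceiling comparisons: expand when the new ceiling is larger, reuse the same chunks when it matches, shrink when it is smaller. The classifier below mirrors that ladder, collapsing usize_min/usize_max into a single usize for brevity; it is a sketch of the decision order, not a drop-in:

#include <stdio.h>
#include <stddef.h>

#define CHUNKSIZE_ ((size_t)2 << 20)	/* assumed 2 MiB chunks */
#define CEIL_(s) (((s) + CHUNKSIZE_ - 1) & ~(CHUNKSIZE_ - 1))

/* Which in-place strategy huge_ralloc_no_move() tries for a resize. */
static const char *
no_move_strategy(size_t oldsize, size_t usize)
{
	if (CEIL_(usize) > CEIL_(oldsize))
		return "expand";	/* needs extra trailing chunks */
	if (CEIL_(usize) == CEIL_(oldsize))
		return "similar";	/* same chunks; adjust fill and size */
	return "shrink";		/* split excess chunks off the tail */
}

int
main(void)
{
	printf("%s\n", no_move_strategy(4 * CHUNKSIZE_, 6 * CHUNKSIZE_));
	printf("%s\n", no_move_strategy(4 * CHUNKSIZE_, 4 * CHUNKSIZE_ - 64));
	printf("%s\n", no_move_strategy(4 * CHUNKSIZE_, CHUNKSIZE_));
	return 0;
}
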
void *
-huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
- size_t oldsize, size_t usize, size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= HUGE_MAXCLASS);
+ /* Both allocation sizes must be huge to avoid a move. */
+ assert(extent_size_get(extent) >= chunksize && usize >= chunksize);
/* Try to avoid moving the allocation. */
- if (!huge_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, usize,
- zero))
- return (ptr);
+ if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero))
+ return (extent_addr_get(extent));
/*
- * usize and oldsize are different enough that we need to use a
+ * usize and old size are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
@@ -331,14 +348,16 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
if (ret == NULL)
return (NULL);
- copysize = (usize < oldsize) ? usize : oldsize;
- memcpy(ret, ptr, copysize);
- isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
+ copysize = (usize < extent_size_get(extent)) ? usize :
+ extent_size_get(extent);
+ memcpy(ret, extent_addr_get(extent), copysize);
+ isdalloct(tsdn, extent, extent_addr_get(extent),
+ extent_size_get(extent), tcache, true);
return (ret);
}
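When no in-place strategy applies, huge_ralloc() falls back to the classic allocate/copy/free sequence, copying only min(usize, oldsize) bytes. The same shape in standalone C, with plain malloc/free standing in for huge_ralloc_move_helper() and isdalloct():

#include <stdlib.h>
#include <string.h>

/* The shape of huge_ralloc()'s fallback: allocate, copy, free. */
static void *
ralloc_move(void *old, size_t oldsize, size_t usize)
{
	void *ret = malloc(usize);	/* huge_ralloc_move_helper() stand-in */
	size_t copysize;

	if (ret == NULL)
		return NULL;
	/* Copy only the bytes both the old and new sizes can hold. */
	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, old, copysize);
	free(old);			/* isdalloct() stand-in */
	return ret;
}

int
main(void)
{
	char *p = malloc(8);

	if (p == NULL)
		return 1;
	memcpy(p, "abcdefgh", 8);
	p = ralloc_move(p, 8, 4);	/* keeps the first 4 bytes */
	free(p);
	return 0;
}
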
void
-huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
+huge_dalloc(tsdn_t *tsdn, extent_t *extent)
{
arena_t *arena;
@@ -352,13 +371,13 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
extent_size_get(extent));
arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent),
extent_addr_get(extent), extent_size_get(extent));
- idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true, true);
+ arena_extent_dalloc(tsdn, arena, extent);
arena_decay_tick(tsdn, arena);
}

size_t
-huge_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
+huge_salloc(tsdn_t *tsdn, const extent_t *extent)
{
size_t size;
arena_t *arena;
@@ -372,13 +391,11 @@ huge_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
}

prof_tctx_t *
-huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
+huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent)
{
prof_tctx_t *tctx;
arena_t *arena;
- assert(extent == iealloc(tsdn, ptr));
-
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
tctx = extent_prof_tctx_get(extent);
@@ -388,13 +405,10 @@ huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
}

void
-huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
- prof_tctx_t *tctx)
+huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx)
{
arena_t *arena;
- assert(extent == iealloc(tsdn, ptr));
-
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
extent_prof_tctx_set(extent, tctx);
@@ -402,8 +416,8 @@ huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
}

void
-huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr)
+huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent)
{
- huge_prof_tctx_set(tsdn, extent, ptr, (prof_tctx_t *)(uintptr_t)1U);
+ huge_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}