Diffstat (limited to 'src')
-rw-r--r--   src/arena.c    8
-rw-r--r--   src/chunk.c   47
-rw-r--r--   src/huge.c    74
3 files changed, 101 insertions, 28 deletions
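
This commit threads a placement hint, new_addr, through the chunk allocation path (arena.c, chunk.c) and uses it in huge.c to attempt expanding huge allocations in place instead of always allocating elsewhere and copying. The user-overridable chunk allocation hook gains the hint as a new first parameter; judging from the updated chunk_alloc_default() signature further down, the corresponding function-pointer typedef in the headers (not shown in this diff) would now read roughly as follows:

/*
 * Sketch inferred from chunk_alloc_default() below; the real typedef lives in
 * the jemalloc headers, which this diff does not touch.
 */
typedef void *(chunk_alloc_t)(void *new_addr, size_t size, size_t alignment,
    bool *zero, unsigned arena_ind);

A NULL new_addr preserves the old behavior; a non-NULL value requests a chunk at exactly that address, and callers must be prepared for the request to fail.
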
diff --git a/src/arena.c b/src/arena.c
index c223946..b7300a9 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -450,7 +450,7 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
- arena->ind, size, alignment, zero);
+ arena->ind, NULL, size, alignment, zero);
malloc_mutex_lock(&arena->lock);
if (config_stats && chunk != NULL)
arena->stats.mapped += chunksize;
@@ -459,8 +459,8 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
}
void *
-arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
- bool *zero)
+arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero)
{
void *ret;
chunk_alloc_t *chunk_alloc;
@@ -480,7 +480,7 @@ arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
malloc_mutex_unlock(&arena->lock);
ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
- size, alignment, zero);
+ new_addr, size, alignment, zero);
if (config_stats) {
if (ret != NULL)
stats_cactive_add(size);
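
arena_chunk_alloc_huge() simply forwards the hint, so a caller that wants contiguous growth passes the address just past its current mapping and treats anything else as failure. A user-supplied hook could honor the hint in the same spirit; the sketch below (a hypothetical my_chunk_alloc, not part of this change) asks mmap() for the hinted address without MAP_FIXED and backs out if the kernel places the mapping elsewhere or misaligns it.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	void *ret;

	(void)arena_ind;
	ret = mmap(new_addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	if ((new_addr != NULL && ret != new_addr) ||
	    ((uintptr_t)ret & (alignment - 1)) != 0) {
		/* Hint or alignment not honored; let jemalloc fall back. */
		munmap(ret, size);
		return (NULL);
	}
	*zero = true;	/* fresh anonymous mappings are zero-filled */
	return (ret);
}
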
diff --git a/src/chunk.c b/src/chunk.c
index cde8606..32b8b3a 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -42,8 +42,8 @@ static void chunk_dalloc_core(void *chunk, size_t size);
/******************************************************************************/
static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
- size_t alignment, bool base, bool *zero)
+chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
+ void *new_addr, size_t size, size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
@@ -65,11 +65,11 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- key.addr = NULL;
+ key.addr = new_addr;
key.size = alloc_size;
malloc_mutex_lock(&chunks_mtx);
node = extent_tree_szad_nsearch(chunks_szad, &key);
- if (node == NULL) {
+ if (node == NULL || (new_addr && node->addr != new_addr)) {
malloc_mutex_unlock(&chunks_mtx);
return (NULL);
}
@@ -142,8 +142,8 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
* them if they are returned.
*/
static void *
-chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
- dss_prec_t dss_prec)
+chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
+ bool *zero, dss_prec_t dss_prec)
{
void *ret;
@@ -154,24 +154,30 @@ chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
+ new_addr, size, alignment, base, zero)) != NULL)
return (ret);
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
+ /* requesting an address only implemented for recycle */
+ if (new_addr == NULL
+ && (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
/* mmap. */
- if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
- alignment, base, zero)) != NULL)
+ if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, new_addr,
+ size, alignment, base, zero)) != NULL)
return (ret);
- if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
+ /* requesting an address only implemented for recycle */
+ if (new_addr == NULL &&
+ (ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
+ new_addr, size, alignment, base, zero)) != NULL)
return (ret);
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
+ /* requesting an address only implemented for recycle */
+ if (new_addr == NULL &&
+ (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
return (ret);
}
@@ -219,7 +225,7 @@ chunk_alloc_base(size_t size)
bool zero;
zero = false;
- ret = chunk_alloc_core(size, chunksize, true, &zero,
+ ret = chunk_alloc_core(NULL, size, chunksize, true, &zero,
chunk_dss_prec_get());
if (ret == NULL)
return (NULL);
@@ -232,11 +238,12 @@ chunk_alloc_base(size_t size)
void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
- unsigned arena_ind, size_t size, size_t alignment, bool *zero)
+ unsigned arena_ind, void *new_addr, size_t size, size_t alignment,
+ bool *zero)
{
void *ret;
- ret = chunk_alloc(size, alignment, zero, arena_ind);
+ ret = chunk_alloc(new_addr, size, alignment, zero, arena_ind);
if (ret != NULL && chunk_register(ret, size, false)) {
chunk_dalloc(ret, size, arena_ind);
ret = NULL;
@@ -247,11 +254,11 @@ chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
/* Default arena chunk allocation routine in the absence of user override. */
void *
-chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
unsigned arena_ind)
{
- return (chunk_alloc_core(size, alignment, false, zero,
+ return (chunk_alloc_core(new_addr, size, alignment, false, zero,
arenas[arena_ind]->dss_prec));
}
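
Only the recycling path honors the hint: chunk_recycle() now seeds the size/address tree lookup with new_addr and refuses the match unless the chosen extent starts exactly at the requested address, while the dss and mmap fallbacks are skipped whenever a hint is present. The standalone sketch below (a simplified recycle_find() over a flat array, not jemalloc's extent tree) shows the same accept/reject rule.

#include <stddef.h>

typedef struct {
	void	*addr;
	size_t	size;
} cached_extent_t;

/*
 * Simplified version of the new acceptance rule: without a hint, any cached
 * extent large enough will do; with a hint, only an extent that begins at
 * new_addr qualifies.  Returning NULL sends the caller to a fresh mapping,
 * and that fallback is only taken when new_addr is NULL.
 */
static cached_extent_t *
recycle_find(cached_extent_t *cache, size_t ncached, void *new_addr,
    size_t alloc_size)
{
	size_t i;

	for (i = 0; i < ncached; i++) {
		if (cache[i].size < alloc_size)
			continue;
		if (new_addr == NULL || cache[i].addr == new_addr)
			return (&cache[i]);
	}
	return (NULL);
}
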
diff --git a/src/huge.c b/src/huge.c
index 2f059b4..6bdc076 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -47,7 +47,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
*/
is_zeroed = zero;
arena = choose_arena(tsd, arena);
- ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
+ ret = arena_chunk_alloc_huge(arena, NULL, csize, alignment, &is_zeroed);
if (ret == NULL) {
base_node_dalloc(node);
return (NULL);
@@ -95,8 +95,66 @@ huge_dalloc_junk(void *ptr, size_t usize)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
+static bool
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
+ size_t csize;
+ void *expand_addr;
+ size_t expand_size;
+ extent_node_t *node, key;
+ arena_t *arena;
+ bool is_zeroed;
+ void *ret;
+
+ csize = CHUNK_CEILING(size);
+ if (csize == 0) {
+ /* size is large enough to cause size_t wrap-around. */
+ return (true);
+ }
+
+ expand_addr = ptr + oldsize;
+ expand_size = csize - oldsize;
+
+ malloc_mutex_lock(&huge_mtx);
+
+ key.addr = ptr;
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+ assert(node->addr == ptr);
+
+ /* Find the current arena. */
+ arena = node->arena;
+
+ malloc_mutex_unlock(&huge_mtx);
+
+ /*
+ * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+ * it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed = zero;
+ ret = arena_chunk_alloc_huge(arena, expand_addr, expand_size, chunksize,
+ &is_zeroed);
+ if (ret == NULL)
+ return (true);
+
+ assert(ret == expand_addr);
+
+ malloc_mutex_lock(&huge_mtx);
+ /* Update the size of the huge allocation. */
+ node->size = csize;
+ malloc_mutex_unlock(&huge_mtx);
+
+ if (config_fill && !zero) {
+ if (unlikely(opt_junk))
+ memset(expand_addr, 0xa5, expand_size);
+ else if (unlikely(opt_zero) && !is_zeroed)
+ memset(expand_addr, 0, expand_size);
+ }
+ return (false);
+}
+
bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
+ bool zero)
{
/* Both allocations must be huge to avoid a move. */
@@ -145,7 +203,15 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
return (false);
}
- return (true);
+ /* Attempt to expand the allocation in-place. */
+ if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
+ if (extra == 0)
+ return (true);
+
+ /* Try again, this time without extra. */
+ return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
+ }
+ return (false);
}
void *
@@ -156,7 +222,7 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
- if (!huge_ralloc_no_move(ptr, oldsize, size, extra))
+ if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
return (ptr);
/*
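
Taken together, huge_ralloc_no_move() can now grow a huge allocation without copying whenever the address range immediately after it can be recycled. From application code the effect is only visible indirectly: growing a huge object with realloc() may keep the same address where it previously always moved. The demonstration below assumes the default 4 MiB chunk size of this jemalloc generation, and either outcome is valid since success depends on what happens to be mapped after the allocation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	size_t chunksz = (size_t)4 << 20;	/* assumed default chunk size */
	void *p = malloc(8 * chunksz);		/* huge allocation */
	uintptr_t before;
	void *q;

	if (p == NULL)
		return (1);
	memset(p, 0xa5, 8 * chunksz);
	before = (uintptr_t)p;
	q = realloc(p, 12 * chunksz);		/* may now expand in place */
	if (q == NULL) {
		free(p);
		return (1);
	}
	printf("grew %s\n", (uintptr_t)q == before ? "in place" :
	    "with a move");
	free(q);
	return (0);
}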