path: root/src/huge.c
author     Jason Evans <jasone@canonware.com>  2015-09-15 22:55:37 (GMT)
committer  Jason Evans <jasone@canonware.com>  2015-09-15 22:55:37 (GMT)
commit     9898051fd16ebd5d29dc27aae5e069fe04486ff3 (patch)
tree       adeab60b769c0dffa0782bd46eb60e741b4a2ff9 /src/huge.c
parent     6e98caf8f064482b9ab292ef3638dea67420bbc2 (diff)
parent     1d7540c9d71ee8a85ea97c9459698e090ee04719 (diff)
Merge branch 'dev' (tag: 4.0.1)
Diffstat (limited to 'src/huge.c')
-rw-r--r--    src/huge.c    130
1 file changed, 61 insertions(+), 69 deletions(-)
diff --git a/src/huge.c b/src/huge.c
index 54c2114..f8778db 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -126,18 +126,19 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
-huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
- size_t size, size_t extra, bool zero)
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
- size_t usize_next;
+ size_t usize, usize_next;
extent_node_t *node;
arena_t *arena;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool zeroed;
/* Increase usize to incorporate extra. */
- while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
- usize = usize_next;
+ for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
+    <= oldsize; usize = usize_next)
+ ; /* Do nothing. */
if (oldsize == usize)
return;
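
The hunk above replaces the old while-loop, which climbed size classes from a caller-supplied usize toward s2u(size+extra), with a for-loop that starts from usize_min and stops once the next size class would exceed usize_max or oldsize. It also relaxes the bound from "< oldsize" to "<= oldsize", so a size class exactly equal to oldsize is now reachable. A minimal standalone sketch of the same ascent, with s2u_stub() as a stand-in for jemalloc's internal size-class rounding (the 4 KiB rounding is illustrative only):

#include <stddef.h>

/*
 * Stand-in for jemalloc's internal s2u(): round a request up to a
 * supported size class.  Real size classes are more involved; 4 KiB
 * multiples are used here purely for illustration.
 */
static size_t
s2u_stub(size_t size)
{
	return ((size + 4095) & ~(size_t)4095);
}

/*
 * Same ascent as the new for-loop: settle on the largest size class in
 * [usize_min, usize_max] that still fits within oldsize.
 */
static size_t
usize_ascend(size_t usize_min, size_t usize_max, size_t oldsize)
{
	size_t usize, usize_next;

	for (usize = usize_min; usize < usize_max && (usize_next =
	    s2u_stub(usize+1)) <= oldsize; usize = usize_next)
		; /* Do nothing. */
	return (usize);
}
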
@@ -148,11 +149,12 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
/* Fill if necessary (shrinking). */
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
- zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
- CHUNK_CEILING(usize), usize, sdiff);
if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
zeroed = false;
+ } else {
+ zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
+ CHUNK_CEILING(oldsize), usize, sdiff);
}
} else
zeroed = true;
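
Note the reordering here: junk filling now happens instead of, rather than after, the purge. The removed lines purged first unconditionally, then the junk path overwrote the pages with 0x5a and discarded the zeroed result, so the purge was wasted work. A hedged sketch of the resulting decision, with chunk_purge_stub() as a hypothetical stand-in for chunk_purge_wrapper():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Hypothetical stand-in for chunk_purge_wrapper(): like jemalloc's
 * purge hooks, it returns true on failure, and a successful purge
 * leaves the range reading back as zeroes.
 */
static bool
chunk_purge_stub(void *ptr, size_t offset, size_t length)
{
	(void)ptr; (void)offset; (void)length;
	return (false);	/* Pretend the purge succeeded. */
}

/*
 * Shrink-time fill, mirroring the patched branch order: junk filling
 * makes the trailing bytes non-zero by definition, so the purge (and
 * its zeroed result) only matters on the non-junk path.
 */
static bool
shrink_fill(void *ptr, size_t oldsize, size_t usize, bool junk_free)
{
	size_t sdiff = oldsize - usize;
	bool zeroed;

	if (junk_free) {
		memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
		zeroed = false;
	} else
		zeroed = !chunk_purge_stub(ptr, usize, sdiff);
	return (zeroed);
}
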
@@ -194,6 +196,8 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
arena = extent_node_arena_get(node);
chunk_hooks = chunk_hooks_get(arena);
+ assert(oldsize > usize);
+
/* Split excess chunks. */
cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
@@ -202,14 +206,15 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
- zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
- CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
- CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr +
- usize), sdiff);
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
sdiff);
zeroed = false;
+ } else {
+ zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+ CHUNK_CEILING(oldsize),
+ CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
zeroed = true;
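
The same reordering is applied to the shrink path, and both hunks also fix the purge call's size argument: they now pass CHUNK_CEILING(oldsize), the extent the allocation still occupies, where the removed lines passed CHUNK_CEILING(usize), the post-shrink extent. For reference, the shape of the hook ultimately invoked, per the jemalloc 4.x chunk-hooks documentation (reproduced from memory, so treat it as a sketch):

#include <stdbool.h>
#include <stddef.h>

/*
 * jemalloc 4.x chunk purge hook: discard the physical pages backing
 * [(char *)chunk + offset, (char *)chunk + offset + length), where
 * size is the extent of the whole chunk.  Returns true on failure, in
 * which case the pages cannot be assumed zeroed.
 */
typedef bool (chunk_purge_t)(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
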
@@ -228,18 +233,11 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
}
static bool
-huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
- size_t usize;
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
- usize = s2u(size);
- if (usize == 0) {
- /* size_t overflow. */
- return (true);
- }
-
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
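
With huge_ralloc_no_move_expand() now taking a precomputed usize, the deleted overflow guard becomes the callers' responsibility (huge_ralloc_no_move() below rejects undersized ranges up front, and its callers compute usize before calling in). For reference, a stub with the same failure convention as jemalloc's internal s2u(); the page-multiple rounding itself is illustrative:

#include <stddef.h>

/*
 * Mirrors the convention the removed check relied on: jemalloc's
 * s2u() returns 0 when rounding the request up to a size class would
 * overflow size_t.
 */
static size_t
s2u_or_zero(size_t size)
{
	size_t usize = (size + 4095) & ~(size_t)4095;

	return ((usize < size) ? 0 : usize);
}
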
@@ -280,89 +278,76 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
}
bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
- size_t usize;
+
+ assert(s2u(oldsize) == oldsize);
/* Both allocations must be huge to avoid a move. */
- if (oldsize < chunksize)
+ if (oldsize < chunksize || usize_max < chunksize)
return (true);
- assert(s2u(oldsize) == oldsize);
- usize = s2u(size);
- if (usize == 0) {
- /* size_t overflow. */
- return (true);
+ if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+ /* Attempt to expand the allocation in-place. */
+ if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+ return (false);
+ /* Try again, this time with usize_min. */
+ if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
+ CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
+ oldsize, usize_min, zero))
+ return (false);
}
/*
* Avoid moving the allocation if the existing chunk size accommodates
* the new size.
*/
- if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
- && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(s2u(size+extra))) {
- huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
+ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
+ && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
+ huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
zero);
return (false);
}
/* Attempt to shrink the allocation in-place. */
- if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize))
- return (huge_ralloc_no_move_shrink(ptr, oldsize, usize));
+ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
+ return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+ return (true);
+}
- /* Attempt to expand the allocation in-place. */
- if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
- if (extra == 0)
- return (true);
+static void *
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
- /* Try again, this time without extra. */
- return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
- }
- return (false);
+ if (alignment <= chunksize)
+ return (huge_malloc(tsd, arena, usize, zero, tcache));
+ return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
- if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
+ if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
return (ptr);
/*
- * size and oldsize are different enough that we need to use a
+ * usize and oldsize are different enough that we need to use a
* different size class. In that case, fall back to allocating new
* space and copying.
*/
- if (alignment > chunksize) {
- ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
- tcache);
- } else
- ret = huge_malloc(tsd, arena, size + extra, zero, tcache);
-
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment > chunksize) {
- ret = huge_palloc(tsd, arena, size, alignment, zero,
- tcache);
- } else
- ret = huge_malloc(tsd, arena, size, zero, tcache);
-
- if (ret == NULL)
- return (NULL);
- }
+ ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+ tcache);
+ if (ret == NULL)
+ return (NULL);
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
+ copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
return (ret);
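
Taken together, the new contract is: huge_ralloc_no_move() accepts a precomputed [usize_min, usize_max] range and returns false on success (the allocation was resized in place), and huge_ralloc() collapses the range to a single usize, falling back to huge_ralloc_move_helper() plus memcpy when in-place resizing fails. A caller-side sketch against the post-patch internal declaration (this only links inside jemalloc itself; the sizes are made up):

#include <stdbool.h>
#include <stddef.h>

/* Post-patch internal declaration, as in jemalloc's huge.h. */
bool	huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero);

/*
 * Try to grow a huge allocation in place, accepting anything from
 * 5 MiB up to 6 MiB; false means ptr was resized without moving.
 */
static bool
grow_in_place(void *ptr, size_t oldsize)
{
	return (!huge_ralloc_no_move(ptr, oldsize, (size_t)5 << 20,
	    (size_t)6 << 20, false));
}
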
@@ -439,3 +424,10 @@ huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
extent_node_prof_tctx_set(node, tctx);
malloc_mutex_unlock(&arena->huge_mtx);
}
+
+void
+huge_prof_tctx_reset(const void *ptr)
+{
+
+ huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+}
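
The new huge_prof_tctx_reset() stores the pointer value 1U rather than a real heap-profiling context; within jemalloc's profiler that value is the sentinel for an allocation that is live but not sampled, distinct from NULL. A sketch of the convention, with illustrative names (PROF_TCTX_UNSAMPLED is not jemalloc's):

#include <stdbool.h>
#include <stdint.h>

typedef struct prof_tctx_s prof_tctx_t;	/* Opaque, as in jemalloc. */

/* Illustrative name for the sentinel the reset above stores. */
#define	PROF_TCTX_UNSAMPLED	((prof_tctx_t *)(uintptr_t)1U)

/*
 * An allocation carries a real profiling context only if its tctx is
 * neither NULL nor the unsampled sentinel.
 */
static inline bool
prof_tctx_is_sampled(const prof_tctx_t *tctx)
{
	return (tctx != NULL && tctx != PROF_TCTX_UNSAMPLED);
}
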