author     Jason Evans <je@fb.com>    2014-04-15 19:09:48 (GMT)
committer  Jason Evans <je@fb.com>    2014-04-15 19:09:48 (GMT)
commit     4d434adb146375ad17f0d5e994ed5728d2942e3f
tree       f819a7cef393ab203a738a1da01cb6223675f8de /src
parent     644d414bc9ab52efbbf7ebeb350170106ec1f937
Make dss non-optional, and fix an "arena.<i>.dss" mallctl bug.
Make dss non-optional on all platforms which support sbrk(2).
Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
"secondary" precedence is specified, but sbrk(2) is not supported.
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c     |  5
-rw-r--r--  src/chunk.c     |  8
-rw-r--r--  src/chunk_dss.c | 20
-rw-r--r--  src/ctl.c       |  6
-rw-r--r--  src/huge.c      |  6
5 files changed, 22 insertions, 23 deletions
diff --git a/src/arena.c b/src/arena.c
index d574100..8aa36fd 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2243,13 +2243,16 @@ arena_dss_prec_get(arena_t *arena)
 	return (ret);
 }
 
-void
+bool
 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
 {
 
+	if (have_dss == false)
+		return (dss_prec != dss_prec_disabled);
 	malloc_mutex_lock(&arena->lock);
 	arena->dss_prec = dss_prec;
 	malloc_mutex_unlock(&arena->lock);
+	return (false);
 }
 
 void
diff --git a/src/chunk.c b/src/chunk.c
index 90ab116..fdd693e 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -153,7 +153,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
 	assert((alignment & chunksize_mask) == 0);
 
 	/* "primary" dss. */
-	if (config_dss && dss_prec == dss_prec_primary) {
+	if (have_dss && dss_prec == dss_prec_primary) {
 		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
 		    alignment, base, zero)) != NULL)
 			goto label_return;
@@ -167,7 +167,7 @@
 	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
 		goto label_return;
 	/* "secondary" dss. */
-	if (config_dss && dss_prec == dss_prec_secondary) {
+	if (have_dss && dss_prec == dss_prec_secondary) {
 		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
 		    alignment, base, zero)) != NULL)
 			goto label_return;
@@ -305,7 +305,7 @@ chunk_unmap(void *chunk, size_t size)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	if (config_dss && chunk_in_dss(chunk))
+	if (have_dss && chunk_in_dss(chunk))
 		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
 	else if (chunk_dealloc_mmap(chunk, size))
 		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
@@ -348,7 +348,7 @@ chunk_boot(void)
 			return (true);
 		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
 	}
-	if (config_dss && chunk_dss_boot())
+	if (have_dss && chunk_dss_boot())
 		return (true);
 	extent_tree_szad_new(&chunks_szad_mmap);
 	extent_tree_ad_new(&chunks_ad_mmap);
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 510bb8b..36133f1 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -32,7 +32,7 @@ static void *
 chunk_dss_sbrk(intptr_t increment)
 {
 
-#ifdef JEMALLOC_HAVE_SBRK
+#ifdef JEMALLOC_DSS
 	return (sbrk(increment));
 #else
 	not_implemented();
@@ -45,7 +45,7 @@ chunk_dss_prec_get(void)
 {
 	dss_prec_t ret;
 
-	if (config_dss == false)
+	if (have_dss == false)
 		return (dss_prec_disabled);
 	malloc_mutex_lock(&dss_mtx);
 	ret = dss_prec_default;
@@ -57,8 +57,8 @@ bool
 chunk_dss_prec_set(dss_prec_t dss_prec)
 {
 
-	if (config_dss == false)
-		return (true);
+	if (have_dss == false)
+		return (dss_prec != dss_prec_disabled);
 	malloc_mutex_lock(&dss_mtx);
 	dss_prec_default = dss_prec;
 	malloc_mutex_unlock(&dss_mtx);
@@ -70,7 +70,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 {
 	void *ret;
 
-	cassert(config_dss);
+	cassert(have_dss);
 	assert(size > 0 && (size & chunksize_mask) == 0);
 	assert(alignment > 0 && (alignment & chunksize_mask) == 0);
 
@@ -143,7 +143,7 @@ chunk_in_dss(void *chunk)
 {
 	bool ret;
 
-	cassert(config_dss);
+	cassert(have_dss);
 
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
@@ -160,7 +160,7 @@ bool
 chunk_dss_boot(void)
 {
 
-	cassert(config_dss);
+	cassert(have_dss);
 
 	if (malloc_mutex_init(&dss_mtx))
 		return (true);
@@ -175,7 +175,7 @@ void
 chunk_dss_prefork(void)
 {
 
-	if (config_dss)
+	if (have_dss)
 		malloc_mutex_prefork(&dss_mtx);
 }
 
@@ -183,7 +183,7 @@ void
 chunk_dss_postfork_parent(void)
 {
 
-	if (config_dss)
+	if (have_dss)
 		malloc_mutex_postfork_parent(&dss_mtx);
 }
 
@@ -191,7 +191,7 @@ void
 chunk_dss_postfork_child(void)
 {
 
-	if (config_dss)
+	if (have_dss)
 		malloc_mutex_postfork_child(&dss_mtx);
 }
 
diff --git a/src/ctl.c b/src/ctl.c
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -74,7 +74,6 @@ CTL_PROTO(thread_allocatedp)
 CTL_PROTO(thread_deallocated)
 CTL_PROTO(thread_deallocatedp)
 CTL_PROTO(config_debug)
-CTL_PROTO(config_dss)
 CTL_PROTO(config_fill)
 CTL_PROTO(config_lazy_lock)
 CTL_PROTO(config_mremap)
@@ -213,7 +212,6 @@ static const ctl_named_node_t thread_node[] = {
 
 static const ctl_named_node_t config_node[] = {
 	{NAME("debug"),		CTL(config_debug)},
-	{NAME("dss"),		CTL(config_dss)},
 	{NAME("fill"),		CTL(config_fill)},
 	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
 	{NAME("mremap"),	CTL(config_mremap)},
@@ -1136,7 +1134,6 @@ label_return:
 /******************************************************************************/
 
 CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
 CTL_RO_BOOL_CONFIG_GEN(config_fill)
 CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
 CTL_RO_BOOL_CONFIG_GEN(config_mremap)
@@ -1356,8 +1353,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 		arena_t *arena = arenas[arena_ind];
 		if (arena != NULL) {
 			dss_prec_old = arena_dss_prec_get(arena);
-			arena_dss_prec_set(arena, dss_prec);
-			err = false;
+			err = arena_dss_prec_set(arena, dss_prec);
 		} else
 			err = true;
 	} else {
diff --git a/src/huge.c b/src/huge.c
--- a/src/huge.c
+++ b/src/huge.c
@@ -140,7 +140,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
 	 * source nor the destination are in dss.
 	 */
-	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+	if (oldsize >= chunksize && (have_dss == false || (chunk_in_dss(ptr)
 	    == false && chunk_in_dss(ret) == false))) {
 		size_t newsize = huge_salloc(ret);
 
@@ -198,12 +198,12 @@ static void
 huge_dalloc_junk(void *ptr, size_t usize)
 {
 
-	if (config_fill && config_dss && opt_junk) {
+	if (config_fill && have_dss && opt_junk) {
 		/*
 		 * Only bother junk filling if the chunk isn't about to be
 		 * unmapped.
 		 */
-		if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
+		if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
 			memset(ptr, 0x5a, usize);
 	}
 }
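
The diff is limited to 'src', so the have_dss constant it switches to is not shown above. As a sketch of the assumed definition (following the pattern jemalloc uses for its other compile-time config_* booleans in the internal headers), it reduces to a constant derived from the JEMALLOC_DSS autoconf define:

    /* Sketch of the assumed definition; the real one lives in the internal
     * headers outside this src-limited diff.  True iff sbrk(2)-based dss
     * support was configured in. */
    static const bool have_dss =
    #ifdef JEMALLOC_DSS
        true
    #else
        false
    #endif
        ;

Because have_dss is a compile-time constant, the compiler can discard the dss branches entirely on platforms without sbrk(2), and the have_dss == false checks cost nothing on platforms where dss is now always available.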