author     Jason Evans <je@fb.com>    2012-10-11 20:53:15 (GMT)
committer  Jason Evans <je@fb.com>    2012-10-13 01:26:16 (GMT)
commit     609ae595f0358157b19311b0f9f9591db7cee705 (patch)
tree       2a51758c6218f26167eb8ad2155ccc925c898c3b /src/arena.c
parent     d0ffd8ed4f6aa4cf7248028eddfcb35f93247fe4 (diff)
Add arena-specific and selective dss allocation.
Add the "arenas.extend" mallctl, so that it is possible to create new
arenas that are outside the set that jemalloc automatically multiplexes
threads onto.
Add the ALLOCM_ARENA() flag for {,r,d}allocm(), so that it is possible
to explicitly allocate from a particular arena.
Add the "opt.dss" mallctl, which controls the default precedence of dss
allocation relative to mmap allocation.
Add the "arena.<i>.dss" mallctl, which makes it possible to set the
default dss precedence on a per arena or global basis.
Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
Add the "stats.arenas.<i>.dss" mallctl.
Diffstat (limited to 'src/arena.c')
-rw-r--r--   src/arena.c   134
1 file changed, 80 insertions(+), 54 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index 674ffe9..1e6964a 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -372,7 +372,7 @@ arena_chunk_alloc(arena_t *arena)
 		zero = false;
 		malloc_mutex_unlock(&arena->lock);
 		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
-		    false, &zero);
+		    false, &zero, arena->dss_prec);
 		malloc_mutex_lock(&arena->lock);
 		if (chunk == NULL)
 			return (NULL);
@@ -1619,52 +1619,6 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	mapelm = arena_mapp_get(chunk, pageind);
 	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
 }
-void
-arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
-    arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats)
-{
-	unsigned i;
-
-	malloc_mutex_lock(&arena->lock);
-	*nactive += arena->nactive;
-	*ndirty += arena->ndirty;
-
-	astats->mapped += arena->stats.mapped;
-	astats->npurge += arena->stats.npurge;
-	astats->nmadvise += arena->stats.nmadvise;
-	astats->purged += arena->stats.purged;
-	astats->allocated_large += arena->stats.allocated_large;
-	astats->nmalloc_large += arena->stats.nmalloc_large;
-	astats->ndalloc_large += arena->stats.ndalloc_large;
-	astats->nrequests_large += arena->stats.nrequests_large;
-
-	for (i = 0; i < nlclasses; i++) {
-		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
-		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
-		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
-		lstats[i].curruns += arena->stats.lstats[i].curruns;
-	}
-	malloc_mutex_unlock(&arena->lock);
-
-	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
-
-		malloc_mutex_lock(&bin->lock);
-		bstats[i].allocated += bin->stats.allocated;
-		bstats[i].nmalloc += bin->stats.nmalloc;
-		bstats[i].ndalloc += bin->stats.ndalloc;
-		bstats[i].nrequests += bin->stats.nrequests;
-		if (config_tcache) {
-			bstats[i].nfills += bin->stats.nfills;
-			bstats[i].nflushes += bin->stats.nflushes;
-		}
-		bstats[i].nruns += bin->stats.nruns;
-		bstats[i].reruns += bin->stats.reruns;
-		bstats[i].curruns += bin->stats.curruns;
-		malloc_mutex_unlock(&bin->lock);
-	}
-}
 
 void
 arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@@ -1877,8 +1831,9 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 }
 
 void *
-arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache)
+arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+    bool try_tcache_dalloc)
 {
 	void *ret;
 	size_t copysize;
@@ -1897,9 +1852,9 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 		size_t usize = sa2u(size + extra, alignment);
 		if (usize == 0)
 			return (NULL);
-		ret = ipalloc(usize, alignment, zero);
+		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
 	} else
-		ret = arena_malloc(NULL, size + extra, zero, try_tcache);
+		ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
 
 	if (ret == NULL) {
 		if (extra == 0)
@@ -1909,9 +1864,10 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 			size_t usize = sa2u(size, alignment);
 			if (usize == 0)
 				return (NULL);
-			ret = ipalloc(usize, alignment, zero);
+			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+			    arena);
 		} else
-			ret = arena_malloc(NULL, size, zero, try_tcache);
+			ret = arena_malloc(arena, size, zero, try_tcache_alloc);
 
 		if (ret == NULL)
 			return (NULL);
@@ -1926,10 +1882,78 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	copysize = (size < oldsize) ? size : oldsize;
 	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
 	memcpy(ret, ptr, copysize);
-	iqalloc(ptr);
+	iqallocx(ptr, try_tcache_dalloc);
 	return (ret);
 }
 
+dss_prec_t
+arena_dss_prec_get(arena_t *arena)
+{
+	dss_prec_t ret;
+
+	malloc_mutex_lock(&arena->lock);
+	ret = arena->dss_prec;
+	malloc_mutex_unlock(&arena->lock);
+	return (ret);
+}
+
+void
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
+{
+
+	malloc_mutex_lock(&arena->lock);
+	arena->dss_prec = dss_prec;
+	malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+    malloc_large_stats_t *lstats)
+{
+	unsigned i;
+
+	malloc_mutex_lock(&arena->lock);
+	*dss = dss_prec_names[arena->dss_prec];
+	*nactive += arena->nactive;
+	*ndirty += arena->ndirty;
+
+	astats->mapped += arena->stats.mapped;
+	astats->npurge += arena->stats.npurge;
+	astats->nmadvise += arena->stats.nmadvise;
+	astats->purged += arena->stats.purged;
+	astats->allocated_large += arena->stats.allocated_large;
+	astats->nmalloc_large += arena->stats.nmalloc_large;
+	astats->ndalloc_large += arena->stats.ndalloc_large;
+	astats->nrequests_large += arena->stats.nrequests_large;
+
+	for (i = 0; i < nlclasses; i++) {
+		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+		lstats[i].curruns += arena->stats.lstats[i].curruns;
+	}
+	malloc_mutex_unlock(&arena->lock);
+
+	for (i = 0; i < NBINS; i++) {
+		arena_bin_t *bin = &arena->bins[i];
+
+		malloc_mutex_lock(&bin->lock);
+		bstats[i].allocated += bin->stats.allocated;
+		bstats[i].nmalloc += bin->stats.nmalloc;
+		bstats[i].ndalloc += bin->stats.ndalloc;
+		bstats[i].nrequests += bin->stats.nrequests;
+		if (config_tcache) {
+			bstats[i].nfills += bin->stats.nfills;
+			bstats[i].nflushes += bin->stats.nflushes;
+		}
+		bstats[i].nruns += bin->stats.nruns;
+		bstats[i].reruns += bin->stats.reruns;
+		bstats[i].curruns += bin->stats.curruns;
+		malloc_mutex_unlock(&bin->lock);
+	}
+}
+
 bool
 arena_new(arena_t *arena, unsigned ind)
 {
@@ -1958,6 +1982,8 @@ arena_new(arena_t *arena, unsigned ind)
 	if (config_prof)
 		arena->prof_accumbytes = 0;
 
+	arena->dss_prec = chunk_dss_prec_get();
+
 	/* Initialize chunks. */
 	ql_new(&arena->chunks_dirty);
 	arena->spare = NULL;
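
As a rough illustration of how the new arena_dss_prec_get()/arena_dss_prec_set() plumbing is exposed, the "arena.<i>.dss" mallctl can be read as well as written, and "arena.<i>.purge" (which obsoletes "arenas.purge") purges a single arena. The helper below is purely illustrative and not part of jemalloc; it assumes an unprefixed jemalloc build:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Illustrative helper: report arena `ind`'s current dss precedence, then
 * purge that arena's dirty pages.
 */
static int
report_dss_and_purge(unsigned ind)
{
	char name[64];
	const char *dss;
	size_t sz = sizeof(dss);

	/* Read the per-arena dss precedence ("disabled"/"primary"/"secondary"). */
	snprintf(name, sizeof(name), "arena.%u.dss", ind);
	if (mallctl(name, &dss, &sz, NULL, 0) != 0)
		return (-1);
	printf("arena %u dss precedence: %s\n", ind, dss);

	/* Purge just this arena, rather than all arenas. */
	snprintf(name, sizeof(name), "arena.%u.purge", ind);
	return (mallctl(name, NULL, NULL, NULL, 0));
}

int
main(void)
{

	return (report_dss_and_purge(0) == 0 ? 0 : 1);
}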