diff options
| author | Jason Evans <jasone@canonware.com> | 2016-05-04 00:34:40 (GMT) |
|---|---|---|
| committer | Jason Evans <jasone@canonware.com> | 2016-05-04 00:34:40 (GMT) |
| commit | e02b83cc5e3c4d30f93dba945162e3aa58d962d6 (patch) | |
| tree | dc1cf37bd624061c6351ea3a78f993d2a19310ac /src | |
| parent | df900dbfaf4835d3efc06d771535f3e781544913 (diff) | |
| parent | 2e5eb21184cccabc829265b5f5237f3c13563be6 (diff) | |
| download | jemalloc-4.1.1.zip jemalloc-4.1.1.tar.gz jemalloc-4.1.1.tar.bz2 | |
Merge branch 4.1.1
Diffstat (limited to 'src')
| -rw-r--r-- | src/arena.c | 46 | ||||
| -rw-r--r-- | src/chunk.c | 51 | ||||
| -rw-r--r-- | src/chunk_dss.c | 2 | ||||
| -rw-r--r-- | src/jemalloc.c | 38 | ||||
| -rw-r--r-- | src/prof.c | 52 | ||||
| -rw-r--r-- | src/stats.c | 2 |
6 files changed, 112 insertions, 79 deletions
diff --git a/src/arena.c b/src/arena.c index 99e20fd..48e9b20 100644 --- a/src/arena.c +++ b/src/arena.c @@ -617,8 +617,8 @@ arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, /* Commit header. */ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind)) { - chunk_dalloc_wrapper(arena, chunk_hooks, - (void *)chunk, chunksize, *commit); + chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, + chunksize, *zero, *commit); chunk = NULL; } } @@ -629,7 +629,7 @@ arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, LG_PAGE, arena->ind); } chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, - chunksize, *commit); + chunksize, *zero, *commit); chunk = NULL; } @@ -1024,7 +1024,7 @@ arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, malloc_mutex_unlock(&arena->lock); } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, + chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero, true); err = true; } @@ -1050,8 +1050,8 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, } arena_nactive_add(arena, udiff >> LG_PAGE); - err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, - chunksize, zero, true) == NULL); + err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize, + zero, true) == NULL); malloc_mutex_unlock(&arena->lock); if (err) { err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, @@ -1059,7 +1059,7 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, cdiff); } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, + chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero, true); err = true; } @@ -1707,7 +1707,7 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t 
*chunk_hooks, extent_node_dirty_remove(chunkselm); arena_node_dalloc(arena, chunkselm); chunkselm = chunkselm_next; - chunk_dalloc_arena(arena, chunk_hooks, addr, size, + chunk_dalloc_wrapper(arena, chunk_hooks, addr, size, zeroed, committed); } else { arena_chunk_t *chunk = @@ -2423,7 +2423,7 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) uintptr_t random_offset; arena_run_t *run; arena_chunk_map_misc_t *miscelm; - UNUSED bool idump; + UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); /* Large allocation. */ usize = index2size(binind); @@ -3646,16 +3646,34 @@ arena_boot(void) } void -arena_prefork(arena_t *arena) +arena_prefork0(arena_t *arena) { - unsigned i; malloc_mutex_prefork(&arena->lock); - malloc_mutex_prefork(&arena->huge_mtx); +} + +void +arena_prefork1(arena_t *arena) +{ + malloc_mutex_prefork(&arena->chunks_mtx); +} + +void +arena_prefork2(arena_t *arena) +{ + malloc_mutex_prefork(&arena->node_cache_mtx); +} + +void +arena_prefork3(arena_t *arena) +{ + unsigned i; + for (i = 0; i < NBINS; i++) malloc_mutex_prefork(&arena->bins[i].lock); + malloc_mutex_prefork(&arena->huge_mtx); } void @@ -3663,11 +3681,11 @@ arena_postfork_parent(arena_t *arena) { unsigned i; + malloc_mutex_postfork_parent(&arena->huge_mtx); for (i = 0; i < NBINS; i++) malloc_mutex_postfork_parent(&arena->bins[i].lock); malloc_mutex_postfork_parent(&arena->node_cache_mtx); malloc_mutex_postfork_parent(&arena->chunks_mtx); - malloc_mutex_postfork_parent(&arena->huge_mtx); malloc_mutex_postfork_parent(&arena->lock); } @@ -3676,10 +3694,10 @@ arena_postfork_child(arena_t *arena) { unsigned i; + malloc_mutex_postfork_child(&arena->huge_mtx); for (i = 0; i < NBINS; i++) malloc_mutex_postfork_child(&arena->bins[i].lock); malloc_mutex_postfork_child(&arena->node_cache_mtx); malloc_mutex_postfork_child(&arena->chunks_mtx); - malloc_mutex_postfork_child(&arena->huge_mtx); malloc_mutex_postfork_child(&arena->lock); } diff --git a/src/chunk.c b/src/chunk.c index 
b179d21..304d4e5 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -425,8 +425,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, arena_t *arena; arena = chunk_arena_get(arena_ind); - ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, - commit, arena->dss_prec); + ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit, + arena->dss_prec); if (ret == NULL) return (NULL); if (config_valgrind) @@ -579,8 +579,18 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, arena_maybe_purge(arena); } +static bool +chunk_dalloc_default(void *chunk, size_t size, bool committed, + unsigned arena_ind) +{ + + if (!have_dss || !chunk_in_dss(chunk)) + return (chunk_dalloc_mmap(chunk, size)); + return (true); +} + void -chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, +chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed, bool committed) { @@ -605,27 +615,6 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, } static bool -chunk_dalloc_default(void *chunk, size_t size, bool committed, - unsigned arena_ind) -{ - - if (!have_dss || !chunk_in_dss(chunk)) - return (chunk_dalloc_mmap(chunk, size)); - return (true); -} - -void -chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool committed) -{ - - chunk_hooks_assure_initialized(arena, chunk_hooks); - chunk_hooks->dalloc(chunk, size, committed, arena->ind); - if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default) - JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); -} - -static bool chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { @@ -643,8 +632,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, length)); } -bool -chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) +static bool 
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) { assert(chunk != NULL); @@ -657,15 +647,6 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) length)); } -static bool -chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset, - length)); -} - bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length) diff --git a/src/chunk_dss.c b/src/chunk_dss.c index 61fc916..943d0e9 100644 --- a/src/chunk_dss.c +++ b/src/chunk_dss.c @@ -136,7 +136,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, CHUNK_HOOKS_INITIALIZER; chunk_dalloc_wrapper(arena, &chunk_hooks, cpad, cpad_size, - true); + false, true); } if (*zero) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( diff --git a/src/jemalloc.c b/src/jemalloc.c index 0735376..7120791 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -2644,7 +2644,8 @@ JEMALLOC_EXPORT void _malloc_prefork(void) #endif { - unsigned i, narenas; + unsigned i, j, narenas; + arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) @@ -2652,18 +2653,31 @@ _malloc_prefork(void) #endif assert(malloc_initialized()); + narenas = narenas_total_get(); + /* Acquire all mutexes in a safe order. 
*/ ctl_prefork(); - prof_prefork(); malloc_mutex_prefork(&arenas_lock); - for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena; - - if ((arena = arena_get(i, false)) != NULL) - arena_prefork(arena); + prof_prefork0(); + for (i = 0; i < 3; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(j, false)) != NULL) { + switch (i) { + case 0: arena_prefork0(arena); break; + case 1: arena_prefork1(arena); break; + case 2: arena_prefork2(arena); break; + default: not_reached(); + } + } + } } - chunk_prefork(); base_prefork(); + chunk_prefork(); + for (i = 0; i < narenas; i++) { + if ((arena = arena_get(i, false)) != NULL) + arena_prefork3(arena); + } + prof_prefork1(); } #ifndef JEMALLOC_MUTEX_INIT_CB @@ -2683,16 +2697,16 @@ _malloc_postfork(void) assert(malloc_initialized()); /* Release all mutexes, now that fork() has completed. */ - base_postfork_parent(); chunk_postfork_parent(); + base_postfork_parent(); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; if ((arena = arena_get(i, false)) != NULL) arena_postfork_parent(arena); } - malloc_mutex_postfork_parent(&arenas_lock); prof_postfork_parent(); + malloc_mutex_postfork_parent(&arenas_lock); ctl_postfork_parent(); } @@ -2704,16 +2718,16 @@ jemalloc_postfork_child(void) assert(malloc_initialized()); /* Release all mutexes, now that fork() has completed. 
*/ - base_postfork_child(); chunk_postfork_child(); + base_postfork_child(); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; if ((arena = arena_get(i, false)) != NULL) arena_postfork_child(arena); } - malloc_mutex_postfork_child(&arenas_lock); prof_postfork_child(); + malloc_mutex_postfork_child(&arenas_lock); ctl_postfork_child(); } @@ -2198,20 +2198,32 @@ prof_boot2(void) } void -prof_prefork(void) +prof_prefork0(void) { if (opt_prof) { unsigned i; - malloc_mutex_prefork(&tdatas_mtx); + malloc_mutex_prefork(&prof_dump_mtx); malloc_mutex_prefork(&bt2gctx_mtx); - malloc_mutex_prefork(&next_thr_uid_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_prefork(&gctx_locks[i]); + malloc_mutex_prefork(&tdatas_mtx); for (i = 0; i < PROF_NTDATA_LOCKS; i++) malloc_mutex_prefork(&tdata_locks[i]); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_prefork(&gctx_locks[i]); + } +} + +void +prof_prefork1(void) +{ + + if (opt_prof) { + malloc_mutex_prefork(&prof_active_mtx); + malloc_mutex_prefork(&prof_dump_seq_mtx); + malloc_mutex_prefork(&prof_gdump_mtx); + malloc_mutex_prefork(&next_thr_uid_mtx); + malloc_mutex_prefork(&prof_thread_active_init_mtx); } } @@ -2222,14 +2234,18 @@ prof_postfork_parent(void) if (opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_parent(&tdata_locks[i]); + malloc_mutex_postfork_parent(&prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(&next_thr_uid_mtx); + malloc_mutex_postfork_parent(&prof_gdump_mtx); + malloc_mutex_postfork_parent(&prof_dump_seq_mtx); + malloc_mutex_postfork_parent(&prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) malloc_mutex_postfork_parent(&gctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&next_thr_uid_mtx); - malloc_mutex_postfork_parent(&bt2gctx_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + 
malloc_mutex_postfork_parent(&tdata_locks[i]); malloc_mutex_postfork_parent(&tdatas_mtx); + malloc_mutex_postfork_parent(&bt2gctx_mtx); + malloc_mutex_postfork_parent(&prof_dump_mtx); } } @@ -2240,14 +2256,18 @@ prof_postfork_child(void) if (opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_child(&tdata_locks[i]); + malloc_mutex_postfork_child(&prof_thread_active_init_mtx); + malloc_mutex_postfork_child(&next_thr_uid_mtx); + malloc_mutex_postfork_child(&prof_gdump_mtx); + malloc_mutex_postfork_child(&prof_dump_seq_mtx); + malloc_mutex_postfork_child(&prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) malloc_mutex_postfork_child(&gctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&next_thr_uid_mtx); - malloc_mutex_postfork_child(&bt2gctx_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_postfork_child(&tdata_locks[i]); malloc_mutex_postfork_child(&tdatas_mtx); + malloc_mutex_postfork_child(&bt2gctx_mtx); + malloc_mutex_postfork_child(&prof_dump_mtx); } } diff --git a/src/stats.c b/src/stats.c index a724947..87b09e5 100644 --- a/src/stats.c +++ b/src/stats.c @@ -468,7 +468,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, #define OPT_WRITE_UNSIGNED(n) \ if (je_mallctl("opt."#n, &uv, &usz, NULL, 0) == 0) { \ malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ + " opt."#n": %u\n", uv); \ } #define OPT_WRITE_SIZE_T(n) \ if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \ |
