author     David Goldblatt <davidgoldblatt@fb.com>      2017-05-15 22:38:15 (GMT)
committer  David Goldblatt <davidtgoldblatt@gmail.com>  2017-05-19 21:21:27 (GMT)
commit     26c792e61a163b38b373023bca2947283dcd1fc8
tree       89c4b0e1275707f6e671a23ad6ed548484becabc
parent     6e62c6286258e340308b4a989b4bd80232fed8e1
Allow mutexes to take a lock ordering enum at construction.
This lets us specify whether and how mutexes of the same rank are allowed to be
acquired. Currently, we only allow two policies (only a single mutex at a given
rank at a time, and mutexes acquired in ascending address order), but we can
plausibly allow more (e.g. a "release uncontended mutexes before blocking"
policy).
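To make the new API concrete, here is a minimal sketch of a call site after
this change, built from the enum and signature added in the diff below; the
mutex variable, "demo" name, and WITNESS_RANK_DEMO rank are hypothetical
placeholders, not code from this patch:

    /* Sketch only: demo_mtx, "demo", and WITNESS_RANK_DEMO are hypothetical. */
    static malloc_mutex_t demo_mtx;

    static bool
    demo_boot(void) {
    	/* Callers now pick a lock-order policy at construction time. */
    	if (malloc_mutex_init(&demo_mtx, "demo", WITNESS_RANK_DEMO,
    	    malloc_mutex_rank_exclusive)) {
    		/* Initialization failed; propagate, as the real callers do. */
    		return true;
    	}
    	return false;
    }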
-rw-r--r--  include/jemalloc/internal/mutex_externs.h |  2
-rw-r--r--  include/jemalloc/internal/mutex_structs.h |  6
-rw-r--r--  include/jemalloc/internal/mutex_types.h   | 10
-rw-r--r--  src/arena.c                                | 13
-rw-r--r--  src/base.c                                 |  3
-rw-r--r--  src/ctl.c                                  |  3
-rw-r--r--  src/extent.c                               |  3
-rw-r--r--  src/jemalloc.c                             |  6
-rw-r--r--  src/mutex.c                                | 28
-rw-r--r--  src/prof.c                                 | 25
-rw-r--r--  src/rtree.c                                |  3
-rw-r--r--  src/tcache.c                               |  3
12 files changed, 75 insertions, 30 deletions
diff --git a/include/jemalloc/internal/mutex_externs.h b/include/jemalloc/internal/mutex_externs.h
index 8e40cb3..c9a817f 100644
--- a/include/jemalloc/internal/mutex_externs.h
+++ b/include/jemalloc/internal/mutex_externs.h
@@ -11,7 +11,7 @@ extern bool isthreaded;
 #endif
 
 bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
-    witness_rank_t rank);
+    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
 void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
diff --git a/include/jemalloc/internal/mutex_structs.h b/include/jemalloc/internal/mutex_structs.h
index 2691852..a8b16a1 100644
--- a/include/jemalloc/internal/mutex_structs.h
+++ b/include/jemalloc/internal/mutex_structs.h
@@ -40,12 +40,14 @@ struct malloc_mutex_s {
 	 * memory cost.
 	 */
 #if !defined(JEMALLOC_DEBUG)
-		witness_t witness;
+		witness_t witness;
+		malloc_mutex_lock_order_t lock_order;
 #endif
 	};
 #if defined(JEMALLOC_DEBUG)
-	witness_t witness;
+	witness_t witness;
+	malloc_mutex_lock_order_t lock_order;
 #endif
 };
diff --git a/include/jemalloc/internal/mutex_types.h b/include/jemalloc/internal/mutex_types.h
index 5af8d09..65a9938 100644
--- a/include/jemalloc/internal/mutex_types.h
+++ b/include/jemalloc/internal/mutex_types.h
@@ -3,6 +3,16 @@
 typedef struct malloc_mutex_s malloc_mutex_t;
 
+typedef enum {
+	/* Can only acquire one mutex of a given witness rank at a time. */
+	malloc_mutex_rank_exclusive,
+	/*
+	 * Can acquire multiple mutexes of the same witness rank, but in
+	 * address-ascending order only.
+	 */
+	malloc_mutex_address_ordered
+} malloc_mutex_lock_order_t;
+
 /*
  * Based on benchmark results, a fixed spin with this amount of retries works
  * well for our critical sections.
diff --git a/src/arena.c b/src/arena.c
index 42bfc6b..67e1b2f 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -64,7 +64,7 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
 	}
 #ifndef JEMALLOC_ATOMIC_U64
 	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
-	    WITNESS_RANK_ARENA_STATS)) {
+	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
 #endif
@@ -734,7 +734,8 @@ arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
 			assert(((char *)decay)[i] == 0);
 		}
 	}
-	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY)) {
+	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
+	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
 	decay->purging = false;
@@ -1869,7 +1870,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		ql_new(&arena->tcache_ql);
 		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
-		    WITNESS_RANK_TCACHE_QL)) {
+		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
 			goto label_error;
 		}
 	}
@@ -1901,7 +1902,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	extent_list_init(&arena->large);
 	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
-	    WITNESS_RANK_ARENA_LARGE)) {
+	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
 		goto label_error;
 	}
@@ -1950,7 +1951,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	extent_avail_new(&arena->extent_avail);
 	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
-	    WITNESS_RANK_EXTENT_FREELIST)) {
+	    WITNESS_RANK_EXTENT_FREELIST, malloc_mutex_rank_exclusive)) {
 		goto label_error;
 	}
@@ -1958,7 +1959,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	for (i = 0; i < NBINS; i++) {
 		arena_bin_t *bin = &arena->bins[i];
 		if (malloc_mutex_init(&bin->lock, "arena_bin",
-		    WITNESS_RANK_ARENA_BIN)) {
+		    WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
 			goto label_error;
 		}
 		bin->slabcur = NULL;
diff --git a/src/base.c b/src/base.c
--- a/src/base.c
+++ b/src/base.c
@@ -238,7 +238,8 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	    &gap_size, base_size, base_alignment);
 	base->ind = ind;
 	atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
-	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE)) {
+	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
+	    malloc_mutex_rank_exclusive)) {
 		base_unmap(extent_hooks, ind, block, block->size);
 		return NULL;
 	}
diff --git a/src/ctl.c b/src/ctl.c
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1199,7 +1199,8 @@ label_return:
 bool
 ctl_boot(void) {
-	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) {
+	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
+	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
diff --git a/src/extent.c b/src/extent.c
index 1b28453..513d16d 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -195,7 +195,8 @@ ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
 bool
 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
     bool delay_coalesce) {
-	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS)) {
+	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
+	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
 	for (unsigned i = 0; i < NPSIZES+1; i++) {
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 47133ed..56aef5b 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -235,7 +235,8 @@ _init_init_lock(void) {
 	 * doing anything.
 	 */
 	if (!init_lock_initialized) {
-		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
+		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
+		    malloc_mutex_rank_exclusive);
 	}
 	init_lock_initialized = true;
 }
@@ -1237,7 +1238,8 @@ malloc_init_hard_a0_locked() {
 	if (tcache_boot(TSDN_NULL)) {
 		return true;
 	}
-	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) {
+	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
+	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
 	/*
diff --git a/src/mutex.c b/src/mutex.c
index 3eec970..b15bbf6 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -138,9 +138,25 @@ malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 	mutex_prof_data_init(&mutex->prof_data);
 }
 
+static int
+mutex_addr_comp(const witness_t *witness1, void *mutex1,
+    const witness_t *witness2, void *mutex2) {
+	assert(mutex1 != NULL);
+	assert(mutex2 != NULL);
+	uintptr_t mu1int = (uintptr_t)mutex1;
+	uintptr_t mu2int = (uintptr_t)mutex2;
+	if (mu1int < mu2int) {
+		return -1;
+	} else if (mu1int == mu2int) {
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
 bool
 malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
-    witness_rank_t rank) {
+    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
 	mutex_prof_data_init(&mutex->prof_data);
 #ifdef _WIN32
 # if _WIN32_WINNT >= 0x0600
@@ -179,7 +195,13 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
 	pthread_mutexattr_destroy(&attr);
 #endif
 	if (config_debug) {
-		witness_init(&mutex->witness, name, rank, NULL, NULL);
+		mutex->lock_order = lock_order;
+		if (lock_order == malloc_mutex_address_ordered) {
+			witness_init(&mutex->witness, name, rank,
+			    mutex_addr_comp, &mutex);
+		} else {
+			witness_init(&mutex->witness, name, rank, NULL, NULL);
+		}
 	}
 	return false;
 }
@@ -200,7 +222,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 	malloc_mutex_unlock(tsdn, mutex);
 #else
 	if (malloc_mutex_init(mutex, mutex->witness.name,
-	    mutex->witness.rank)) {
+	    mutex->witness.rank, mutex->lock_order)) {
 		malloc_printf("<jemalloc>: Error re-initializing mutex in "
 		    "child\n");
 		if (opt_abort) {
diff --git a/src/prof.c b/src/prof.c
--- a/src/prof.c
+++ b/src/prof.c
@@ -1754,7 +1754,7 @@ prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
 #ifndef JEMALLOC_ATOMIC_U64
 	if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
-	    WITNESS_RANK_PROF_ACCUM)) {
+	    WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
 	prof_accum->accumbytes = 0;
@@ -2289,20 +2289,21 @@ prof_boot2(tsd_t *tsd) {
 		prof_active = opt_prof_active;
 		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
-		    WITNESS_RANK_PROF_ACTIVE)) {
+		    WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
 			return true;
 		}
 
 		prof_gdump_val = opt_prof_gdump;
 		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
-		    WITNESS_RANK_PROF_GDUMP)) {
+		    WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
 			return true;
 		}
 
 		prof_thread_active_init = opt_prof_thread_active_init;
 		if (malloc_mutex_init(&prof_thread_active_init_mtx,
 		    "prof_thread_active_init",
-		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) {
+		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
+		    malloc_mutex_rank_exclusive)) {
 			return true;
 		}
@@ -2311,28 +2312,28 @@ prof_boot2(tsd_t *tsd) {
 			return true;
 		}
 		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
-		    WITNESS_RANK_PROF_BT2GCTX)) {
+		    WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
 			return true;
 		}
 
 		tdata_tree_new(&tdatas);
 		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
-		    WITNESS_RANK_PROF_TDATAS)) {
+		    WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
 			return true;
 		}
 
 		next_thr_uid = 0;
 		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
-		    WITNESS_RANK_PROF_NEXT_THR_UID)) {
+		    WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
 			return true;
 		}
 
 		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
-		    WITNESS_RANK_PROF_DUMP_SEQ)) {
+		    WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
 			return true;
 		}
 		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
-		    WITNESS_RANK_PROF_DUMP)) {
+		    WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
 			return true;
 		}
@@ -2352,7 +2353,8 @@ prof_boot2(tsd_t *tsd) {
 		}
 		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
 			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
-			    WITNESS_RANK_PROF_GCTX)) {
+			    WITNESS_RANK_PROF_GCTX,
+			    malloc_mutex_rank_exclusive)) {
 				return true;
 			}
 		}
@@ -2365,7 +2367,8 @@ prof_boot2(tsd_t *tsd) {
 		}
 		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
 			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
-			    WITNESS_RANK_PROF_TDATA)) {
+			    WITNESS_RANK_PROF_TDATA,
+			    malloc_mutex_rank_exclusive)) {
 				return true;
 			}
 		}
diff --git a/src/rtree.c b/src/rtree.c
index 62df014..6d4a71a 100644
--- a/src/rtree.c
+++ b/src/rtree.c
@@ -18,7 +18,8 @@ rtree_new(rtree_t *rtree, bool zeroed) {
 	assert(zeroed);
 #endif
 
-	if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE)) {
+	if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
+	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
diff --git a/src/tcache.c b/src/tcache.c
index ee5e816..d9f5e7c 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -649,7 +649,8 @@ tcache_boot(tsdn_t *tsdn) {
 		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
 	}
 
-	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES)) {
+	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
+	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
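A closing usage note, not part of the patch: under malloc_mutex_address_ordered,
code that must hold two mutexes of the same witness rank is expected to take
them in ascending address order, which is the invariant mutex_addr_comp lets
the witness machinery verify in debug builds. The helper below is a
hypothetical sketch of that discipline (demo_lock_pair is not a jemalloc
function):

    /*
     * Hypothetical sketch: acquire two same-rank, address-ordered mutexes
     * in ascending address order, so the debug witness check backed by
     * mutex_addr_comp never observes a violation.
     */
    static void
    demo_lock_pair(tsdn_t *tsdn, malloc_mutex_t *a, malloc_mutex_t *b) {
    	if ((uintptr_t)a < (uintptr_t)b) {
    		malloc_mutex_lock(tsdn, a);
    		malloc_mutex_lock(tsdn, b);
    	} else {
    		malloc_mutex_lock(tsdn, b);
    		malloc_mutex_lock(tsdn, a);
    	}
    }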