author     David Goldblatt <davidgoldblatt@fb.com>      2017-05-31 16:43:43 (GMT)
committer  David Goldblatt <davidtgoldblatt@gmail.com>  2017-06-01 00:02:54 (GMT)
commit     fa35463d56be52a3a6e6b513fbb6cc6e63d9bcc7 (patch)
tree       4890ad5e5d99a064c60c5d2aa9b0195ca1b4aa65 /src
parent     508f54b02bd08ac0d250df1fa15cf87d574ce8a1 (diff)
Witness assertions: only assert locklessness when non-reentrant.
Previously, we could still hit these assertions down error paths or in the extended API.
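
The change consolidates the scattered "assert locklessness unless reentrant" checks into one helper that is also safe to call on error paths and in the extended API. Roughly, the resulting pattern looks like the sketch below: check_entry_exit_locking() is the helper added by this patch, while je_example_alloc() is a hypothetical caller shown only to illustrate the entry/exit call sites, not code from the patch.

    /* Debug-only: skip the lockless assertion inside nested (reentrant) allocations. */
    JEMALLOC_ALWAYS_INLINE void
    check_entry_exit_locking(tsdn_t *tsdn) {
        if (!config_debug || tsdn_null(tsdn)) {
            return;
        }
        /* A nested allocation may legitimately hold locks at entry/exit. */
        if (tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
            return;
        }
        witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
    }

    /* Hypothetical public entry point, for illustration only. */
    void *
    je_example_alloc(size_t size) {
        tsd_t *tsd = tsd_fetch();
        check_entry_exit_locking(tsd_tsdn(tsd));    /* no locks held on entry */
        void *ret = NULL;
        /* ... allocation work elided ... */
        (void)size;
        check_entry_exit_locking(tsd_tsdn(tsd));    /* no locks held on exit */
        return ret;
    }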
Diffstat (limited to 'src')
-rw-r--r--  src/jemalloc.c  111
1 file changed, 62 insertions, 49 deletions
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 268d19c..02e32da 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -628,6 +628,31 @@ stats_print_atexit(void) {
}
/*
+ * Ensure that we don't hold any locks upon entry to or exit from allocator
+ * code (in a "broad" sense that doesn't count a reentrant allocation as an
+ * entrance or exit).
+ */
+JEMALLOC_ALWAYS_INLINE void
+check_entry_exit_locking(tsdn_t *tsdn) {
+ if (!config_debug) {
+ return;
+ }
+ if (tsdn_null(tsdn)) {
+ return;
+ }
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ /*
+ * It's possible we hold locks at entry/exit if we're in a nested
+ * allocation.
+ */
+ int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
+ if (reentrancy_level != 0) {
+ return;
+ }
+ witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+}
+
+/*
* End miscellaneous support functions.
*/
/******************************************************************************/
@@ -1705,15 +1730,13 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
}
}
+ check_entry_exit_locking(tsd_tsdn(tsd));
/*
* If we need to handle reentrancy, we can do it out of a
* known-initialized arena (i.e. arena 0).
*/
reentrancy_level = tsd_reentrancy_level_get(tsd);
- if (reentrancy_level == 0) {
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
- }
if (sopts->slow && unlikely(reentrancy_level > 0)) {
/*
* We should never specify particular arenas or tcaches from
@@ -1788,9 +1811,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
}
/* Success! */
- if (reentrancy_level == 0) {
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
- }
+ check_entry_exit_locking(tsd_tsdn(tsd));
*dopts->result = allocation;
return 0;
@@ -1804,7 +1825,7 @@ label_oom:
UTRACE(NULL, size, NULL);
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
if (sopts->set_errno_on_error) {
set_errno(ENOMEM);
@@ -1835,7 +1856,7 @@ label_invalid_alignment:
UTRACE(NULL, size, NULL);
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
if (sopts->null_out_result_on_error) {
*dopts->result = NULL;
@@ -2036,9 +2057,8 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
if (!slow_path) {
tsd_assert_fast(tsd);
}
- if (tsd_reentrancy_level_get(tsd) == 0) {
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
- } else {
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
assert(slow_path);
}
@@ -2076,9 +2096,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
if (!slow_path) {
tsd_assert_fast(tsd);
}
- if (tsd_reentrancy_level_get(tsd) == 0) {
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
- } else {
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
assert(slow_path);
}
@@ -2138,7 +2157,7 @@ je_realloc(void *ptr, size_t size) {
assert(malloc_initialized() || IS_INITIALIZER);
tsd_t *tsd = tsd_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
alloc_ctx_t alloc_ctx;
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
@@ -2181,7 +2200,7 @@ je_realloc(void *ptr, size_t size) {
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
return ret;
}
@@ -2190,10 +2209,7 @@ je_free(void *ptr) {
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
- if (tsd_reentrancy_level_get(tsd) == 0) {
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(
- tsd)));
- }
+ check_entry_exit_locking(tsd_tsdn(tsd));
tcache_t *tcache;
if (likely(tsd_fast(tsd))) {
@@ -2209,10 +2225,7 @@ je_free(void *ptr) {
}
ifree(tsd, ptr, tcache, true);
}
- if (tsd_reentrancy_level_get(tsd) == 0) {
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(
- tsd)));
- }
+ check_entry_exit_locking(tsd_tsdn(tsd));
}
}
@@ -2472,7 +2485,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
assert(size != 0);
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
@@ -2529,7 +2542,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -2537,7 +2550,7 @@ label_oom:
abort();
}
UTRACE(ptr, size, 0);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
return NULL;
}
@@ -2629,7 +2642,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
alloc_ctx_t alloc_ctx;
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
@@ -2672,7 +2685,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
}
label_not_resized:
UTRACE(ptr, size, ptr);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
return usize;
}
@@ -2686,7 +2699,7 @@ je_sallocx(const void *ptr, int flags) {
assert(ptr != NULL);
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
if (config_debug || force_ivsalloc) {
usize = ivsalloc(tsdn, ptr);
@@ -2695,7 +2708,7 @@ je_sallocx(const void *ptr, int flags) {
usize = isalloc(tsdn, ptr);
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
return usize;
}
@@ -2706,7 +2719,7 @@ je_dallocx(void *ptr, int flags) {
tsd_t *tsd = tsd_fetch();
bool fast = tsd_fast(tsd);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
tcache_t *tcache;
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
@@ -2737,12 +2750,12 @@ je_dallocx(void *ptr, int flags) {
} else {
ifree(tsd, ptr, tcache, true);
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
}
JEMALLOC_ALWAYS_INLINE size_t
inallocx(tsdn_t *tsdn, size_t size, int flags) {
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
size_t usize;
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
@@ -2750,7 +2763,7 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) {
} else {
usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
return usize;
}
@@ -2763,7 +2776,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
bool fast = tsd_fast(tsd);
size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
tcache_t *tcache;
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
@@ -2794,7 +2807,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
} else {
isfree(tsd, ptr, usize, tcache, true);
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@@ -2810,14 +2823,14 @@ je_nallocx(size_t size, int flags) {
}
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
usize = inallocx(tsdn, size, flags);
if (unlikely(usize > LARGE_MAXCLASS)) {
return 0;
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
return usize;
}
@@ -2832,9 +2845,9 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
}
tsd = tsd_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
return ret;
}
@@ -2848,9 +2861,9 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
}
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
ret = ctl_nametomib(tsdn, name, mibp, miblenp);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
return ret;
}
@@ -2865,9 +2878,9 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
tsd = tsd_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsd_tsdn(tsd)));
+ check_entry_exit_locking(tsd_tsdn(tsd));
return ret;
}
@@ -2877,9 +2890,9 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
tsdn_t *tsdn;
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
stats_print(write_cb, cbopaque, opts);
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@@ -2890,7 +2903,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
assert(malloc_initialized() || IS_INITIALIZER);
tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
if (unlikely(ptr == NULL)) {
ret = 0;
@@ -2903,7 +2916,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
}
}
- witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
+ check_entry_exit_locking(tsdn);
return ret;
}