authorDavid Goldblatt <davidgoldblatt@fb.com>2017-04-01 02:59:45 (GMT)
committerDavid Goldblatt <davidtgoldblatt@gmail.com>2017-04-07 21:10:27 (GMT)
commitb407a65401bca5828760c8fd5e940e91475a2b3e (patch)
treea1bbd2e2fccd51153f126a228a07491259b44730
parent0a0fcd3e6a0816f0a56fa852416d0ece861c0abb (diff)
Add basic reentrancy-checking support, and allow arena_new to reenter.
This checks whether we're reentrant using thread-local data and, if we are, moves certain internal allocations over to arena 0 (which should be properly initialized after bootstrapping). The immediate benefit is that threads can be spun up from within arena_new, which will in turn enable spinning up background threads there.
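The guard pattern at the core of this change can be illustrated outside of jemalloc. The following is a minimal, self-contained sketch, not jemalloc code: the names enter_alloc, pick_arena, ARENA_AUTOMATIC, and ARENA_FALLBACK are hypothetical stand-ins. A thread-local counter is bumped on entry to the allocator, and any nested (reentrant) entry is routed to a known-initialized fallback arena, mirroring what imalloc_body does with tsd_reentrancy_levelp_get and arena 0.

#include <stdio.h>

/* Hypothetical per-thread reentrancy counter; jemalloc keeps the real one in tsd. */
static _Thread_local int reentrancy_level = 0;

enum { ARENA_AUTOMATIC = -1, ARENA_FALLBACK = 0 };

static int
pick_arena(void) {
	/* During reentrancy, the fully bootstrapped fallback arena is the safe choice. */
	if (reentrancy_level > 1) {
		return ARENA_FALLBACK;
	}
	return ARENA_AUTOMATIC;
}

static void
enter_alloc(int depth) {
	++reentrancy_level;
	printf("level %d -> arena %d\n", reentrancy_level, pick_arena());
	if (depth > 0) {
		/* Simulate an internal hook (e.g. a hook run from arena_new) allocating again. */
		enter_alloc(depth - 1);
	}
	--reentrancy_level;
}

int
main(void) {
	enter_alloc(1);	/* prints "level 1 -> arena -1" then "level 2 -> arena 0" */
	return 0;
}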
-rw-r--r--include/jemalloc/internal/hooks.h4
-rw-r--r--include/jemalloc/internal/jemalloc_internal.h.in9
-rw-r--r--include/jemalloc/internal/private_symbols.txt4
-rw-r--r--include/jemalloc/internal/tsd_structs.h6
-rw-r--r--src/arena.c13
-rw-r--r--src/jemalloc.c94
-rw-r--r--test/src/test.c76
-rw-r--r--test/stress/microbench.c2
-rw-r--r--test/unit/stats.c2
-rw-r--r--test/unit/tsd.c3
10 files changed, 168 insertions, 45 deletions
diff --git a/include/jemalloc/internal/hooks.h b/include/jemalloc/internal/hooks.h
index 608b268..cd49afc 100644
--- a/include/jemalloc/internal/hooks.h
+++ b/include/jemalloc/internal/hooks.h
@@ -1,8 +1,8 @@
#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H
-extern void (*hooks_arena_new_hook)();
-extern void (*hooks_libc_hook)();
+extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
+extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 1c0bf43..62dae0c 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1013,6 +1013,11 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
return arena;
}
+ /* During reentrancy, arena 0 is the safest bet. */
+ if (*tsd_reentrancy_levelp_get(tsd) > 1) {
+ return arena_get(tsd_tsdn(tsd), 0, true);
+ }
+
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL)) {
ret = arena_choose_hard(tsd, internal);
@@ -1193,7 +1198,9 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
-
+ if (!is_internal && *tsd_reentrancy_levelp_get(tsdn_tsd(tsdn)) != 0) {
+ tcache = NULL;
+ }
arena_dalloc(tsdn, ptr, tcache, slow_path);
}
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index deae824..4931d48 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -232,6 +232,7 @@ hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
+hooks_arena_new_hook
hooks_libc_hook
iaalloc
ialloc
@@ -537,6 +538,9 @@ tsd_init_head
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
+tsd_reentrancy_level_get
+tsd_reentrancy_level_set
+tsd_reentrancy_levelp_get
tsd_wrapper_get
tsd_wrapper_set
tsd_nominal
diff --git a/include/jemalloc/internal/tsd_structs.h b/include/jemalloc/internal/tsd_structs.h
index 2dca0bd..12df63d 100644
--- a/include/jemalloc/internal/tsd_structs.h
+++ b/include/jemalloc/internal/tsd_structs.h
@@ -65,7 +65,8 @@ struct tsd_init_head_s {
O(witnesses, witness_list_t, no, no, yes) \
O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t, \
no, no, no) \
- O(witness_fork, bool, yes, no, no)
+ O(witness_fork, bool, yes, no, no) \
+ O(reentrancy_level, int, no, no, no)
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@@ -82,7 +83,8 @@ struct tsd_init_head_s {
TCACHE_ZERO_INITIALIZER, \
ql_head_initializer(witnesses), \
RTREE_ELM_WITNESS_TSD_INITIALIZER, \
- false \
+ false, \
+ 0 \
}
struct tsd_s {
diff --git a/src/arena.c b/src/arena.c
index b78719e..19069bb 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1952,6 +1952,19 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena->base = base;
+ /* We don't support reentrancy for arena 0 bootstrapping. */
+ if (ind != 0 && hooks_arena_new_hook) {
+ /*
+ * If we're here, then arena 0 already exists, so bootstrapping
+ * is done enough that we should have tsd.
+ */
+ int *reentrancy_level = tsd_reentrancy_levelp_get(tsdn_tsd(
+ tsdn));
+ ++*reentrancy_level;
+ hooks_arena_new_hook();
+ --*reentrancy_level;
+ }
+
return arena;
label_error:
if (ind != 0) {
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 9d66f7f..7b205ff 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1656,6 +1656,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
szind_t ind = 0;
size_t usize = 0;
+ /*
+ * For reentrancy checking, we get the old reentrancy level from tsd and
+ * reset it once we're done. In case of early bailout though, we never
+ * bother getting the old level, so we shouldn't try to reset it. This
+ * is indicated by leaving the pointer as NULL.
+ */
+ int *reentrancy_level = NULL;
+
/* Initialize (if we can't prove we don't have to). */
if (sopts->slow) {
if (unlikely(malloc_init())) {
@@ -1708,7 +1716,27 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
* some reason. Let's grab it right away.
*/
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
+
+ /*
+ * If we need to handle reentrancy, we can do it out of a
+ * known-initialized arena (i.e. arena 0).
+ */
+ reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+ ++*reentrancy_level;
+ if (*reentrancy_level == 1) {
+ witness_assert_lockless(tsd_tsdn(tsd));
+ }
+ if (unlikely(*reentrancy_level > 1)) {
+ /*
+ * We should never specify particular arenas or tcaches from
+ * within our internal allocations.
+ */
+ assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC);
+ assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
+ dopts->tcache_ind = TCACHE_IND_NONE;
+ /* We know that arena 0 has already been initialized. */
+ dopts->arena_ind = 0;
+ }
/* If profiling is on, get our profiling context. */
if (config_prof && opt_prof) {
@@ -1769,9 +1797,15 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
UTRACE(0, size, allocation);
}
- witness_assert_lockless(tsd_tsdn(tsd));
-
/* Success! */
+ if (*reentrancy_level == 1) {
+ witness_assert_lockless(tsd_tsdn(tsd));
+ }
+ /*
+ * If we got here, we never bailed out on a failure path, so
+ * reentrancy_level is non-null.
+ */
+ --*reentrancy_level;
*dopts->result = allocation;
return 0;
@@ -1795,6 +1829,10 @@ label_oom:
*dopts->result = NULL;
}
+ if (reentrancy_level != NULL) {
+ --*reentrancy_level;
+ }
+
return ENOMEM;
/*
@@ -1822,6 +1860,10 @@ label_invalid_alignment:
*dopts->result = NULL;
}
+ if (reentrancy_level != NULL) {
+ --*reentrancy_level;
+ }
+
return EINVAL;
}
@@ -1996,7 +2038,9 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) {
JEMALLOC_ALWAYS_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
- witness_assert_lockless(tsd_tsdn(tsd));
+ if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+ witness_assert_lockless(tsd_tsdn(tsd));
+ }
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -2021,7 +2065,9 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
JEMALLOC_ALWAYS_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
- witness_assert_lockless(tsd_tsdn(tsd));
+ if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+ witness_assert_lockless(tsd_tsdn(tsd));
+ }
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -2056,7 +2102,11 @@ je_realloc(void *ptr, size_t size) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
- ifree(tsd, ptr, tcache_get(tsd), true);
+ tcache_t *tcache = NULL;
+ if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ }
+ ifree(tsd, ptr, tcache, true);
return NULL;
}
size = 1;
@@ -2111,13 +2161,21 @@ je_free(void *ptr) {
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
+ if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+ witness_assert_lockless(tsd_tsdn(tsd));
+ }
+ tcache_t *tcache = NULL;
+ if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ }
if (likely(!malloc_slow)) {
- ifree(tsd, ptr, tcache_get(tsd), false);
+ ifree(tsd, ptr, tcache, false);
} else {
- ifree(tsd, ptr, tcache_get(tsd), true);
+ ifree(tsd, ptr, tcache, true);
+ }
+ if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+ witness_assert_lockless(tsd_tsdn(tsd));
}
- witness_assert_lockless(tsd_tsdn(tsd));
}
}
@@ -2599,13 +2657,19 @@ je_dallocx(void *ptr, int flags) {
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(*tsd_reentrancy_levelp_get(tsd) == 0);
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
} else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
}
} else {
- tcache = tcache_get(tsd);
+ if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
}
UTRACE(ptr, 0, 0);
@@ -2646,13 +2710,19 @@ je_sdallocx(void *ptr, size_t size, int flags) {
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(*tsd_reentrancy_levelp_get(tsd) == 0);
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
} else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
}
} else {
- tcache = tcache_get(tsd);
+ if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
}
UTRACE(ptr, 0, 0);
diff --git a/test/src/test.c b/test/src/test.c
index fe6dc60..01a4d73 100644
--- a/test/src/test.c
+++ b/test/src/test.c
@@ -10,31 +10,56 @@ static const char * test_name = "";
/* Reentrancy testing helpers. */
#define NUM_REENTRANT_ALLOCS 20
-static bool reentrant = false;
-static bool hook_ran = false;
-static void *to_free[NUM_REENTRANT_ALLOCS];
+typedef enum {
+ non_reentrant = 0,
+ libc_reentrant = 1,
+ arena_new_reentrant = 2
+} reentrancy_t;
+static reentrancy_t reentrancy;
+
+static bool libc_hook_ran = false;
+static bool arena_new_hook_ran = false;
+
+static const char *
+reentrancy_t_str(reentrancy_t r) {
+ switch (r) {
+ case non_reentrant:
+ return "non-reentrant";
+ case libc_reentrant:
+ return "libc-reentrant";
+ case arena_new_reentrant:
+ return "arena_new-reentrant";
+ default:
+ unreachable();
+ }
+}
static void
-reentrancy_hook() {
- hook_ran = true;
- hooks_libc_hook = NULL;
+do_hook(bool *hook_ran, void (**hook)()) {
+ *hook_ran = true;
+ *hook = NULL;
- void *to_free_local[NUM_REENTRANT_ALLOCS];
size_t alloc_size = 1;
for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
- to_free[i] = malloc(alloc_size);
- to_free_local[i] = malloc(alloc_size);
+ free(malloc(alloc_size));
alloc_size *= 2;
}
- for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
- free(to_free_local[i]);
- }
+}
+
+static void
+libc_reentrancy_hook() {
+ do_hook(&libc_hook_ran, &hooks_libc_hook);
+}
+
+static void
+arena_new_reentrancy_hook() {
+ do_hook(&arena_new_hook_ran, &hooks_arena_new_hook);
}
/* Actual test infrastructure. */
bool
test_is_reentrant() {
- return reentrant;
+ return reentrancy != non_reentrant;
}
JEMALLOC_FORMAT_PRINTF(1, 2)
@@ -81,9 +106,8 @@ p_test_init(const char *name) {
void
p_test_fini(void) {
test_counts[test_status]++;
- malloc_printf("%s: %s (%s)\n", test_name,
- test_status_string(test_status),
- reentrant ? "reentrant" : "non-reentrant");
+ malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy),
+ test_status_string(test_status));
}
static test_status_t
@@ -106,24 +130,28 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
ret = test_status_pass;
for (; t != NULL; t = va_arg(ap, test_t *)) {
/* Non-reentrant run. */
- reentrant = false;
+ reentrancy = non_reentrant;
+ hooks_arena_new_hook = hooks_libc_hook = NULL;
t();
if (test_status > ret) {
ret = test_status;
}
/* Reentrant run. */
if (do_reentrant) {
- reentrant = true;
- hooks_libc_hook = &reentrancy_hook;
+ reentrancy = libc_reentrant;
+ hooks_arena_new_hook = NULL;
+ hooks_libc_hook = &libc_reentrancy_hook;
t();
if (test_status > ret) {
ret = test_status;
}
- if (hook_ran) {
- hook_ran = false;
- for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
- free(to_free[i]);
- }
+
+ reentrancy = arena_new_reentrant;
+ hooks_libc_hook = NULL;
+ hooks_arena_new_hook = &arena_new_reentrancy_hook;
+ t();
+ if (test_status > ret) {
+ ret = test_status;
}
}
}
diff --git a/test/stress/microbench.c b/test/stress/microbench.c
index 6ed1500..73cbcab 100644
--- a/test/stress/microbench.c
+++ b/test/stress/microbench.c
@@ -156,7 +156,7 @@ TEST_END
int
main(void) {
- return test(
+ return test_no_reentrancy(
test_malloc_vs_mallocx,
test_free_vs_dallocx,
test_dallocx_vs_sdallocx,
diff --git a/test/unit/stats.c b/test/unit/stats.c
index f8c6b10..1619f5b 100644
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -351,7 +351,7 @@ TEST_END
int
main(void) {
- return test(
+ return test_no_reentrancy(
test_stats_summary,
test_stats_large,
test_stats_arenas_summary,
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
index 4a0f318..3811467 100644
--- a/test/unit/tsd.c
+++ b/test/unit/tsd.c
@@ -79,7 +79,6 @@ thd_start(void *arg) {
}
TEST_BEGIN(test_tsd_main_thread) {
- test_skip_if(test_is_reentrant());
thd_start((void *)(uintptr_t)0xa5f3e329);
}
TEST_END
@@ -144,7 +143,7 @@ main(void) {
data_tsd_boot();
data_test_started = true;
- return test(
+ return test_no_reentrancy(
test_tsd_main_thread,
test_tsd_sub_thread,
test_tsd_reincarnation);