author     David Goldblatt <davidgoldblatt@fb.com>      2018-05-15 21:15:43 (GMT)
committer  David Goldblatt <davidtgoldblatt@gmail.com>  2018-05-18 18:43:03 (GMT)
commit     a7f749c9af0d5ca51b5b5eaf35c2c2913d8a77e1 (patch)
tree       ed1f92900318575e0649e38e6104c29d4fc77df0
parent     0379235f47585ac8f583ba85aab9d294abfa44b5 (diff)
Hooks: Protect against reentrancy.
Previously, we made the user deal with this themselves, but that's not good enough; if hooks may allocate, we should test the allocation pathways reachable from within hooks. If we're doing that, we might as well actually implement the protection for the user.
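"Made the user deal with this themselves" refers to the guidance removed from hook.h below: hook authors were expected to keep their own thread-local flag around any allocation they made from inside a hook. Roughly like the following sketch (names invented for illustration; C11 _Thread_local and the public mallocx/free API assumed; "type" is hook_alloc_t in the internal API, shown as int to keep the sketch self-contained):

#include <stdbool.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

/* Per-thread guard the old hook.h comment told users to maintain. */
static _Thread_local bool in_my_hook = false;

static void
my_alloc_hook(void *extra, int type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)result; (void)result_raw; (void)args_raw;
	if (in_my_hook) {
		return;		/* Re-entered via our own allocation; bail. */
	}
	in_my_hook = true;
	void *p = mallocx(32, 0);	/* Would otherwise re-invoke the hooks. */
	if (p != NULL) {
		free(p);
	}
	in_my_hook = false;
}

With this patch, that flag moves into jemalloc itself.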
-rw-r--r--  include/jemalloc/internal/hook.h    6
-rw-r--r--  include/jemalloc/internal/tsd.h     2
-rw-r--r--  src/hook.c                         68
-rw-r--r--  test/unit/hook.c                   42
4 files changed, 106 insertions, 12 deletions
diff --git a/include/jemalloc/internal/hook.h b/include/jemalloc/internal/hook.h
index 9ea9c6f..ee246b1 100644
--- a/include/jemalloc/internal/hook.h
+++ b/include/jemalloc/internal/hook.h
@@ -25,9 +25,9 @@
* and only calls the alloc hook).
*
* Reentrancy:
- * Is not protected against. If your hooks allocate, then the hooks will be
- * called again. Note that you can guard against this with a thread-local
- * "in_hook" bool.
+ * Reentrancy is guarded against from within the hook implementation. If you
+ * call allocator functions from within a hook, the hooks will not be invoked
+ * again.
* Threading:
* The installation of a hook synchronizes with all its uses. If you can
* prove the installation of a hook happens-before a jemalloc entry point,
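Concretely, the guarantee documented above means a hook body can allocate with no guard of its own; a nested mallocx()/free() takes the new early-exit path instead of re-invoking the hooks. A hedged sketch (invented names; the callback shape mirrors test_alloc_hook in test/unit/hook.c later in this patch, with hook_alloc_t again shown as int):

#include <stdint.h>
#include <jemalloc/jemalloc.h>

/* Illustration only: an alloc hook that allocates without any guard. */
static void
logging_alloc_hook(void *extra, int type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)result; (void)result_raw; (void)args_raw;
	/*
	 * This runs inside hook_invoke_alloc().  With this patch, the
	 * allocation below hits the in_hook early exit, so the hooks are
	 * not invoked a second time.
	 */
	void *scratch = mallocx(64, 0);
	if (scratch != NULL) {
		free(scratch);
	}
}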
diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
index 845a3f0..3097ce0 100644
--- a/include/jemalloc/internal/tsd.h
+++ b/include/jemalloc/internal/tsd.h
@@ -66,6 +66,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
#define MALLOC_TSD \
O(tcache_enabled, bool, bool) \
O(arenas_tdata_bypass, bool, bool) \
+ O(in_hook, bool, bool) \
O(reentrancy_level, int8_t, int8_t) \
O(narenas_tdata, uint32_t, uint32_t) \
O(offset_state, uint64_t, uint64_t) \
@@ -85,6 +86,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
ATOMIC_INIT(tsd_state_uninitialized), \
TCACHE_ENABLED_ZERO_INITIALIZER, \
false, \
+ false, \
0, \
0, \
0, \
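The tsd.h change is this small because tsd.h is driven by an X-macro: each O(name, type, nullable_type) entry in MALLOC_TSD is expanded several times to declare the field and to generate accessors such as the tsdn_in_hookp_get() used in src/hook.c below; the initializer list in the second hunk is maintained by hand, which is why a matching "false," is added there as well. A much-reduced sketch of the pattern, with invented names and nothing assumed about jemalloc's real expansions:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, two-column version of the X-macro pattern tsd.h uses. */
#define MY_TSD					\
	O(in_hook,		bool)		\
	O(reentrancy_level,	int8_t)

/* Expansion 1: struct fields. */
typedef struct {
#define O(name, type) type name;
	MY_TSD
#undef O
} my_tsd_t;

/* Expansion 2: pointer accessors, e.g. my_tsd_in_hookp_get(). */
#define O(name, type)				\
	static inline type *			\
	my_tsd_##name##p_get(my_tsd_t *tsd) {	\
		return &tsd->name;		\
	}
MY_TSD
#undef O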
diff --git a/src/hook.c b/src/hook.c
index 24afe99..f66d423 100644
--- a/src/hook.c
+++ b/src/hook.c
@@ -99,12 +99,62 @@ for (int for_each_hook_counter = 0; \
#define FOR_EACH_HOOK_END \
}
+static bool *
+hook_reentrantp() {
+ /*
+ * We prevent user reentrancy within hooks. This is basically just a
+ * thread-local bool that triggers an early-exit.
+ *
+ * We don't fold in_hook into reentrancy. There are two reasons for
+ * this:
+ * - Right now, we turn on reentrancy during things like extent hook
+ * execution. Allocating during extent hooks is not officially
+ * supported, but we don't want to break it for the time being. These
+ * sorts of allocations should probably still be hooked, though.
+ * - If a hook allocates, we may want it to be relatively fast (after
+ * all, it executes on every allocator operation). Turning on
+ * reentrancy is a fairly heavyweight mode (disabling tcache,
+ * redirecting to arena 0, etc.). It's possible we may one day want
+ * to turn on reentrant mode here, if it proves too difficult to keep
+ * this working. But that's fairly easy for us to see; OTOH, people
+ * not using hooks because they're too slow is easy for us to miss.
+ *
+ * The tricky part is that this code might get invoked even if we don't
+ * have access to tsd. This function mimics getting a pointer to
+ * thread-local data, except that it might secretly return a pointer to
+ * some global data if we know that the caller will take the early-exit
+ * path. If we return a bool that indicates that we are reentrant, then
+ * the caller will go down the early exit path, leaving the global
+ * untouched.
+ */
+ static bool in_hook_global = true;
+ tsdn_t *tsdn = tsdn_fetch();
+ bool *in_hook = tsdn_in_hookp_get(tsdn);
+ if (in_hook != NULL) {
+ return in_hook;
+ }
+ return &in_hook_global;
+}
+
+#define HOOK_PROLOGUE \
+ if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
+ return; \
+ } \
+ bool *in_hook = hook_reentrantp(); \
+ if (*in_hook) { \
+ return; \
+ } \
+ *in_hook = true;
+
+#define HOOK_EPILOGUE \
+ *in_hook = false;
+
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
uintptr_t args_raw[3]) {
- if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {
- return;
- }
+ HOOK_PROLOGUE
+
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_alloc h = hook.hooks.alloc_hook;
@@ -112,13 +162,13 @@ hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
h(hook.hooks.extra, type, result, result_raw, args_raw);
}
FOR_EACH_HOOK_END
+
+ HOOK_EPILOGUE
}
void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
- if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {
- return;
- }
+ HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_dalloc h = hook.hooks.dalloc_hook;
@@ -126,14 +176,13 @@ hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
h(hook.hooks.extra, type, address, args_raw);
}
FOR_EACH_HOOK_END
+ HOOK_EPILOGUE
}
void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
- if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {
- return;
- }
+ HOOK_PROLOGUE
hooks_internal_t hook;
FOR_EACH_HOOK_BEGIN(&hook)
hook_expand h = hook.hooks.expand_hook;
@@ -142,4 +191,5 @@ hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
result_raw, args_raw);
}
FOR_EACH_HOOK_END
+ HOOK_EPILOGUE
}
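Stripped of the tsd machinery and the nhooks fast path, the guard that HOOK_PROLOGUE and HOOK_EPILOGUE add amounts to the pattern below. This is a standalone model with invented names (hook_reentrantp_model, hook_invoke_model), not the patch's actual code; C11 _Thread_local stands in for the flag that the real code reaches through tsdn_fetch()/tsdn_in_hookp_get():

#include <stdbool.h>

static _Thread_local bool in_hook_tls = false;
static bool in_hook_global = true;	/* Always reads as "already in a hook". */

/*
 * Mirrors hook_reentrantp(): return the per-thread flag, or, if per-thread
 * state is unavailable, the always-true global so the caller bails out and
 * leaves the global untouched.
 */
static bool *
hook_reentrantp_model(bool tsd_available) {
	return tsd_available ? &in_hook_tls : &in_hook_global;
}

static void
hook_invoke_model(void (*hook_body)(void)) {
	bool *in_hook = hook_reentrantp_model(true);
	if (*in_hook) {
		return;			/* HOOK_PROLOGUE's early exit. */
	}
	*in_hook = true;		/* Rest of HOOK_PROLOGUE. */
	/*
	 * In the real allocator, an allocation made by hook_body re-enters
	 * hook_invoke_*(); the nested call sees *in_hook == true and returns
	 * immediately, so hooks never run recursively.
	 */
	hook_body();
	*in_hook = false;		/* HOOK_EPILOGUE. */
}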
diff --git a/test/unit/hook.c b/test/unit/hook.c
index 3f85ff1..72fcc43 100644
--- a/test/unit/hook.c
+++ b/test/unit/hook.c
@@ -26,6 +26,45 @@ reset_args() {
}
static void
+alloc_free_size(size_t sz) {
+ void *ptr = mallocx(sz, 0);
+ free(ptr);
+ ptr = mallocx(sz, 0);
+ free(ptr);
+ ptr = mallocx(sz, MALLOCX_TCACHE_NONE);
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+}
+
+/*
+ * We want to support a degree of user reentrancy. This tests a variety of
+ * allocation scenarios.
+ */
+static void
+be_reentrant() {
+ /* Let's make sure the tcache is non-empty if enabled. */
+ alloc_free_size(1);
+ alloc_free_size(1024);
+ alloc_free_size(64 * 1024);
+ alloc_free_size(256 * 1024);
+ alloc_free_size(1024 * 1024);
+
+ /* Some reallocation. */
+ void *ptr = mallocx(129, 0);
+ ptr = rallocx(ptr, 130, 0);
+ free(ptr);
+
+ ptr = mallocx(2 * 1024 * 1024, 0);
+ free(ptr);
+ ptr = mallocx(1 * 1024 * 1024, 0);
+ ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
+ free(ptr);
+
+ ptr = mallocx(1, 0);
+ ptr = rallocx(ptr, 1000, 0);
+ free(ptr);
+}
+
+static void
set_args_raw(uintptr_t *args_raw, int nargs) {
memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
}
@@ -52,6 +91,7 @@ test_alloc_hook(void *extra, hook_alloc_t type, void *result,
arg_result = result;
arg_result_raw = result_raw;
set_args_raw(args_raw, 3);
+ be_reentrant();
}
static void
@@ -62,6 +102,7 @@ test_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
arg_type = (int)type;
arg_address = address;
set_args_raw(args_raw, 3);
+ be_reentrant();
}
static void
@@ -76,6 +117,7 @@ test_expand_hook(void *extra, hook_expand_t type, void *address,
arg_new_usize = new_usize;
arg_result_raw = result_raw;
set_args_raw(args_raw, 4);
+ be_reentrant();
}
TEST_BEGIN(test_hooks_basic) {