author     David Goldblatt <davidgoldblatt@fb.com>      2017-04-01 02:59:45 (GMT)
committer  David Goldblatt <davidtgoldblatt@gmail.com>  2017-04-07 21:10:27 (GMT)
commit     b407a65401bca5828760c8fd5e940e91475a2b3e (patch)
tree       a1bbd2e2fccd51153f126a228a07491259b44730 /src
parent     0a0fcd3e6a0816f0a56fa852416d0ece861c0abb (diff)
Add basic reentrancy-checking support, and allow arena_new to reenter.
This checks whether or not we're reentrant using thread-local data, and, if we
are, moves certain internal allocations to use arena 0 (which should be properly
initialized after bootstrapping).
The immediate thing this allows is spinning up threads in arena_new, which will
enable spinning up background threads there.
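
As a rough illustration of the mechanism described above, here is a minimal sketch of the reentrancy-guard pattern, using plain C and a `__thread` counter as a stand-in for jemalloc's tsd. None of the identifiers below (guarded_alloc, run_hook, example_hook) are real jemalloc APIs; the point is only that a thread-local level is bumped around code that may call back into the allocator, and an allocation that sees a level greater than one routes itself to known-bootstrapped state (arena 0, no tcache, in the real code).

```c
#include <stdio.h>
#include <stdlib.h>

static __thread int reentrancy_level;	/* stand-in for the tsd field */

/*
 * An allocation entry point that routes reentrant calls to a known-good
 * path (in the real code: arena 0, with the tcache disabled).
 */
static void *
guarded_alloc(size_t size) {
	void *p;
	++reentrancy_level;
	if (reentrancy_level > 1) {
		printf("reentrant: route to bootstrapped state (arena 0)\n");
		p = malloc(size);
	} else {
		printf("normal fast path\n");
		p = malloc(size);
	}
	--reentrancy_level;
	return p;
}

/*
 * Run a user hook with the level bumped, the way arena_new now wraps
 * hooks_arena_new_hook(); any allocation the hook makes is seen as
 * reentrant.
 */
static void
run_hook(void (*hook)(void)) {
	++reentrancy_level;
	hook();
	--reentrancy_level;
}

static void
example_hook(void) {
	free(guarded_alloc(64));	/* observed as reentrant */
}

int
main(void) {
	free(guarded_alloc(16));	/* normal path */
	run_hook(example_hook);		/* reentrant path */
	return 0;
}
```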
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c     13
-rw-r--r--  src/jemalloc.c  94
2 files changed, 95 insertions, 12 deletions
diff --git a/src/arena.c b/src/arena.c
index b78719e..19069bb 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1952,6 +1952,19 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 
 	arena->base = base;
 
+	/* We don't support reentrancy for arena 0 bootstrapping. */
+	if (ind != 0 && hooks_arena_new_hook) {
+		/*
+		 * If we're here, then arena 0 already exists, so bootstrapping
+		 * is done enough that we should have tsd.
+		 */
+		int *reentrancy_level = tsd_reentrancy_levelp_get(tsdn_tsd(
+		    tsdn));
+		++*reentrancy_level;
+		hooks_arena_new_hook();
+		--*reentrancy_level;
+	}
+
 	return arena;
 label_error:
 	if (ind != 0) {
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 9d66f7f..7b205ff 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1656,6 +1656,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
 	szind_t ind = 0;
 	size_t usize = 0;
 
+	/*
+	 * For reentrancy checking, we get the old reentrancy level from tsd and
+	 * reset it once we're done. In case of early bailout though, we never
+	 * bother getting the old level, so we shouldn't try to reset it. This
+	 * is indicated by leaving the pointer as NULL.
+	 */
+	int *reentrancy_level = NULL;
+
 	/* Initialize (if we can't prove we don't have to). */
 	if (sopts->slow) {
 		if (unlikely(malloc_init())) {
@@ -1708,7 +1716,27 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
 	 * some reason. Let's grab it right away.
 	 */
 	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
+
+	/*
+	 * If we need to handle reentrancy, we can do it out of a
+	 * known-initialized arena (i.e. arena 0).
+	 */
+	reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+	++*reentrancy_level;
+	if (*reentrancy_level == 1) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
+	if (unlikely(*reentrancy_level > 1)) {
+		/*
+		 * We should never specify particular arenas or tcaches from
+		 * within our internal allocations.
+		 */
+		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC);
+		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
+		dopts->tcache_ind = TCACHE_IND_NONE;
+		/* We know that arena 0 has already been initialized. */
+		dopts->arena_ind = 0;
+	}
 
 	/* If profiling is on, get our profiling context. */
 	if (config_prof && opt_prof) {
@@ -1769,9 +1797,15 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
 		UTRACE(0, size, allocation);
 	}
 
-	witness_assert_lockless(tsd_tsdn(tsd));
-
 	/* Success! */
+	if (*reentrancy_level == 1) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
+	/*
+	 * If we got here, we never bailed out on a failure path, so
+	 * reentrancy_level is non-null.
+	 */
+	--*reentrancy_level;
 	*dopts->result = allocation;
 	return 0;
 
@@ -1795,6 +1829,10 @@ label_oom:
 		*dopts->result = NULL;
 	}
 
+	if (reentrancy_level != NULL) {
+		--*reentrancy_level;
+	}
+
 	return ENOMEM;
 
 	/*
@@ -1822,6 +1860,10 @@ label_invalid_alignment:
 		*dopts->result = NULL;
 	}
 
+	if (reentrancy_level != NULL) {
+		--*reentrancy_level;
+	}
+
 	return EINVAL;
 }
 
@@ -1996,7 +2038,9 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) {
 
 JEMALLOC_ALWAYS_INLINE_C void
 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
-	witness_assert_lockless(tsd_tsdn(tsd));
+	if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
 
 	assert(ptr != NULL);
 	assert(malloc_initialized() || IS_INITIALIZER);
@@ -2021,7 +2065,9 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 
 JEMALLOC_ALWAYS_INLINE_C void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
-	witness_assert_lockless(tsd_tsdn(tsd));
+	if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
 
 	assert(ptr != NULL);
 	assert(malloc_initialized() || IS_INITIALIZER);
@@ -2056,7 +2102,11 @@ je_realloc(void *ptr, size_t size) {
 		/* realloc(ptr, 0) is equivalent to free(ptr). */
 		UTRACE(ptr, 0, 0);
 		tsd = tsd_fetch();
-		ifree(tsd, ptr, tcache_get(tsd), true);
+		tcache_t *tcache = NULL;
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		}
+		ifree(tsd, ptr, tcache, true);
 		return NULL;
 	}
 	size = 1;
@@ -2111,13 +2161,21 @@ je_free(void *ptr) {
 	UTRACE(ptr, 0, 0);
 	if (likely(ptr != NULL)) {
 		tsd_t *tsd = tsd_fetch();
-		witness_assert_lockless(tsd_tsdn(tsd));
+		if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+			witness_assert_lockless(tsd_tsdn(tsd));
+		}
+		tcache_t *tcache = NULL;
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		}
 		if (likely(!malloc_slow)) {
-			ifree(tsd, ptr, tcache_get(tsd), false);
+			ifree(tsd, ptr, tcache, false);
 		} else {
-			ifree(tsd, ptr, tcache_get(tsd), true);
+			ifree(tsd, ptr, tcache, true);
+		}
+		if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+			witness_assert_lockless(tsd_tsdn(tsd));
 		}
-		witness_assert_lockless(tsd_tsdn(tsd));
 	}
 }
 
@@ -2599,13 +2657,19 @@ je_dallocx(void *ptr, int flags) {
 	tsd = tsd_fetch();
 	witness_assert_lockless(tsd_tsdn(tsd));
 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+		/* Not allowed to be reentrant and specify a custom tcache. */
+		assert(*tsd_reentrancy_levelp_get(tsd) == 0);
 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
 			tcache = NULL;
 		} else {
 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
 		}
 	} else {
-		tcache = tcache_get(tsd);
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		} else {
+			tcache = NULL;
+		}
 	}
 
 	UTRACE(ptr, 0, 0);
@@ -2646,13 +2710,19 @@ je_sdallocx(void *ptr, size_t size, int flags) {
 
 	witness_assert_lockless(tsd_tsdn(tsd));
 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+		/* Not allowed to be reentrant and specify a custom tcache. */
+		assert(*tsd_reentrancy_levelp_get(tsd) == 0);
 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
 			tcache = NULL;
 		} else {
 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
 		}
 	} else {
-		tcache = tcache_get(tsd);
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		} else {
+			tcache = NULL;
+		}
 	}
 
 	UTRACE(ptr, 0, 0);
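
For context on the use case named in the commit message, here is a hedged sketch of the kind of callback one might eventually run from arena_new: it spawns a background thread and allocates on the calling thread. The hook body and names below (my_arena_new_hook, worker) are hypothetical and not an API this commit adds; the point is only that, with the reentrancy level bumped around the hook, any allocation the hook performs is serviced from arena 0 with the tcache disabled, rather than from the arena that is still being constructed.

```c
#include <pthread.h>
#include <stdlib.h>

/* Hypothetical background worker; real background threads land in a
 * later change. */
static void *
worker(void *arg) {
	(void)arg;
	return NULL;
}

/* Hypothetical hook body of the kind arena_new can now run safely. */
static void
my_arena_new_hook(void) {
	pthread_t tid;
	void *scratch = malloc(128);	/* reentrant allocation -> arena 0 */
	if (pthread_create(&tid, NULL, worker, NULL) == 0) {
		pthread_detach(tid);
	}
	free(scratch);
}

int
main(void) {
	/* Stand-alone driver so the sketch compiles; in jemalloc the hook
	 * would be invoked from arena_new with the level already bumped. */
	my_arena_new_hook();
	return 0;
}
```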