author     Qi Wang <interwq@gwu.edu>    2017-06-07 04:44:39 (GMT)
committer  Qi Wang <interwq@gmail.com>  2017-06-07 18:03:49 (GMT)
commit     00869e39a334f3d869dfb9f8e651c2de3dded76f (patch)
tree       41502cb2f1679d9e0e1132fb2c92eec1f2b5a7b3
parent     29c2577ee0bfa57009a5827bd44cab04b738a914 (diff)
Make tsd no-cleanup during tsd reincarnation.
Since tsd cleanup isn't guaranteed when reincarnated, we set up tsd in a way
that needs no cleanup, by making it go through the slow path instead.
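The hazard the commit message describes can be reproduced with plain POSIX thread-specific data, independent of jemalloc. The sketch below is illustrative only and not part of this change; the key names and destructors (tsd_key, other_key, tsd_dtor, other_dtor) are hypothetical. It shows how one key's destructor can re-install ("reincarnate") another key's value after that value's destructor has already run, with no guarantee of a further cleanup pass.

```c
/* Illustrative sketch only -- not part of this commit. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t tsd_key;   /* stands in for jemalloc's tsd */
static pthread_key_t other_key; /* some other library's per-thread data */

static void
tsd_dtor(void *arg) {
	printf("tsd destructor: cleaning up %p\n", arg);
	free(arg);
}

static void
other_dtor(void *arg) {
	(void)arg;
	/*
	 * If this destructor happens to run after tsd_dtor, it "reincarnates"
	 * the tsd value after its cleanup already happened.  Whether tsd_dtor
	 * runs again is implementation-dependent: destructor order is
	 * unspecified and only a bounded number of passes
	 * (PTHREAD_DESTRUCTOR_ITERATIONS) is required.
	 */
	void *reincarnated = malloc(1);
	if (pthread_setspecific(tsd_key, reincarnated) == 0) {
		printf("other destructor: reincarnated tsd as %p\n",
		    reincarnated);
	}
}

static void *
thd(void *arg) {
	(void)arg;
	pthread_setspecific(tsd_key, malloc(1));
	pthread_setspecific(other_key, (void *)1);
	return NULL;
}

int
main(void) {
	pthread_t t;
	pthread_key_create(&tsd_key, tsd_dtor);
	pthread_key_create(&other_key, other_dtor);
	pthread_create(&t, NULL, thd, NULL);
	pthread_join(t, NULL);
	return 0;
}
```

Because a further destructor pass is not guaranteed, the reincarnated tsd in this change is initialized via tsd_data_init_nocleanup() so that nothing is left to clean up: the tcache stays disabled, arenas_tdata is bypassed, and the reentrancy level is set to 1 so allocations take the slow path.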
-rw-r--r--   include/jemalloc/internal/tsd.h    2
-rw-r--r--   src/jemalloc.c                     3
-rw-r--r--   src/tsd.c                         66
-rw-r--r--   test/unit/tsd.c                    4
4 files changed, 51 insertions, 24 deletions
diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
index f304e1d..4efaf4e 100644
--- a/include/jemalloc/internal/tsd.h
+++ b/include/jemalloc/internal/tsd.h
@@ -154,7 +154,6 @@ void malloc_tsd_dalloc(void *wrapper);
 void malloc_tsd_cleanup_register(bool (*f)(void));
 tsd_t *malloc_tsd_boot0(void);
 void malloc_tsd_boot1(void);
-bool tsd_data_init(void *arg);
 void tsd_cleanup(void *arg);
 tsd_t *tsd_fetch_slow(tsd_t *tsd);
 void tsd_slow_update(tsd_t *tsd);
@@ -228,6 +227,7 @@ MALLOC_TSD
 #define O(n, t, nt)						\
 JEMALLOC_ALWAYS_INLINE void					\
 tsd_##n##_set(tsd_t *tsd, t val) {				\
+	assert(tsd->state != tsd_state_reincarnated);		\
 	*tsd_##n##p_get(tsd) = val;				\
 }
 MALLOC_TSD
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 7e695d6..9a5685b 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1764,7 +1764,8 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 	 * We should never specify particular arenas or tcaches from
 	 * within our internal allocations.
 	 */
-	assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC);
+	assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
+	    dopts->tcache_ind == TCACHE_IND_NONE);
 	assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
 	dopts->tcache_ind = TCACHE_IND_NONE;
 	/* We know that arena 0 has already been initialized. */
diff --git a/src/tsd.c b/src/tsd.c
--- a/src/tsd.c
+++ b/src/tsd.c
@@ -63,6 +63,45 @@ tsd_slow_update(tsd_t *tsd) {
 	}
 }
 
+static bool
+tsd_data_init(tsd_t *tsd) {
+	/*
+	 * We initialize the rtree context first (before the tcache), since the
+	 * tcache initialization depends on it.
+	 */
+	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+
+	return tsd_tcache_enabled_data_init(tsd);
+}
+
+static void
+assert_tsd_data_cleanup_done(tsd_t *tsd) {
+	assert(!tsd_nominal(tsd));
+	assert(*tsd_arenap_get_unsafe(tsd) == NULL);
+	assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
+	assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
+	assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
+	assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
+	assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
+}
+
+static bool
+tsd_data_init_nocleanup(tsd_t *tsd) {
+	assert(tsd->state == tsd_state_reincarnated);
+	/*
+	 * During reincarnation, there is no guarantee that the cleanup function
+	 * will be called (deallocation may happen after all tsd destructors).
+	 * We set up tsd in a way that no cleanup is needed.
+	 */
+	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+	*tsd_arenas_tdata_bypassp_get(tsd) = true;
+	*tsd_tcache_enabledp_get_unsafe(tsd) = false;
+	*tsd_reentrancy_levelp_get(tsd) = 1;
+	assert_tsd_data_cleanup_done(tsd);
+
+	return false;
+}
+
 tsd_t *
 tsd_fetch_slow(tsd_t *tsd) {
 	if (tsd->state == tsd_state_nominal_slow) {
@@ -79,7 +118,7 @@ tsd_fetch_slow(tsd_t *tsd) {
 	} else if (tsd->state == tsd_state_purgatory) {
 		tsd->state = tsd_state_reincarnated;
 		tsd_set(tsd);
-		tsd_data_init(tsd);
+		tsd_data_init_nocleanup(tsd);
 	} else {
 		assert(tsd->state == tsd_state_reincarnated);
 	}
@@ -131,21 +170,6 @@ malloc_tsd_cleanup_register(bool (*f)(void)) {
 	ncleanups++;
 }
 
-bool
-tsd_data_init(void *arg) {
-	tsd_t *tsd = (tsd_t *)arg;
-	/*
-	 * We initialize the rtree context first (before the tcache), since the
-	 * tcache initialization depends on it.
-	 */
-	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
-
-	if (tsd_tcache_enabled_data_init(tsd)) {
-		return true;
-	}
-	return false;
-}
-
 static void
 tsd_do_data_cleanup(tsd_t *tsd) {
 	prof_tdata_cleanup(tsd);
@@ -164,14 +188,16 @@ tsd_cleanup(void *arg) {
 	case tsd_state_uninitialized:
 		/* Do nothing. */
 		break;
-	case tsd_state_nominal:
-	case tsd_state_nominal_slow:
 	case tsd_state_reincarnated:
 		/*
 		 * Reincarnated means another destructor deallocated memory
-		 * after this destructor was called.  Reset state to
-		 * tsd_state_purgatory and request another callback.
+		 * after the destructor was called.  Cleanup isn't required but
+		 * is still called for testing and completeness.
 		 */
+		assert_tsd_data_cleanup_done(tsd);
+		/* Fall through. */
+	case tsd_state_nominal:
+	case tsd_state_nominal_slow:
 		tsd_do_data_cleanup(tsd);
 		tsd->state = tsd_state_purgatory;
 		tsd_set(tsd);
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
index c9a7d80..6c47913 100644
--- a/test/unit/tsd.c
+++ b/test/unit/tsd.c
@@ -106,8 +106,8 @@ thd_start_reincarnated(void *arg) {
 	    "TSD state should be reincarnated\n");
 	p = mallocx(1, MALLOCX_TCACHE_NONE);
 	assert_ptr_not_null(p, "Unexpected malloc() failure");
-	assert_ptr_not_null(*tsd_arenap_get_unsafe(tsd),
-	    "Should have tsd arena set after reincarnation.");
+	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+	    "Should not have tsd arena set after reincarnation.");
 	free(p);
 	tsd_cleanup((void *)tsd);