author    Qi Wang <interwq@gwu.edu>    2017-05-31 22:21:10 (GMT)
committer Qi Wang <interwq@gmail.com>  2017-06-02 19:59:21 (GMT)
commit    530c07a45ba3ea744b280c9df5d94165839f7b09 (patch)
tree      626f2c4901169e416533a3d0b9a3ab64e2f4e006
parent    340071f0cf6902a79102328960f5cf1ced87f3c2 (diff)
Set reentrancy level to 1 during init.
This makes sure we go down the slow path with a0 during init.
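To illustrate the idea: with the reentrancy level raised, any allocation triggered while init is still running is routed down the slow path, which falls back to the bootstrap arena a0 (a0malloc()) instead of the not-yet-ready per-thread fast path. Below is a minimal, standalone sketch of that gating; the global counter and the *_sketch names are hypothetical stand-ins for jemalloc's per-thread tsd reentrancy level and its real entry points, not jemalloc's actual implementation.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for jemalloc's per-thread reentrancy level
 * (the real one lives in tsd_t); single-threaded for simplicity.
 */
static unsigned reentrancy_level;

static void
pre_reentrancy_sketch(void) {
	reentrancy_level++;
}

static void
post_reentrancy_sketch(void) {
	assert(reentrancy_level > 0);
	reentrancy_level--;
}

/* Allocation entry point: a nonzero level forces the a0 slow path. */
static void *
alloc_sketch(size_t size) {
	if (reentrancy_level > 0) {
		/* jemalloc would route this through a0malloc(). */
		printf("slow path with a0: %zu bytes\n", size);
	} else {
		/* Normal fast path (tcache / per-thread arena). */
		printf("fast path: %zu bytes\n", size);
	}
	return NULL; /* Sketch only; no real allocation. */
}

int
main(void) {
	pre_reentrancy_sketch();  /* Done at the top of init. */
	alloc_sketch(64);         /* Allocation during init: slow path. */
	post_reentrancy_sketch(); /* Level back to 0 once init finishes. */
	alloc_sketch(64);         /* Normal operation: fast path. */
	return 0;
}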
-rw-r--r--  src/jemalloc.c  43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/src/jemalloc.c b/src/jemalloc.c
index c3983a5..7e695d6 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1397,6 +1397,18 @@ malloc_init_hard_finish(void) {
 	return false;
 }
 
+static void
+malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
+	malloc_mutex_assert_owner(tsdn, &init_lock);
+	malloc_mutex_unlock(tsdn, &init_lock);
+	if (reentrancy_set) {
+		assert(!tsdn_null(tsdn));
+		tsd_t *tsd = tsdn_tsd(tsdn);
+		assert(tsd_reentrancy_level_get(tsd) > 0);
+		post_reentrancy(tsd);
+	}
+}
+
 static bool
 malloc_init_hard(void) {
 	tsd_t *tsd;
@@ -1405,15 +1417,18 @@ malloc_init_hard(void) {
 	_init_init_lock();
 #endif
 	malloc_mutex_lock(TSDN_NULL, &init_lock);
+
+#define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
+	malloc_init_hard_cleanup(tsdn, reentrancy);	\
+	return ret;
+
 	if (!malloc_init_hard_needed()) {
-		malloc_mutex_unlock(TSDN_NULL, &init_lock);
-		return false;
+		UNLOCK_RETURN(TSDN_NULL, false, false)
 	}
 	if (malloc_init_state != malloc_init_a0_initialized &&
 	    malloc_init_hard_a0_locked()) {
-		malloc_mutex_unlock(TSDN_NULL, &init_lock);
-		return true;
+		UNLOCK_RETURN(TSDN_NULL, true, false)
 	}
 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
@@ -1425,29 +1440,27 @@ malloc_init_hard(void) {
 	if (malloc_init_hard_recursible()) {
 		return true;
 	}
-	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+	/* Set reentrancy level to 1 during init. */
+	pre_reentrancy(tsd);
 	/* Initialize narenas before prof_boot2 (for allocation). */
 	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
-		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-		return true;
+		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
 	}
-
 	if (config_prof && prof_boot2(tsd)) {
-		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-		return true;
+		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
 	}
 	malloc_init_percpu();
 	if (malloc_init_hard_finish()) {
-		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-		return true;
+		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
 	}
-
+	post_reentrancy(tsd);
 	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
-	malloc_tsd_boot1();
+	malloc_tsd_boot1();
 	/* Update TSD after tsd_boot1. */
 	tsd = tsd_fetch();
 	if (opt_background_thread) {
@@ -1463,7 +1476,7 @@ malloc_init_hard(void) {
 			return true;
 		}
 	}
-
+#undef UNLOCK_RETURN
 	return false;
 }
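A side note on the UNLOCK_RETURN macro introduced above: it is defined after init_lock is taken and #undef'd at the end of malloc_init_hard(), so the unlock-plus-optional-post_reentrancy cleanup lives in one place for every early exit. Here is a standalone sketch of the same function-local cleanup-macro pattern, using pthreads and hypothetical *_sketch names rather than jemalloc's malloc_mutex_t. The do/while(0) wrapper in the sketch is the conventional hardening; the patch's two-statement macro trades it away and instead relies on each call site being the only statements in its braced block.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t init_lock_sketch = PTHREAD_MUTEX_INITIALIZER;
static bool initialized_sketch;

static bool
init_hard_sketch(void) {
	pthread_mutex_lock(&init_lock_sketch);

/* Function-local macro; #undef'd before the end of the function. */
#define UNLOCK_RETURN_SKETCH(ret)				\
	do {							\
		pthread_mutex_unlock(&init_lock_sketch);	\
		return (ret);					\
	} while (0)

	if (initialized_sketch) {
		/* Early exit shares the same cleanup path. */
		UNLOCK_RETURN_SKETCH(false);
	}
	/* ... real initialization work would happen here ... */
	initialized_sketch = true;
	UNLOCK_RETURN_SKETCH(false);
#undef UNLOCK_RETURN_SKETCH
}

int
main(void) {
	return init_hard_sketch() ? 1 : 0;
}

Keeping the macro local to the function, as the patch does, avoids leaking a convenience name with an embedded return into the rest of the translation unit.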