summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorQi Wang <interwq@gwu.edu>2017-06-07 22:49:09 (GMT)
committerQi Wang <interwq@gmail.com>2017-06-08 17:02:18 (GMT)
commit73713fbb27cd1cf6754259b19a960e91a16c3638 (patch)
tree359eef242566b73864f8df223f3a0cfc1af238a5 /src
parent00869e39a334f3d869dfb9f8e651c2de3dded76f (diff)
downloadjemalloc-73713fbb27cd1cf6754259b19a960e91a16c3638.zip
jemalloc-73713fbb27cd1cf6754259b19a960e91a16c3638.tar.gz
jemalloc-73713fbb27cd1cf6754259b19a960e91a16c3638.tar.bz2
Drop high rank locks when creating threads.
Avoid holding arenas_lock and background_thread_lock when creating background threads, because pthread_create may take internal locks and could therefore deadlock against jemalloc's internal locks.
Diffstat (limited to 'src')
-rw-r--r--src/arena.c11
-rw-r--r--src/background_thread.c5
-rw-r--r--src/ctl.c3
-rw-r--r--src/jemalloc.c36
4 files changed, 42 insertions, 13 deletions
diff --git a/src/arena.c b/src/arena.c
index dedbb3e..0912df3 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2050,17 +2050,6 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
hooks_arena_new_hook();
}
post_reentrancy(tsdn_tsd(tsdn));
-
- /* background_thread_create() handles reentrancy internally. */
- if (have_background_thread) {
- bool err;
- malloc_mutex_lock(tsdn, &background_thread_lock);
- err = background_thread_create(tsdn_tsd(tsdn), ind);
- malloc_mutex_unlock(tsdn, &background_thread_lock);
- if (err) {
- goto label_error;
- }
- }
}
return arena;
diff --git a/src/background_thread.c b/src/background_thread.c
index 64eba1a..50812c3 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -352,12 +352,15 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) {
}
pre_reentrancy(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
/*
* To avoid complications (besides reentrancy), create internal
- * background threads with the underlying pthread_create.
+ * background threads with the underlying pthread_create, and drop
+ * background_thread_lock (pthread_create may take internal locks).
*/
int err = pthread_create_wrapper(&info->thread, NULL,
background_thread_entry, (void *)thread_ind);
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
post_reentrancy(tsd);
if (err != 0) {
diff --git a/src/ctl.c b/src/ctl.c
index 2c3f994..134dbac 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1501,6 +1501,7 @@ background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
background_thread_ctl_init(tsd_tsdn(tsd));
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
if (newp == NULL) {
oldval = background_thread_enabled();
@@ -1535,6 +1536,8 @@ background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
return ret;
}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 9a5685b..5a0baf8 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -70,7 +70,7 @@ unsigned opt_narenas = 0;
unsigned ncpus;
/* Protects arenas initialization. */
-static malloc_mutex_t arenas_lock;
+malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -335,6 +335,25 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
return arena;
}
+static void
+arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
+ if (ind == 0) {
+ return;
+ }
+ /* background_thread_create() handles reentrancy internally. */
+ if (have_background_thread) {
+ bool err;
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ err = background_thread_create(tsdn_tsd(tsdn), ind);
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ if (err) {
+ malloc_printf("<jemalloc>: error in background thread "
+ "creation for arena %u. Abort.\n", ind);
+ abort();
+ }
+ }
+}
+
arena_t *
arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena_t *arena;
@@ -342,6 +361,9 @@ arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
malloc_mutex_lock(tsdn, &arenas_lock);
arena = arena_init_locked(tsdn, ind, extent_hooks);
malloc_mutex_unlock(tsdn, &arenas_lock);
+
+ arena_new_create_background_thread(tsdn, ind);
+
return arena;
}
@@ -475,6 +497,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
if (narenas_auto > 1) {
unsigned i, j, choose[2], first_null;
+ bool is_new_arena[2];
/*
* Determine binding for both non-internal and internal
@@ -486,6 +509,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
for (j = 0; j < 2; j++) {
choose[j] = 0;
+ is_new_arena[j] = false;
}
first_null = narenas_auto;
@@ -545,6 +569,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
&arenas_lock);
return NULL;
}
+ is_new_arena[j] = true;
if (!!j == internal) {
ret = arena;
}
@@ -552,6 +577,15 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
arena_bind(tsd, choose[j], !!j);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+
+ for (j = 0; j < 2; j++) {
+ if (is_new_arena[j]) {
+ assert(choose[j] > 0);
+ arena_new_create_background_thread(
+ tsd_tsdn(tsd), choose[j]);
+ }
+ }
+
} else {
ret = arena_get(tsd_tsdn(tsd), 0, false);
arena_bind(tsd, 0, false);