summaryrefslogtreecommitdiffstats
path: root/src/jemalloc.c
diff options
context:
space:
mode:
authorJason Evans <je@fb.com>2012-10-09 21:46:22 (GMT)
committerJason Evans <je@fb.com>2012-10-09 22:21:46 (GMT)
commit20f1fc95adb35ea63dc61f47f2b0ffbd37d39f32 (patch)
tree9c61145b466c8f413b4f98247f17d8509e6ed8ea /src/jemalloc.c
parent7de92767c20cb72c94609b9c78985526fb84a679 (diff)
downloadjemalloc-20f1fc95adb35ea63dc61f47f2b0ffbd37d39f32.zip
jemalloc-20f1fc95adb35ea63dc61f47f2b0ffbd37d39f32.tar.gz
jemalloc-20f1fc95adb35ea63dc61f47f2b0ffbd37d39f32.tar.bz2
Fix fork(2)-related deadlocks.
Add a library constructor for jemalloc that initializes the allocator. This fixes a race that could occur if threads were created by the main thread prior to any memory allocation, followed by fork(2), and then memory allocation in the child process. Fix the prefork/postfork functions to acquire/release the ctl, prof, and rtree mutexes. This fixes various fork() child process deadlocks, but one possible deadlock remains (intentionally) unaddressed: prof backtracing can acquire runtime library mutexes, so deadlock is still possible if heap profiling is enabled during fork(). This deadlock is known to be a real issue in at least the case of libgcc-based backtracing. Reported by tfengjun.
Diffstat (limited to 'src/jemalloc.c')
-rw-r--r--src/jemalloc.c33
1 files changed, 30 insertions, 3 deletions
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 7fa0744..4ea1f75 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1614,6 +1614,27 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* malloc during fork().
*/
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator. Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time. The following library
+ * constructor is a partial solution to this problem. It may still be possible
+ * trigger the deadlock described above, but doing so would involve forking via
+ * a library constructor that runs before jemalloc's runs.
+ */
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+ malloc_init();
+}
+
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
@@ -1631,14 +1652,16 @@ _malloc_prefork(void)
assert(malloc_initialized);
/* Acquire all mutexes in a safe order. */
+ ctl_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
+ prof_prefork();
base_prefork();
huge_prefork();
- chunk_dss_prefork();
+ chunk_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -1658,14 +1681,16 @@ _malloc_postfork(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
- chunk_dss_postfork_parent();
+ chunk_postfork_parent();
huge_postfork_parent();
base_postfork_parent();
+ prof_postfork_parent();
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+ ctl_postfork_parent();
}
void
@@ -1676,14 +1701,16 @@ jemalloc_postfork_child(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
- chunk_dss_postfork_child();
+ chunk_postfork_child();
huge_postfork_child();
base_postfork_child();
+ prof_postfork_child();
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+ ctl_postfork_child();
}
/******************************************************************************/