path: root/src/jemalloc.c
author     Jason Evans <jasone@canonware.com>  2013-03-06 20:04:18 (GMT)
committer  Jason Evans <jasone@canonware.com>  2013-03-06 20:04:18 (GMT)
commit     9ef9d9e8c271cdf14f664b871a8f98c827714784 (patch)
tree       3981574a5c1746a8b9a2df1595b1ad25ea7afdef /src/jemalloc.c
parent     83789f45307379e096c4e8be81d9e9a51e3f5a4a (diff)
parent     2298835e70ae79577a1c691020874bb11eefc039 (diff)
Merge branch 'dev' (tag: 3.3.1)
Diffstat (limited to 'src/jemalloc.c')
-rw-r--r--   src/jemalloc.c   33
1 file changed, 28 insertions, 5 deletions
diff --git a/src/jemalloc.c b/src/jemalloc.c
index c117685..bc350ed 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -282,12 +282,30 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock);
}
+static JEMALLOC_ATTR(always_inline) void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && opt_quarantine)
+ quarantine_alloc_hook();
+}
+
static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{
- if (malloc_initialized == false)
- return (malloc_init_hard());
+ if (malloc_initialized == false && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
return (false);
}
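
The comment in the new malloc_thread_init() above is the heart of this hunk: per-thread quarantine state may only be set up from allocation paths, because a thread whose last action is to free() its own TLS-backed data must not trigger writes into that (possibly already freed) TLS. A minimal sketch of the pattern, using hypothetical names rather than jemalloc's internals:

/*
 * Sketch only: initialize per-thread state on allocation events,
 * never as a side effect of deallocation.  All names are illustrative.
 */
#include <stdbool.h>
#include <stdlib.h>

static __thread bool quarantine_tsd_ready = false;

static void
thread_init_hook(void)
{
	/* Analogue of malloc_thread_init(): cheap check, lazy init. */
	if (!quarantine_tsd_ready) {
		/* Allocate/register per-thread quarantine state here. */
		quarantine_tsd_ready = true;
	}
}

void *
my_malloc(size_t size)
{
	thread_init_hook();	/* Allocation event: safe to touch TLS. */
	return (malloc(size));
}

void
my_free(void *ptr)
{
	/*
	 * Deliberately no init hook here: a thread that does nothing but
	 * deallocate (e.g. while tearing down its TLS) must not cause
	 * writes to TLS.
	 */
	free(ptr);
}

The remaining hunks below add malloc_thread_init() to the entry points that operate on existing allocations (je_realloc with a non-NULL ptr, je_malloc_usable_size, je_rallocm, je_sallocm), so every such path counts as an allocation event for this purpose.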
@@ -1095,6 +1113,7 @@ je_realloc(void *ptr, size_t size)
if (size == 0) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(p). */
+ assert(malloc_initialized || IS_INITIALIZER);
if (config_prof) {
old_size = isalloc(ptr, true);
if (config_valgrind && opt_valgrind)
@@ -1120,6 +1139,7 @@ je_realloc(void *ptr, size_t size)
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (config_prof) {
old_size = isalloc(ptr, true);
@@ -1323,6 +1343,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
size_t ret;
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (config_ivsalloc)
ret = ivsalloc(ptr, config_prof);
@@ -1497,6 +1518,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
@@ -1611,6 +1633,7 @@ je_sallocm(const void *ptr, size_t *rsize, int flags)
size_t sz;
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (config_ivsalloc)
sz = ivsalloc(ptr, config_prof);
@@ -1730,12 +1753,12 @@ _malloc_prefork(void)
/* Acquire all mutexes in a safe order. */
ctl_prefork();
+ prof_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
- prof_prefork();
chunk_prefork();
base_prefork();
huge_prefork();
@@ -1761,12 +1784,12 @@ _malloc_postfork(void)
huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
- prof_postfork_parent();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+ prof_postfork_parent();
ctl_postfork_parent();
}
@@ -1781,12 +1804,12 @@ jemalloc_postfork_child(void)
huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
- prof_postfork_child();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+ prof_postfork_child();
ctl_postfork_child();
}
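
The last three hunks move prof_prefork() ahead of the arenas_lock acquisition in _malloc_prefork(), and correspondingly move prof_postfork_parent()/prof_postfork_child() to after arenas_lock is released, so the fork handlers acquire all mutexes in one fixed order and release them in exactly the reverse order. A hedged sketch of that pthread_atfork() pattern, with illustrative mutex names (not jemalloc's):

#include <pthread.h>

static pthread_mutex_t ctl_mtx    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t prof_mtx   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t arenas_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	/* Acquire in one fixed order: ctl, then prof, then arenas. */
	pthread_mutex_lock(&ctl_mtx);
	pthread_mutex_lock(&prof_mtx);
	pthread_mutex_lock(&arenas_mtx);
}

static void
postfork(void)
{
	/* Release in strictly reverse order: arenas, prof, ctl. */
	pthread_mutex_unlock(&arenas_mtx);
	pthread_mutex_unlock(&prof_mtx);
	pthread_mutex_unlock(&ctl_mtx);
}

void
install_fork_handlers(void)
{
	/*
	 * Sketch: a real allocator's child handler may reinitialize its
	 * mutexes rather than simply unlocking them; postfork() is reused
	 * here for brevity.
	 */
	pthread_atfork(prefork, postfork, postfork);
}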