author     Jason Evans <je@fb.com>	2013-01-30 23:03:11 (GMT)
committer  Jason Evans <je@fb.com>	2013-01-31 22:23:48 (GMT)
commit     bbe29d374d0fa5f4684621f16c099294e56c26ef
tree       4ddaf133e68cb14e25127afb7dfd41a5c8562b34
parent     83789f45307379e096c4e8be81d9e9a51e3f5a4a
Fix potential TLS-related memory corruption.
Avoid writing to uninitialized TLS as a side effect of deallocation. Initializing TLS during deallocation is unsafe because it is possible that a thread never did any allocation, and that TLS has already been deallocated by the threads library, resulting in write-after-free corruption. These fixes affect prof_tdata and quarantine; all other uses of TLS are already safe, whether intentionally (as for tcache) or unintentionally (as for arenas).
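
To make the hazard concrete, here is a minimal C sketch of the pattern the fix adopts; the *_sketch names and the __thread flag are hypothetical stand-ins, not jemalloc's actual TSD machinery:

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical per-thread flag standing in for quarantine TSD. */
static __thread bool quarantine_ready;

static void
quarantine_alloc_hook_sketch(void)
{

	/* Runs only on allocation events, where writing TLS is safe. */
	if (quarantine_ready == false)
		quarantine_ready = true;	/* ...allocate/initialize TSD... */
}

static void *
malloc_sketch(size_t size)
{

	quarantine_alloc_hook_sketch();	/* best-effort TSD initialization */
	return (malloc(size));
}

static void
free_sketch(void *ptr)
{

	/*
	 * Never initialize TLS here: this thread may have done nothing but
	 * free(), and the threads library may already have torn down its
	 * TSD, so a lazy write would land in freed memory.  Only consult
	 * state that allocation events set up.
	 */
	if (quarantine_ready) {
		/* ...a quarantine would intercept ptr here... */
	}
	free(ptr);
}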
Diffstat (limited to 'src/jemalloc.c')
-rw-r--r--	src/jemalloc.c	27
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/src/jemalloc.c b/src/jemalloc.c
index c117685..6f6464d 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -282,12 +282,30 @@ arenas_cleanup(void *arg)
 	malloc_mutex_unlock(&arenas_lock);
 }
 
+static JEMALLOC_ATTR(always_inline) void
+malloc_thread_init(void)
+{
+
+	/*
+	 * TSD initialization can't be safely done as a side effect of
+	 * deallocation, because it is possible for a thread to do nothing but
+	 * deallocate its TLS data via free(), in which case writing to TLS
+	 * would cause write-after-free memory corruption.  The quarantine
+	 * facility *only* gets used as a side effect of deallocation, so make
+	 * a best effort attempt at initializing its TSD by hooking all
+	 * allocation events.
+	 */
+	if (config_fill && opt_quarantine)
+		quarantine_alloc_hook();
+}
+
 static JEMALLOC_ATTR(always_inline) bool
 malloc_init(void)
 {
 
-	if (malloc_initialized == false)
-		return (malloc_init_hard());
+	if (malloc_initialized == false && malloc_init_hard())
+		return (true);
+	malloc_thread_init();
 
 	return (false);
 }
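
For context, every allocating entry point funnels through this bootstrap check, so after this change a caller such as je_malloc effectively behaves as follows (a condensed schematic, not the verbatim source; profiling, valgrind, and out-of-memory handling are elided):

void *
je_malloc(size_t size)
{

	/*
	 * malloc_init() still returns true on bootstrap failure; on the
	 * success path it now also runs malloc_thread_init(), so quarantine
	 * TSD gets initialized on an allocation event rather than during
	 * deallocation.
	 */
	if (malloc_init())
		return (NULL);
	return (imalloc(size));	/* simplified allocation path */
}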
@@ -1095,6 +1113,7 @@ je_realloc(void *ptr, size_t size)
 	if (size == 0) {
 		if (ptr != NULL) {
 			/* realloc(ptr, 0) is equivalent to free(ptr). */
+			assert(malloc_initialized || IS_INITIALIZER);
 			if (config_prof) {
 				old_size = isalloc(ptr, true);
 				if (config_valgrind && opt_valgrind)
@@ -1120,6 +1139,7 @@ je_realloc(void *ptr, size_t size)
 
 	if (ptr != NULL) {
 		assert(malloc_initialized || IS_INITIALIZER);
+		malloc_thread_init();
 
 		if (config_prof) {
 			old_size = isalloc(ptr, true);
@@ -1323,6 +1343,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 	size_t ret;
 
 	assert(malloc_initialized || IS_INITIALIZER);
+	malloc_thread_init();
 
 	if (config_ivsalloc)
 		ret = ivsalloc(ptr, config_prof);
@@ -1497,6 +1518,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
 	assert(size != 0);
 	assert(SIZE_T_MAX - size >= extra);
 	assert(malloc_initialized || IS_INITIALIZER);
+	malloc_thread_init();
 
 	if (arena_ind != UINT_MAX) {
 		arena_chunk_t *chunk;
@@ -1611,6 +1633,7 @@ je_sallocm(const void *ptr, size_t *rsize, int flags)
 	size_t sz;
 
 	assert(malloc_initialized || IS_INITIALIZER);
+	malloc_thread_init();
 
 	if (config_ivsalloc)
 		sz = ivsalloc(ptr, config_prof);
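
Read together, the hunks after the first apply a single rule: every public entry point that inspects or resizes an existing allocation (je_realloc with a non-NULL ptr, je_malloc_usable_size, je_rallocm, je_sallocm) gains a malloc_thread_init() call immediately after its initialization assertion, while the pure deallocation path in this file is deliberately left untouched. Condensed from the hunks above, each of these entry points now opens with:

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

The assertion is sound because the pointer being passed in came from a prior allocation, so the allocator must already be booted; malloc_thread_init() then gives quarantine TSD a best-effort chance to initialize on a path where writing TLS is safe.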