summary | refs | log | tree | commit | diff | stats
path: root/src
diff options
context:
space:
mode:
author	Jason Evans <jasone@canonware.com>	2012-04-21 23:04:51 (GMT)
committer	Jason Evans <jasone@canonware.com>	2012-04-21 23:04:51 (GMT)
commit	7ad54c1c30e0805e0758690115875f982de46cf2 (patch)
tree	2c7b57e68793413aaa3370b5e66e7cb0e208dcbe /src
parent	8f0e0eb1c01d5d934586ea62e519ca8b8637aebc (diff)
download	jemalloc-7ad54c1c30e0805e0758690115875f982de46cf2.zip
	jemalloc-7ad54c1c30e0805e0758690115875f982de46cf2.tar.gz
	jemalloc-7ad54c1c30e0805e0758690115875f982de46cf2.tar.bz2
Fix chunk allocation/deallocation bugs.
Fix chunk_alloc_dss() to zero memory when requested. Fix chunk_dealloc() to avoid chunk_dealloc_mmap() for dss-allocated memory. Fix huge_palloc() to always junk fill when requested. Improve chunk_recycle() to report that memory is zeroed as a side effect of pages_purge().
Diffstat (limited to 'src')
-rw-r--r--	src/chunk.c	17
-rw-r--r--	src/chunk_dss.c	4
-rw-r--r--	src/huge.c	10
3 files changed, 25 insertions, 6 deletions
diff --git a/src/chunk.c b/src/chunk.c
index 3148505..0fccd0c 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -98,7 +98,10 @@ chunk_recycle(size_t size, size_t alignment, bool *zero)
if (node != NULL)
base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_FREE
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+ /* Pages are zeroed as a side effect of pages_purge(). */
+ *zero = true;
+#else
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
@@ -161,7 +164,13 @@ label_return:
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
}
+ if (config_debug && *zero && ret != NULL) {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)ret;
+ for (i = 0; i < size / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ }
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
@@ -258,9 +267,9 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
}
if (unmap) {
- if (chunk_dealloc_mmap(chunk, size) == false)
- return;
- chunk_record(chunk, size);
+ if ((config_dss && chunk_in_dss(chunk)) ||
+ chunk_dealloc_mmap(chunk, size))
+ chunk_record(chunk, size);
}
}
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index bd4a724..2d68e48 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -89,6 +89,10 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
chunk_dealloc(cpad, cpad_size, true);
+ if (*zero) {
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
+ }
return (ret);
}
} while (dss_prev != (void *)-1);
diff --git a/src/huge.c b/src/huge.c
index daf0c62..23eb074 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -28,6 +28,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
void *ret;
size_t csize;
extent_node_t *node;
+ bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@@ -42,7 +43,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
if (node == NULL)
return (NULL);
- ret = chunk_alloc(csize, alignment, false, &zero);
+ /*
+ * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+ * it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed = zero;
+ ret = chunk_alloc(csize, alignment, false, &is_zeroed);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -64,7 +70,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
- else if (opt_zero)
+ else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}