author     Jason Evans <je@fb.com>    2012-05-09 20:05:04 (GMT)
committer  Jason Evans <je@fb.com>    2012-05-09 20:05:04 (GMT)
commit     de6fbdb72c6e1401b36f8f2073404645bac6cd2b (patch)
tree       701715206d28fa76accb42b88f441566f6595a45
parent     34a8cf6c4029c09e1db776b7027a9c1b31f9e5b4 (diff)
Fix chunk_alloc_mmap() bugs.
Simplify chunk_alloc_mmap() to no longer attempt map extension. The extra
complexity isn't warranted, because although in the success case it saves one
system call as compared to immediately falling back to
chunk_alloc_mmap_slow(), it also makes the failure case even more expensive.
This simplification removes two bugs:

- For Windows platforms, pages_unmap() wasn't being called for unaligned
  mappings prior to falling back to chunk_alloc_mmap_slow(). This caused
  permanent virtual memory leaks.

- For non-Windows platforms, alignment greater than chunksize caused
  pages_map() to be called with size 0 when attempting map extension. This
  always resulted in an mmap() error, and subsequent fallback to
  chunk_alloc_mmap_slow().
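For reference, the simplified strategy this commit adopts can be sketched in
isolation as follows. This is a minimal standalone illustration assuming POSIX
mmap()/munmap() and a power-of-two, page-multiple alignment; the function name
is hypothetical, and it stands in for the pages_map()-based code shown in the
diff below rather than reproducing it.

    /*
     * Illustrative sketch (not jemalloc's code): optimistically map exactly
     * `size` bytes; if the result is misaligned, unmap the whole mapping and
     * signal the caller to fall back to the slow over-allocate-and-trim path.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void *
    optimistic_aligned_map(size_t size, size_t alignment)
    {
            void *ret;

            ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (ret == MAP_FAILED)
                    return (NULL);
            if (((uintptr_t)ret & (alignment - 1)) != 0) {
                    /*
                     * Misaligned result: release the entire mapping before
                     * falling back (skipping this step is what caused the
                     * Windows leak fixed by this commit), then let the
                     * caller retry via the slow path.
                     */
                    munmap(ret, size);
                    return (NULL);
            }
            return (ret);
    }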
-rw-r--r--  src/chunk.c        1
-rw-r--r--  src/chunk_mmap.c  45
2 files changed, 11 insertions, 35 deletions
diff --git a/src/chunk.c b/src/chunk.c
index 166d1ea..bb6189e 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -134,6 +134,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
assert(size != 0);
assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(size, alignment, base, zero);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 9f388d2..c8da655 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -16,6 +16,8 @@ pages_map(void *addr, size_t size)
{
void *ret;
+ assert(size != 0);
+
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
@@ -164,51 +166,24 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
- * excess. However, that always results in at least one call to
+ * excess. However, that always results in one or two calls to
* pages_unmap().
*
- * A more optimistic approach is to try mapping precisely the right
- * amount, then try to append another mapping if alignment is off. In
- * practice, this works out well as long as the application is not
- * interleaving mappings via direct mmap() calls. If we do run into a
- * situation where there is an interleaved mapping and we are unable to
- * extend an unaligned mapping, our best option is to switch to the
- * slow method until mmap() returns another aligned mapping. This will
- * tend to leave a gap in the memory map that is too small to cause
- * later problems for the optimistic method.
- *
- * Another possible confounding factor is address space layout
- * randomization (ASLR), which causes mmap(2) to disregard the
- * requested address. As such, repeatedly trying to extend unaligned
- * mappings could result in an infinite loop, so if extension fails,
- * immediately fall back to the reliable method of over-allocation
- * followed by trimming.
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
*/
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
ret = pages_map(NULL, size);
if (ret == NULL)
return (NULL);
-
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
-#ifdef _WIN32
+ pages_unmap(ret, size);
return (chunk_alloc_mmap_slow(size, alignment, zero));
-#else
- /* Try to extend chunk boundary. */
- if (pages_map((void *)((uintptr_t)ret + size), chunksize -
- offset) == NULL) {
- /*
- * Extension failed. Clean up, then fall back to the
- * reliable-but-expensive method.
- */
- pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, zero));
- } else {
- /* Clean up unneeded leading space. */
- pages_unmap(ret, chunksize - offset);
- ret = (void *)((uintptr_t)ret + (chunksize - offset));
- }
-#endif
}
assert(ret != NULL);
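For comparison, the reliable-but-slow over-allocate-and-trim method referenced
in the comment above could look roughly like the sketch below. It assumes POSIX
mmap()/munmap() and that size and alignment are nonzero page-size multiples
with alignment a power of two; the name aligned_map_slow() is hypothetical and
this is not jemalloc's actual chunk_alloc_mmap_slow().

    /*
     * Illustrative sketch of the over-allocate-and-trim approach: map more
     * than needed, then unmap the leading and trailing excess, which always
     * costs one or two munmap() calls.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void *
    aligned_map_slow(size_t size, size_t alignment)
    {
            size_t alloc_size, lead, trail;
            char *addr;

            alloc_size = size + alignment;  /* over-size the request */
            addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (addr == MAP_FAILED)
                    return (NULL);

            /* Leading excess: distance from addr to the next aligned address. */
            lead = (alignment - ((uintptr_t)addr & (alignment - 1))) &
                (alignment - 1);
            if (lead != 0)
                    munmap(addr, lead);
            /* Trailing excess: whatever remains past the aligned region. */
            trail = alloc_size - lead - size;
            if (trail != 0)
                    munmap(addr + lead + size, trail);

            return (addr + lead);
    }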