summary | refs | log | tree | commit | diff | stats
path: root/src/chunk_mmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/chunk_mmap.c')
-rw-r--r--  src/chunk_mmap.c  |  45
1 file changed, 10 insertions(+), 35 deletions(-)
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 9f388d2..c8da655 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -16,6 +16,8 @@ pages_map(void *addr, size_t size)
{
void *ret;
+ assert(size != 0);
+
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
@@ -164,51 +166,24 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
- * excess. However, that always results in at least one call to
+ * excess. However, that always results in one or two calls to
* pages_unmap().
*
- * A more optimistic approach is to try mapping precisely the right
- * amount, then try to append another mapping if alignment is off. In
- * practice, this works out well as long as the application is not
- * interleaving mappings via direct mmap() calls. If we do run into a
- * situation where there is an interleaved mapping and we are unable to
- * extend an unaligned mapping, our best option is to switch to the
- * slow method until mmap() returns another aligned mapping. This will
- * tend to leave a gap in the memory map that is too small to cause
- * later problems for the optimistic method.
- *
- * Another possible confounding factor is address space layout
- * randomization (ASLR), which causes mmap(2) to disregard the
- * requested address. As such, repeatedly trying to extend unaligned
- * mappings could result in an infinite loop, so if extension fails,
- * immediately fall back to the reliable method of over-allocation
- * followed by trimming.
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
*/
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
ret = pages_map(NULL, size);
if (ret == NULL)
return (NULL);
-
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
-#ifdef _WIN32
+ pages_unmap(ret, size);
return (chunk_alloc_mmap_slow(size, alignment, zero));
-#else
- /* Try to extend chunk boundary. */
- if (pages_map((void *)((uintptr_t)ret + size), chunksize -
- offset) == NULL) {
- /*
- * Extension failed. Clean up, then fall back to the
- * reliable-but-expensive method.
- */
- pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, zero));
- } else {
- /* Clean up unneeded leading space. */
- pages_unmap(ret, chunksize - offset);
- ret = (void *)((uintptr_t)ret + (chunksize - offset));
- }
-#endif
}
assert(ret != NULL);