author    Jordan DeLong <delong.j@facebook.com>  2010-05-10 21:17:00 (GMT)
committer Jason Evans <je@facebook.com>  2010-05-11 18:46:53 (GMT)
commit    2206e1acc12a078b0108052e3c1aca745887188e
tree      002a848861f0a845fca001389ecb5252d42cede6
parent    ecea0f6125ea87ee6fd82f16286b61eb8c0f5692
Add MAP_NORESERVE support.
Add MAP_NORESERVE to the chunk_mmap() path used by chunk_swap_enable(), if the system supports it.
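The portability question is handled at the mmap() call: MAP_NORESERVE asks the kernel not to reserve swap space for a mapping, but the flag is not universal, so the patch only ORs it in when the platform defines it. A minimal standalone sketch of that pattern (map_noreserve is an illustrative name, not part of the patch):

    #include <stddef.h>
    #include <sys/mman.h>

    static void *
    map_noreserve(size_t size)
    {
        int flags = MAP_PRIVATE | MAP_ANON;
    #ifdef MAP_NORESERVE
        /* Ask the kernel not to reserve swap for these pages. */
        flags |= MAP_NORESERVE;
    #endif
        void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
        return (ret == MAP_FAILED) ? NULL : ret;
    }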
Diffstat (limited to 'jemalloc')
 -rw-r--r--  jemalloc/include/jemalloc/internal/chunk_mmap.h |  1
 -rw-r--r--  jemalloc/src/chunk_mmap.c                       | 43
 -rw-r--r--  jemalloc/src/chunk_swap.c                       |  2
 3 files changed, 32 insertions(+), 14 deletions(-)
diff --git a/jemalloc/include/jemalloc/internal/chunk_mmap.h b/jemalloc/include/jemalloc/internal/chunk_mmap.h
index 8fb90b7..dc52448 100644
--- a/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -10,6 +10,7 @@
 #ifdef JEMALLOC_H_EXTERNS

 void    *chunk_alloc_mmap(size_t size);
+void    *chunk_alloc_mmap_noreserve(size_t size);
 void    chunk_dealloc_mmap(void *chunk, size_t size);

 #endif /* JEMALLOC_H_EXTERNS */
diff --git a/jemalloc/src/chunk_mmap.c b/jemalloc/src/chunk_mmap.c
index 8f07113..d9f9e86 100644
--- a/jemalloc/src/chunk_mmap.c
+++ b/jemalloc/src/chunk_mmap.c
@@ -23,14 +23,15 @@ static
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */

-static void *pages_map(void *addr, size_t size);
+static void *pages_map(void *addr, size_t size, bool noreserve);
 static void pages_unmap(void *addr, size_t size);
-static void *chunk_alloc_mmap_slow(size_t size, bool unaligned);
+static void *chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve);
+static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);

 /******************************************************************************/

 static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, bool noreserve)
 {
     void *ret;
@@ -38,8 +39,12 @@ pages_map(void *addr, size_t size)
     /*
      * We don't use MAP_FIXED here, because it can cause the *replacement*
      * of existing mappings, and we only want to create new mappings.
      */
-    ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-        -1, 0);
+    int flags = MAP_PRIVATE | MAP_ANON;
+#ifdef MAP_NORESERVE
+    if (noreserve)
+        flags |= MAP_NORESERVE;
+#endif
+    ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
     assert(ret != NULL);
     if (ret == MAP_FAILED)
@@ -83,7 +88,7 @@ pages_unmap(void *addr, size_t size)
 }

 static void *
-chunk_alloc_mmap_slow(size_t size, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
 {
     void *ret;
     size_t offset;
@@ -92,7 +97,7 @@ chunk_alloc_mmap_slow(size_t size, bool unaligned)
     if (size + chunksize <= size)
         return (NULL);

-    ret = pages_map(NULL, size + chunksize);
+    ret = pages_map(NULL, size + chunksize, noreserve);
     if (ret == NULL)
         return (NULL);
@@ -128,8 +133,8 @@ chunk_alloc_mmap_slow(size_t size, bool unaligned)
     return (ret);
 }

-void *
-chunk_alloc_mmap(size_t size)
+static void *
+chunk_alloc_mmap_internal(size_t size, bool noreserve)
 {
     void *ret;
@@ -164,7 +169,7 @@ chunk_alloc_mmap(size_t size)
     if (mmap_unaligned == false) {
         size_t offset;

-        ret = pages_map(NULL, size);
+        ret = pages_map(NULL, size, noreserve);
         if (ret == NULL)
             return (NULL);
@@ -173,13 +178,13 @@ chunk_alloc_mmap(size_t size)
             mmap_unaligned = true;
             /* Try to extend chunk boundary. */
             if (pages_map((void *)((uintptr_t)ret + size),
-                chunksize - offset) == NULL) {
+                chunksize - offset, noreserve) == NULL) {
                 /*
                  * Extension failed. Clean up, then revert to
                  * the reliable-but-expensive method.
                  */
                 pages_unmap(ret, size);
-                ret = chunk_alloc_mmap_slow(size, true);
+                ret = chunk_alloc_mmap_slow(size, true, noreserve);
             } else {
                 /* Clean up unneeded leading space. */
                 pages_unmap(ret, chunksize - offset);
@@ -188,11 +193,23 @@ chunk_alloc_mmap(size_t size)
             }
         }
     } else
-        ret = chunk_alloc_mmap_slow(size, false);
+        ret = chunk_alloc_mmap_slow(size, false, noreserve);

     return (ret);
 }

+void *
+chunk_alloc_mmap(size_t size)
+{
+    return chunk_alloc_mmap_internal(size, false);
+}
+
+void *
+chunk_alloc_mmap_noreserve(size_t size)
+{
+    return chunk_alloc_mmap_internal(size, true);
+}
+
 void
 chunk_dealloc_mmap(void *chunk, size_t size)
 {
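For context, the slow path these hunks thread noreserve through gets chunk alignment the reliable-but-expensive way: map size plus one extra chunk, then unmap the unaligned head and the leftover tail. A self-contained sketch of that over-allocate-and-trim technique, assuming chunksize is a power of two (aligned_map_slow and its flags parameter are illustrative, not jemalloc's code):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void *
    aligned_map_slow(size_t size, size_t chunksize, int flags)
    {
        /* Guard against size_t wraparound, as chunk_alloc_mmap_slow() does. */
        if (size + chunksize <= size)
            return NULL;

        char *raw = mmap(NULL, size + chunksize, PROT_READ | PROT_WRITE,
            flags, -1, 0);
        if (raw == MAP_FAILED)
            return NULL;

        /* How far raw sits past the previous chunk boundary. */
        size_t offset = (uintptr_t)raw & (chunksize - 1);
        char *ret = raw;
        if (offset != 0) {
            /* Unmap the unaligned head; ret lands on the next boundary. */
            ret = raw + (chunksize - offset);
            munmap(raw, chunksize - offset);
            /* offset bytes of excess remain past ret + size. */
            munmap(ret + size, offset);
        } else {
            /* Already aligned: the whole extra chunk is tail excess. */
            munmap(ret + size, chunksize);
        }
        return ret;
    }

A caller would pass MAP_PRIVATE | MAP_ANON (plus MAP_NORESERVE where available) as flags; the fast path above avoids this extra work whenever a plain mmap() happens to return an aligned region.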
diff --git a/jemalloc/src/chunk_swap.c b/jemalloc/src/chunk_swap.c
index b8c880f..ed9e414 100644
--- a/jemalloc/src/chunk_swap.c
+++ b/jemalloc/src/chunk_swap.c
@@ -283,7 +283,7 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
      * Allocate a chunk-aligned region of anonymous memory, which will
      * be the final location for the memory-mapped files.
      */
-    vaddr = chunk_alloc_mmap(cumsize);
+    vaddr = chunk_alloc_mmap_noreserve(cumsize);
     if (vaddr == NULL) {
         ret = true;
         goto RETURN;
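The placeholder in chunk_swap_enable() is exactly what MAP_NORESERVE is for: the anonymous region only holds a contiguous address range until the swap files are mapped over it, so committing swap space to it would be pure waste. A hypothetical illustration of that reserve-then-overlay pattern (overlay_file, fd, filesize, and cumsize are illustrative names, not chunk_swap.c's code; fd must be open read/write):

    #include <stddef.h>
    #include <sys/mman.h>

    static void *
    overlay_file(int fd, size_t filesize, size_t cumsize)
    {
        /* Reserve cumsize bytes of address space, committing no swap. */
        int flags = MAP_PRIVATE | MAP_ANON;
    #ifdef MAP_NORESERVE
        flags |= MAP_NORESERVE;
    #endif
        void *vaddr = mmap(NULL, cumsize, PROT_READ | PROT_WRITE, flags,
            -1, 0);
        if (vaddr == MAP_FAILED)
            return NULL;

        /*
         * MAP_FIXED is safe here, unlike in pages_map(): the target range
         * is our own placeholder, and replacing it is the point.
         */
        if (mmap(vaddr, filesize, PROT_READ | PROT_WRITE,
            MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) {
            munmap(vaddr, cumsize);
            return NULL;
        }
        return vaddr;
    }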