author     Jason Evans <jasone@canonware.com>  2017-04-16 23:23:32 (GMT)
committer  Jason Evans <jasone@canonware.com>  2017-04-19 02:01:04 (GMT)
commit     da4cff0279b2e8f2b0482ae961f2e2f63662342d (patch)
tree       7d44c670b9d7a32cec4e5b872f5e20acd62a9c8b /src
parent     45f087eb033927338b9df847eb9be6886ef48cf7 (diff)
Support --with-lg-page values larger than system page size.
All mappings continue to be PAGE-aligned, even if the system page size is
smaller. This change is primarily intended to provide a mechanism for
supporting multiple page sizes with the same binary; smaller page sizes work
better in conjunction with jemalloc's design.

This resolves #467.
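The invariant the change relies on, shown as a standalone C sketch: when the
compiled-in page size (PAGE, from --with-lg-page) is a power-of-two multiple
of the OS page size, a mapping over-sized by PAGE minus the OS page size
always contains a PAGE-aligned run of the requested length. The macro below
mirrors jemalloc's ALIGNMENT_CEILING; the concrete sizes (64 KiB PAGE over a
4 KiB system page) and the sample address are illustrative only.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Round s up to the next multiple of a power-of-two alignment. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))

int
main(void) {
	size_t os_page = 4096;             /* page size the OS reports */
	size_t page = 65536;               /* PAGE from --with-lg-page=16 */
	size_t size = 4 * page;            /* request, a multiple of PAGE */
	size_t alloc_size = size + page - os_page;   /* over-sized mapping */

	/* Worst case: mmap() returns one OS page past a PAGE boundary. */
	uintptr_t base = 0x200000 + os_page;         /* hypothetical address */
	uintptr_t aligned = ALIGNMENT_CEILING(base, page);
	assert(aligned % page == 0);
	assert(aligned + size <= base + alloc_size); /* the run always fits */
	(void)aligned;
	return 0;
}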
Diffstat (limited to 'src')
-rw-r--r--  src/extent_mmap.c   60
-rw-r--r--  src/jemalloc.c       4
-rw-r--r--  src/pages.c        182
3 files changed, 145 insertions(+), 101 deletions(-)
diff --git a/src/extent_mmap.c b/src/extent_mmap.c
index 9381dc1..b186275 100644
--- a/src/extent_mmap.c
+++ b/src/extent_mmap.c
@@ -6,66 +6,14 @@
/******************************************************************************/
-static void *
-extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero,
- bool *commit) {
- void *ret;
- size_t alloc_size;
-
- alloc_size = size + alignment - PAGE;
- /* Beware size_t wrap-around. */
- if (alloc_size < size) {
- return NULL;
- }
- do {
- void *pages;
- size_t leadsize;
- pages = pages_map(NULL, alloc_size, commit);
- if (pages == NULL) {
- return NULL;
- }
- leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
- (uintptr_t)pages;
- ret = pages_trim(pages, alloc_size, leadsize, size, commit);
- } while (ret == NULL);
-
- assert(ret != NULL);
- *zero = true;
- return ret;
-}
-
void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit) {
- void *ret;
- size_t offset;
-
- /*
- * Ideally, there would be a way to specify alignment to mmap() (like
- * NetBSD has), but in the absence of such a feature, we have to work
- * hard to efficiently create aligned mappings. The reliable, but
- * slow method is to create a mapping that is over-sized, then trim the
- * excess. However, that always results in one or two calls to
- * pages_unmap().
- *
- * Optimistically try mapping precisely the right amount before falling
- * back to the slow method, with the expectation that the optimistic
- * approach works most of the time.
- */
-
- assert(alignment != 0);
-
- ret = pages_map(new_addr, size, commit);
- if (ret == NULL || ret == new_addr) {
- return ret;
- }
- assert(new_addr == NULL);
- offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
- if (offset != 0) {
- pages_unmap(ret, size);
- return extent_alloc_mmap_slow(size, alignment, zero, commit);
+ void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment,
+ PAGE), commit);
+ if (ret == NULL) {
+ return NULL;
}
-
assert(ret != NULL);
*zero = true;
return ret;
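With the fallback path gone from this file, the only work left in
extent_alloc_mmap() is rounding the caller's alignment up to PAGE before
handing it to pages_map(). A minimal check of that rounding, with
ALIGNMENT_CEILING re-derived from jemalloc's macro and an illustrative PAGE
value rather than one from a real build:

#include <assert.h>
#include <stddef.h>

#define PAGE ((size_t)65536)  /* illustrative; set by --with-lg-page */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + ((alignment) - 1)) & ~((alignment) - 1))

int
main(void) {
	/* A sub-PAGE alignment such as a cacheline is raised to PAGE... */
	assert(ALIGNMENT_CEILING((size_t)64, PAGE) == PAGE);
	/* ...while multiples of PAGE pass through unchanged. */
	assert(ALIGNMENT_CEILING(4 * PAGE, PAGE) == 4 * PAGE);
	return 0;
}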
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 0297cf5..ea632c2 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1220,7 +1220,9 @@ malloc_init_hard_a0_locked() {
}
}
}
- pages_boot();
+ if (pages_boot()) {
+ return true;
+ }
if (base_boot(TSDN_NULL)) {
return true;
}
diff --git a/src/pages.c b/src/pages.c
index 7fa254f..46c307b 100644
--- a/src/pages.c
+++ b/src/pages.c
@@ -12,6 +12,9 @@
/******************************************************************************/
/* Data. */
+/* Actual operating system page size, detected during bootstrap, <= PAGE. */
+static size_t os_page;
+
#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
@@ -20,20 +23,26 @@ static int mmap_flags;
static bool os_overcommits;
/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
-void *
-pages_map(void *addr, size_t size, bool *commit) {
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
+static void os_pages_unmap(void *addr, size_t size);
- void *ret;
+/******************************************************************************/
+static void *
+os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+ assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+ assert(ALIGNMENT_CEILING(size, os_page) == size);
assert(size != 0);
if (os_overcommits) {
*commit = true;
}
+ void *ret;
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
@@ -59,19 +68,48 @@ pages_map(void *addr, size_t size, bool *commit) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
- pages_unmap(ret, size);
+ os_pages_unmap(ret, size);
ret = NULL;
}
#endif
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
+ assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
+ ret == addr));
return ret;
}
-void
-pages_unmap(void *addr, size_t size) {
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
+static void *
+os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+ bool *commit) {
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ os_pages_unmap(addr, alloc_size);
+ void *new_addr = os_pages_map(ret, size, PAGE, commit);
+ if (new_addr == ret) {
+ return ret;
+ }
+ if (new_addr != NULL) {
+ os_pages_unmap(new_addr, size);
+ }
+ return NULL;
+#else
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0) {
+ os_pages_unmap(addr, leadsize);
+ }
+ if (trailsize != 0) {
+ os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ }
+ return ret;
+#endif
+}
+
+static void
+os_pages_unmap(void *addr, size_t size) {
+ assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+ assert(ALIGNMENT_CEILING(size, os_page) == size);
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
@@ -84,50 +122,80 @@ pages_unmap(void *addr, size_t size) {
buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
- "VirtualFree"
+ "VirtualFree"
#else
- "munmap"
+ "munmap"
#endif
- "(): %s\n", buf);
+ "(): %s\n", buf);
if (opt_abort) {
abort();
}
}
}
-void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
- bool *commit) {
- void *ret = (void *)((uintptr_t)addr + leadsize);
-
- assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
- {
- void *new_addr;
-
- pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size, commit);
- if (new_addr == ret) {
- return ret;
- }
- if (new_addr) {
- pages_unmap(new_addr, size);
- }
+static void *
+pages_map_slow(size_t size, size_t alignment, bool *commit) {
+ size_t alloc_size = size + alignment - os_page;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size) {
return NULL;
}
-#else
- {
- size_t trailsize = alloc_size - leadsize - size;
- if (leadsize != 0) {
- pages_unmap(addr, leadsize);
- }
- if (trailsize != 0) {
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ void *ret;
+ do {
+ void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
+ if (pages == NULL) {
+ return NULL;
}
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
+ - (uintptr_t)pages;
+ ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
+ } while (ret == NULL);
+
+ assert(ret != NULL);
+ assert(PAGE_ADDR2BASE(ret) == ret);
+ return ret;
+}
+
+void *
+pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+ assert(alignment >= PAGE);
+ assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
+
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * slow method is to create a mapping that is over-sized, then trim the
+ * excess. However, that always results in one or two calls to
+ * os_pages_unmap(), and it can leave holes in the process's virtual
+ * memory map if memory grows downward.
+ *
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
+ */
+
+ void *ret = os_pages_map(addr, size, os_page, commit);
+ if (ret == NULL || ret == addr) {
return ret;
}
-#endif
+ assert(addr == NULL);
+ if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
+ os_pages_unmap(ret, size);
+ return pages_map_slow(size, alignment, commit);
+ }
+
+ assert(PAGE_ADDR2BASE(ret) == ret);
+ return ret;
+}
+
+void
+pages_unmap(void *addr, size_t size) {
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+ os_pages_unmap(addr, size);
}
static bool
@@ -155,7 +223,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
* We succeeded in mapping memory, but not in the right
* place.
*/
- pages_unmap(result, size);
+ os_pages_unmap(result, size);
return true;
}
return false;
@@ -239,6 +307,21 @@ pages_nohuge(void *addr, size_t size) {
#endif
}
+static size_t
+os_page_detect(void) {
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+#else
+ long result = sysconf(_SC_PAGESIZE);
+ if (result == -1) {
+ return LG_PAGE;
+ }
+ return (size_t)result;
+#endif
+}
+
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void) {
@@ -300,8 +383,17 @@ os_overcommits_proc(void) {
}
#endif
-void
+bool
pages_boot(void) {
+ os_page = os_page_detect();
+ if (os_page > PAGE) {
+ malloc_write("<jemalloc>: Unsupported system page size\n");
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
@@ -318,4 +410,6 @@ pages_boot(void) {
#else
os_overcommits = false;
#endif
+
+ return false;
}
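As an appendix to the pages.c hunks: a self-contained sketch (Linux/POSIX,
assuming MAP_ANONYMOUS is available) of the over-allocate-and-trim slow path
that pages_map_slow() and os_pages_trim() implement above. The names and the
driver in main() are ours; jemalloc's retry loop for the Windows re-map case
and its error reporting are omitted.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
map_aligned_slow(size_t size, size_t alignment, size_t os_page) {
	/* Over-allocate so a suitably aligned run of `size` must fit. */
	size_t alloc_size = size + alignment - os_page;
	if (alloc_size < size) {
		return NULL;                 /* size_t wrap-around */
	}
	void *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pages == MAP_FAILED) {
		return NULL;
	}
	uintptr_t base = (uintptr_t)pages;
	uintptr_t aligned = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t leadsize = aligned - base;
	size_t trailsize = alloc_size - leadsize - size;
	/* Trim the unaligned head and tail, keeping the aligned middle. */
	if (leadsize != 0) {
		munmap(pages, leadsize);
	}
	if (trailsize != 0) {
		munmap((void *)(aligned + size), trailsize);
	}
	return (void *)aligned;
}

int
main(void) {
	size_t os_page = (size_t)sysconf(_SC_PAGESIZE);
	size_t alignment = 16 * os_page;     /* stand-in for a large PAGE */
	void *p = map_aligned_slow(4 * alignment, alignment, os_page);
	assert(p != NULL && ((uintptr_t)p % alignment) == 0);
	printf("aligned mapping at %p\n", p);
	return 0;
}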