Diffstat (limited to 'src/pages.c')
| -rw-r--r-- | src/pages.c | 346 |
1 file changed, 233 insertions(+), 113 deletions(-)
diff --git a/src/pages.c b/src/pages.c
index 7698e49..fec64dd 100644
--- a/src/pages.c
+++ b/src/pages.c
@@ -1,5 +1,12 @@
-#define	JEMALLOC_PAGES_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/pages.h"
+
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
 
 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
 #include <sys/sysctl.h>
@@ -8,6 +15,9 @@
 /******************************************************************************/
 /* Data. */
 
+/* Actual operating system page size, detected during bootstrap, <= PAGE. */
+static size_t	os_page;
+
 #ifndef _WIN32
 #  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
 #  define PAGES_PROT_DECOMMIT (PROT_NONE)
@@ -16,17 +26,26 @@ static int	mmap_flags;
 static bool	os_overcommits;
 
 /******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
 
-void *
-pages_map(void *addr, size_t size, bool *commit)
-{
-	void *ret;
+static void os_pages_unmap(void *addr, size_t size);
+
+/******************************************************************************/
 
+static void *
+os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+	assert(ALIGNMENT_CEILING(size, os_page) == size);
 	assert(size != 0);
 
-	if (os_overcommits)
+	if (os_overcommits) {
 		*commit = true;
+	}
 
+	void *ret;
 #ifdef _WIN32
 	/*
 	 * If VirtualAlloc can't allocate at the given address when one is
@@ -46,24 +65,54 @@ pages_map(void *addr, size_t size, bool *commit)
 	}
 	assert(ret != NULL);
 
-	if (ret == MAP_FAILED)
+	if (ret == MAP_FAILED) {
 		ret = NULL;
-	else if (addr != NULL && ret != addr) {
+	} else if (addr != NULL && ret != addr) {
 		/*
 		 * We succeeded in mapping memory, but not in the right place.
 		 */
-		pages_unmap(ret, size);
+		os_pages_unmap(ret, size);
 		ret = NULL;
 	}
 #endif
 
-	assert(ret == NULL || (addr == NULL && ret != addr)
-	    || (addr != NULL && ret == addr));
-	return (ret);
+	assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
+	    ret == addr));
+	return ret;
 }
 
-void
-pages_unmap(void *addr, size_t size)
-{
+static void *
+os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+    bool *commit) {
+	void *ret = (void *)((uintptr_t)addr + leadsize);
+
+	assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+	os_pages_unmap(addr, alloc_size);
+	void *new_addr = os_pages_map(ret, size, PAGE, commit);
+	if (new_addr == ret) {
+		return ret;
+	}
+	if (new_addr != NULL) {
+		os_pages_unmap(new_addr, size);
+	}
+	return NULL;
+#else
+	size_t trailsize = alloc_size - leadsize - size;
+
+	if (leadsize != 0) {
+		os_pages_unmap(addr, leadsize);
+	}
+	if (trailsize != 0) {
+		os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+	}
+	return ret;
+#endif
+}
+
+static void
+os_pages_unmap(void *addr, size_t size) {
+	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+	assert(ALIGNMENT_CEILING(size, os_page) == size);
 #ifdef _WIN32
 	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
@@ -76,54 +125,90 @@ pages_unmap(void *addr, size_t size)
 		buferror(get_errno(), buf, sizeof(buf));
 		malloc_printf("<jemalloc>: Error in "
 #ifdef _WIN32
-		              "VirtualFree"
+		    "VirtualFree"
 #else
-		              "munmap"
+		    "munmap"
 #endif
-		              "(): %s\n", buf);
-		if (opt_abort)
+		    "(): %s\n", buf);
+		if (opt_abort) {
 			abort();
+		}
+	}
+}
+
+static void *
+pages_map_slow(size_t size, size_t alignment, bool *commit) {
+	size_t alloc_size = size + alignment - os_page;
+	/* Beware size_t wrap-around. */
+	if (alloc_size < size) {
+		return NULL;
 	}
+
+	void *ret;
+	do {
+		void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
+		if (pages == NULL) {
+			return NULL;
+		}
+		size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
+		    - (uintptr_t)pages;
+		ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
+	} while (ret == NULL);
+
+	assert(ret != NULL);
+	assert(PAGE_ADDR2BASE(ret) == ret);
+	return ret;
 }
 
 void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
-    bool *commit)
-{
-	void *ret = (void *)((uintptr_t)addr + leadsize);
+pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+	assert(alignment >= PAGE);
+	assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
 
-	assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
-	{
-		void *new_addr;
-
-		pages_unmap(addr, alloc_size);
-		new_addr = pages_map(ret, size, commit);
-		if (new_addr == ret)
-			return (ret);
-		if (new_addr)
-			pages_unmap(new_addr, size);
-		return (NULL);
-	}
-#else
-	{
-		size_t trailsize = alloc_size - leadsize - size;
-
+	/*
+	 * Ideally, there would be a way to specify alignment to mmap() (like
+	 * NetBSD has), but in the absence of such a feature, we have to work
+	 * hard to efficiently create aligned mappings. The reliable, but
+	 * slow method is to create a mapping that is over-sized, then trim the
+	 * excess. However, that always results in one or two calls to
+	 * os_pages_unmap(), and it can leave holes in the process's virtual
+	 * memory map if memory grows downward.
+	 *
+	 * Optimistically try mapping precisely the right amount before falling
+	 * back to the slow method, with the expectation that the optimistic
+	 * approach works most of the time.
+	 */
-		if (leadsize != 0)
-			pages_unmap(addr, leadsize);
-		if (trailsize != 0)
-			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
-		return (ret);
+	void *ret = os_pages_map(addr, size, os_page, commit);
+	if (ret == NULL || ret == addr) {
+		return ret;
 	}
-#endif
+	assert(addr == NULL);
+	if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
+		os_pages_unmap(ret, size);
+		return pages_map_slow(size, alignment, commit);
+	}
+
+	assert(PAGE_ADDR2BASE(ret) == ret);
+	return ret;
+}
+
+void
+pages_unmap(void *addr, size_t size) {
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+	os_pages_unmap(addr, size);
 }
 
 static bool
-pages_commit_impl(void *addr, size_t size, bool commit)
-{
+pages_commit_impl(void *addr, size_t size, bool commit) {
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
 
-	if (os_overcommits)
-		return (true);
+	if (os_overcommits) {
+		return true;
+	}
 
 #ifdef _WIN32
 	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
@@ -133,103 +218,123 @@ pages_commit_impl(void *addr, size_t size, bool commit)
 		int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
 		void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
 		    -1, 0);
-		if (result == MAP_FAILED)
-			return (true);
+		if (result == MAP_FAILED) {
+			return true;
+		}
 		if (result != addr) {
 			/*
 			 * We succeeded in mapping memory, but not in the right
 			 * place.
 			 */
-			pages_unmap(result, size);
-			return (true);
+			os_pages_unmap(result, size);
+			return true;
 		}
-		return (false);
+		return false;
 	}
 #endif
 }
 
 bool
-pages_commit(void *addr, size_t size)
-{
-
-	return (pages_commit_impl(addr, size, true));
+pages_commit(void *addr, size_t size) {
+	return pages_commit_impl(addr, size, true);
 }
 
 bool
-pages_decommit(void *addr, size_t size)
-{
-
-	return (pages_commit_impl(addr, size, false));
+pages_decommit(void *addr, size_t size) {
+	return pages_commit_impl(addr, size, false);
 }
 
 bool
-pages_purge(void *addr, size_t size)
-{
-	bool unzeroed;
+pages_purge_lazy(void *addr, size_t size) {
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+	if (!pages_can_purge_lazy) {
+		return true;
+	}
 
 #ifdef _WIN32
 	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
-	unzeroed = true;
-#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
-    defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
-#  if defined(JEMALLOC_PURGE_MADVISE_FREE)
-#    define JEMALLOC_MADV_PURGE MADV_FREE
-#    define JEMALLOC_MADV_ZEROS false
-#  elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
-#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
-#    define JEMALLOC_MADV_ZEROS true
-#  else
-#    error No madvise(2) flag defined for purging unused dirty pages
-#  endif
-	int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
-	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
-#  undef JEMALLOC_MADV_PURGE
-#  undef JEMALLOC_MADV_ZEROS
+	return false;
+#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+	return (madvise(addr, size, MADV_FREE) != 0);
+#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+    !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
+	return (madvise(addr, size, MADV_DONTNEED) != 0);
 #else
-	/* Last resort no-op. */
-	unzeroed = true;
+	not_reached();
 #endif
-	return (unzeroed);
 }
 
 bool
-pages_huge(void *addr, size_t size)
-{
-
+pages_purge_forced(void *addr, size_t size) {
 	assert(PAGE_ADDR2BASE(addr) == addr);
 	assert(PAGE_CEILING(size) == size);
 
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
-	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+	if (!pages_can_purge_forced) {
+		return true;
+	}
+
+#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
+    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
+	return (madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_MAPS_COALESCE)
+	/* Try to overlay a new demand-zeroed mapping. */
+	return pages_commit(addr, size);
 #else
-	return (false);
+	not_reached();
 #endif
 }
 
 bool
-pages_nohuge(void *addr, size_t size)
-{
+pages_huge(void *addr, size_t size) {
+	assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+	assert(HUGEPAGE_CEILING(size) == size);
 
-	assert(PAGE_ADDR2BASE(addr) == addr);
-	assert(PAGE_CEILING(size) == size);
+#ifdef JEMALLOC_THP
+	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+	return true;
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size) {
+	assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+	assert(HUGEPAGE_CEILING(size) == size);
 
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+#ifdef JEMALLOC_THP
 	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
 #else
-	return (false);
+	return false;
+#endif
+}
+
+static size_t
+os_page_detect(void) {
+#ifdef _WIN32
+	SYSTEM_INFO si;
+	GetSystemInfo(&si);
+	return si.dwPageSize;
+#else
+	long result = sysconf(_SC_PAGESIZE);
+	if (result == -1) {
+		return LG_PAGE;
+	}
+	return (size_t)result;
 #endif
 }
 
 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
 static bool
-os_overcommits_sysctl(void)
-{
+os_overcommits_sysctl(void) {
 	int vm_overcommit;
 	size_t sz;
 
 	sz = sizeof(vm_overcommit);
-	if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
-		return (false); /* Error. */
+	if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
+		return false; /* Error. */
+	}
 
 	return ((vm_overcommit & 0x3) == 0);
 }
@@ -242,19 +347,23 @@
  * wrappers.
  */
 static bool
-os_overcommits_proc(void)
-{
+os_overcommits_proc(void) {
 	int fd;
 	char buf[1];
 	ssize_t nread;
 
 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
-	fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+	fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
+	    O_CLOEXEC);
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+	fd = (int)syscall(SYS_openat,
+	    AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
 #else
-	fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+	fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
 #endif
-	if (fd == -1)
-		return (false); /* Error. */
+	if (fd == -1) {
+		return false; /* Error. */
+	}
 
 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
 	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
@@ -268,8 +377,9 @@ os_overcommits_proc(void)
 	close(fd);
 #endif
 
-	if (nread < 1)
-		return (false); /* Error. */
+	if (nread < 1) {
+		return false; /* Error. */
+	}
 	/*
 	 * /proc/sys/vm/overcommit_memory meanings:
 	 * 0: Heuristic overcommit.
@@ -280,9 +390,16 @@ os_overcommits_proc(void)
 }
 #endif
 
-void
-pages_boot(void)
-{
+bool
+pages_boot(void) {
+	os_page = os_page_detect();
+	if (os_page > PAGE) {
+		malloc_write("<jemalloc>: Unsupported system page size\n");
+		if (opt_abort) {
+			abort();
+		}
+		return true;
+	}
 
 #ifndef _WIN32
 	mmap_flags = MAP_PRIVATE | MAP_ANON;
@@ -293,10 +410,13 @@ pages_boot(void)
 #elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
 	os_overcommits = os_overcommits_proc();
 #  ifdef MAP_NORESERVE
-	if (os_overcommits)
+	if (os_overcommits) {
 		mmap_flags |= MAP_NORESERVE;
+	}
 #  endif
 #else
 	os_overcommits = false;
 #endif
+
+	return false;
 }
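The comment added to pages_map() above describes the over-allocate-then-trim strategy that the new pages_map_slow() and os_pages_trim() implement. The standalone sketch below illustrates that same technique directly against mmap()/munmap(); it is not jemalloc code, and the function name map_aligned(), the protection/mapping flags, and the assumptions that alignment is a power of two no smaller than the system page size and that size is a page multiple are all illustrative choices.

/*
 * Standalone illustration of the "map oversized, then trim" approach used by
 * pages_map_slow()/os_pages_trim() in the patch above. Assumes alignment is a
 * power of two >= the page size and size is a page multiple; error handling
 * is minimal. MAP_ANONYMOUS is spelled MAP_ANON on some BSDs.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
map_aligned(size_t size, size_t alignment) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t alloc_size = size + alignment - page;

	/* Beware size_t wrap-around, as in pages_map_slow(). */
	if (alloc_size < size) {
		return NULL;
	}
	void *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pages == MAP_FAILED) {
		return NULL;
	}
	/* Round the raw mapping up to the requested alignment. */
	uintptr_t base = (uintptr_t)pages;
	uintptr_t aligned = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t leadsize = aligned - base;
	size_t trailsize = alloc_size - leadsize - size;

	/* Trim the excess, mirroring os_pages_trim() on non-Windows systems. */
	if (leadsize != 0) {
		munmap(pages, leadsize);
	}
	if (trailsize != 0) {
		munmap((void *)(aligned + size), trailsize);
	}
	return (void *)aligned;
}

int
main(void) {
	/* Request a 1 MiB mapping aligned to a 2 MiB boundary. */
	void *p = map_aligned(1 << 20, 2 << 20);

	printf("%p\n", p);
	if (p != NULL) {
		munmap(p, 1 << 20);
	}
	return 0;
}

The one or two extra munmap() calls in the trim step are exactly the cost the patch's comment warns about, which is why the new pages_map() first tries a plain, exactly sized os_pages_map() and only falls back to the slow path when the returned address does not already satisfy the requested alignment.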
