author    Jason Evans <jasone@canonware.com>    2012-04-22 02:17:21 (GMT)
committer Jason Evans <jasone@canonware.com>    2012-04-22 02:17:21 (GMT)
commit    a8f8d7540d66ddee7337db80c92890916e1063ca (patch)
tree      dfd205686200f0b8fb6e2d78aaba0b39ff7aca02 /src
parent    7ad54c1c30e0805e0758690115875f982de46cf2 (diff)
Remove mmap_unaligned.
Remove mmap_unaligned, which was used to heuristically decide whether to optimistically call mmap() in such a way that could reduce the total number of system calls. If I remember correctly, the intention of mmap_unaligned was to avoid always executing the slow path in the presence of ASLR. However, that reasoning seems to have been based on a flawed understanding of how ASLR actually works. Although ASLR apparently causes mmap() to ignore address requests, it does not cause total placement randomness, so there is a reasonable expectation that iterative mmap() calls will start returning chunk-aligned mappings once the first chunk has been properly aligned.
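
The approach this commit keeps is visible in the rewritten chunk_alloc_mmap() below: map exactly the requested size, and if the kernel happens to return an aligned address, a single system call suffices; otherwise, try to extend the mapping to the next alignment boundary and trim the unaligned head. Here is a minimal standalone sketch of that optimistic path. pages_map(), pages_unmap(), and ALIGNMENT_ADDR2OFFSET() are jemalloc internals; the simplified versions below, the ALIGN_OFFSET() macro, and the chunk_alloc_mmap_fast() name are illustrative assumptions, not the library's exact code.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Simplified stand-in for jemalloc's pages_map(): anonymous mapping that
 * fails (returns NULL) when a non-NULL address hint is not honored.
 */
static void *
pages_map(void *addr, size_t size)
{
	void *ret = mmap(addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ret == MAP_FAILED)
		return (NULL);
	if (addr != NULL && ret != addr) {
		/* ASLR or an existing mapping made the kernel ignore the hint. */
		munmap(ret, size);
		return (NULL);
	}
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

	munmap(addr, size);
}

/*
 * Offset of addr above the previous alignment boundary; alignment must be a
 * power of two.  (Stands in for jemalloc's ALIGNMENT_ADDR2OFFSET().)
 */
#define	ALIGN_OFFSET(addr, alignment)					\
	((size_t)((uintptr_t)(addr) & ((alignment) - 1)))

/*
 * Optimistic path: one mmap() when the kernel returns an aligned address;
 * otherwise extend to the next boundary and trim the unaligned head.
 * Returns NULL when the caller should fall back to over-allocate-and-trim.
 */
static void *
chunk_alloc_mmap_fast(size_t size, size_t alignment)
{
	void *ret;
	size_t offset;

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGN_OFFSET(ret, alignment);
	if (offset != 0) {
		/* Try to extend past the next alignment boundary. */
		if (pages_map((void *)((uintptr_t)ret + size),
		    alignment - offset) == NULL) {
			/* Extension failed; undo and report failure. */
			pages_unmap(ret, size);
			return (NULL);
		}
		/* Discard the unneeded leading space. */
		pages_unmap(ret, alignment - offset);
		ret = (void *)((uintptr_t)ret + (alignment - offset));
	}
	return (ret);
}

Once one chunk has been placed this way, later mmap() calls tend to return addresses adjacent to existing mappings, so offset is usually 0 and the single-call path keeps succeeding, which is the expectation the commit message describes.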
Diffstat (limited to 'src')
-rw-r--r--  src/chunk.c       12
-rw-r--r--  src/chunk_mmap.c  100
-rw-r--r--  src/jemalloc.c    7
3 files changed, 28 insertions(+), 91 deletions(-)
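
When extension fails, the code falls back to chunk_alloc_mmap_slow(), which appears only in fragments in the diff below. Here is a sketch of that reliable-but-expensive technique, reusing the pages_map()/pages_unmap() helpers above; the chunk_alloc_mmap_slow_sketch() name and the exact over-allocation amount are assumptions for illustration, and the real function additionally threads through the unaligned and zero parameters.

/*
 * Illustrative sketch, not jemalloc's exact code: over-allocate by a full
 * alignment unit so an aligned block of the requested size must fit, then
 * unmap the unaligned head and the unused tail.
 */
static void *
chunk_alloc_mmap_slow_sketch(size_t size, size_t alignment)
{
	void *pages;
	uintptr_t aligned;
	size_t alloc_size, leadsize, trailsize;

	alloc_size = size + alignment;
	if (alloc_size < size)
		return (NULL);	/* size_t overflow. */
	pages = pages_map(NULL, alloc_size);
	if (pages == NULL)
		return (NULL);

	/* Round up to the next alignment boundary within the mapping. */
	aligned = ((uintptr_t)pages + alignment - 1) &
	    ~((uintptr_t)alignment - 1);
	leadsize = aligned - (uintptr_t)pages;
	trailsize = alloc_size - leadsize - size;

	if (leadsize != 0)
		pages_unmap(pages, leadsize);
	if (trailsize != 0)
		pages_unmap((void *)(aligned + size), trailsize);
	return ((void *)aligned);
}

This guaranteed method costs up to three system calls per chunk, which is why the optimistic single-mmap() path above is tried first.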
diff --git a/src/chunk.c b/src/chunk.c
index 0fccd0c..5426b02 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -274,7 +274,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
}
bool
-chunk_boot0(void)
+chunk_boot(void)
{
/* Set variables according to the value of opt_lg_chunk. */
@@ -301,13 +301,3 @@ chunk_boot0(void)
return (false);
}
-
-bool
-chunk_boot1(void)
-{
-
- if (chunk_mmap_boot())
- return (true);
-
- return (false);
-}
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 126406a..9ff7480 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -2,17 +2,6 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Data. */
-
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-malloc_tsd_data(static, mmap_unaligned, bool, false)
-malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
- malloc_tsd_no_cleanup)
-
-/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *pages_map(void *addr, size_t size);
@@ -112,16 +101,6 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
if (trailsize != 0)
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- /*
- * If mmap() returned an aligned mapping, reset mmap_unaligned so that
- * the next chunk_alloc_mmap() execution tries the fast allocation
- * method.
- */
- if (unaligned == false && mmap_unaligned_booted) {
- bool mu = false;
- mmap_unaligned_tsd_set(&mu);
- }
-
assert(ret != NULL);
*zero = true;
return (ret);
@@ -131,6 +110,7 @@ void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
void *ret;
+ size_t offset;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
@@ -152,44 +132,34 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
*
* Another possible confounding factor is address space layout
* randomization (ASLR), which causes mmap(2) to disregard the
- * requested address. mmap_unaligned tracks whether the previous
- * chunk_alloc_mmap() execution received any unaligned or relocated
- * mappings, and if so, the current execution will immediately fall
- * back to the slow method. However, we keep track of whether the fast
- * method would have succeeded, and if so, we make a note to try the
- * fast method next time.
+ * requested address. As such, repeatedly trying to extend unaligned
+ * mappings could result in an infinite loop, so if extension fails,
+ * immediately fall back to the reliable method of over-allocation
+ * followed by trimming.
*/
- if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
- size_t offset;
-
- ret = pages_map(NULL, size);
- if (ret == NULL)
- return (NULL);
-
- offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
- if (offset != 0) {
- bool mu = true;
- mmap_unaligned_tsd_set(&mu);
- /* Try to extend chunk boundary. */
- if (pages_map((void *)((uintptr_t)ret + size),
- chunksize - offset) == NULL) {
- /*
- * Extension failed. Clean up, then revert to
- * the reliable-but-expensive method.
- */
- pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment,
- true, zero));
- } else {
- /* Clean up unneeded leading space. */
- pages_unmap(ret, chunksize - offset);
- ret = (void *)((uintptr_t)ret + (chunksize -
- offset));
- }
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
+
+ offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+ if (offset != 0) {
+ /* Try to extend chunk boundary. */
+ if (pages_map((void *)((uintptr_t)ret + size), chunksize -
+ offset) == NULL) {
+ /*
+ * Extension failed. Clean up, then fall back to the
+ * reliable-but-expensive method.
+ */
+ pages_unmap(ret, size);
+ return (chunk_alloc_mmap_slow(size, alignment, true,
+ zero));
+ } else {
+ /* Clean up unneeded leading space. */
+ pages_unmap(ret, chunksize - offset);
+ ret = (void *)((uintptr_t)ret + (chunksize - offset));
}
- } else
- return (chunk_alloc_mmap_slow(size, alignment, false, zero));
+ }
assert(ret != NULL);
*zero = true;
@@ -205,21 +175,3 @@ chunk_dealloc_mmap(void *chunk, size_t size)
return (config_munmap == false);
}
-
-bool
-chunk_mmap_boot(void)
-{
-
- /*
- * XXX For the non-TLS implementation of tsd, the first access from
- * each thread causes memory allocation. The result is a bootstrapping
- * problem for this particular use case, so for now just disable it by
- * leaving it in an unbooted state.
- */
-#ifdef JEMALLOC_TLS
- if (mmap_unaligned_tsd_boot())
- return (true);
-#endif
-
- return (false);
-}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 00c2b23..f9c8916 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -634,7 +634,7 @@ malloc_init_hard(void)
return (true);
}
- if (chunk_boot0()) {
+ if (chunk_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
@@ -711,11 +711,6 @@ malloc_init_hard(void)
ncpus = malloc_ncpus();
malloc_mutex_lock(&init_lock);
- if (chunk_boot1()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
if (mutex_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);