author     Jason Evans <jasone@canonware.com>   2016-04-06 18:54:44 (GMT)
committer  Jason Evans <jasone@canonware.com>   2016-04-11 09:35:00 (GMT)
commit     245ae6036c09cc11a72fab4335495d95cddd5beb (patch)
tree       675007737fcc682ab3929c66f1a590e7ed144c23
parent     96aa67aca89725f0b1df3257421a3d0a48eb2700 (diff)
Support --with-lg-page values larger than actual page size.
During over-allocation in preparation for creating aligned mappings,
allocate one more page than would be necessary if PAGE were the actual
page size, so that trimming still succeeds even if the system returns a
mapping that has less than PAGE alignment. This allows compiling with
e.g. 64 KiB "pages" on systems that actually use 4 KiB pages.
Note that for e.g. --with-lg-page=21, it is also necessary to increase
the chunk size (e.g. --with-malloc-conf=lg_chunk:22) so that there are
at least two "pages" per chunk. In practice this isn't a particularly
compelling configuration because so much (unusable) virtual memory is
dedicated to chunk headers.
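
To make the arithmetic concrete (illustrative values only, taken from the
example above), the chunk must span at least two of the configured "pages",
i.e. lg_chunk must be at least lg_page + 1:

#define LG_PAGE         21                              /* --with-lg-page=21: 2 MiB "pages". */
#define LG_CHUNK        22                              /* --with-malloc-conf=lg_chunk:22: 4 MiB chunks. */
#define PAGES_PER_CHUNK (1U << (LG_CHUNK - LG_PAGE))    /* 2^(22-21) = 2 */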
-rw-r--r--  include/jemalloc/internal/bitmap.h                | 4
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in  | 4
-rw-r--r--  src/arena.c                                       | 2
-rw-r--r--  src/chunk_mmap.c                                  | 2
4 files changed, 6 insertions, 6 deletions
diff --git a/include/jemalloc/internal/bitmap.h b/include/jemalloc/internal/bitmap.h
index 894695f..36f38b5 100644
--- a/include/jemalloc/internal/bitmap.h
+++ b/include/jemalloc/internal/bitmap.h
@@ -17,8 +17,8 @@ typedef unsigned long bitmap_t;
 
 /*
  * Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffsl more than 2^3 times, use a
- * tree instead.
+ * force linear search, if we would have to call ffs_lu() more than 2^3 times,
+ * use a tree instead.
  */
 #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
 #  define USE_TREE
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 55ca714..0b57b82 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -741,7 +741,7 @@ sa2u(size_t size, size_t alignment)
                 * Calculate the size of the over-size run that arena_palloc()
                 * would need to allocate in order to guarantee the alignment.
                 */
-               if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+               if (usize + large_pad + alignment <= arena_maxrun)
                        return (usize);
        }
 
@@ -771,7 +771,7 @@ sa2u(size_t size, size_t alignment)
         * Calculate the multi-chunk mapping that huge_palloc() would need in
         * order to guarantee the alignment.
         */
-       if (usize + alignment - PAGE < usize) {
+       if (usize + alignment < usize) {
                /* size_t overflow. */
                return (0);
        }
diff --git a/src/arena.c b/src/arena.c
index d884dc4..3373e1d 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2500,7 +2500,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
                return (NULL);
 
        alignment = PAGE_CEILING(alignment);
-       alloc_size = usize + large_pad + alignment - PAGE;
+       alloc_size = usize + large_pad + alignment;
 
        malloc_mutex_lock(&arena->lock);
        run = arena_run_alloc_large(arena, alloc_size, false);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 56b2ee4..e2e66bc 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
        void *ret;
        size_t alloc_size;
 
-       alloc_size = size + alignment - PAGE;
+       alloc_size = size + alignment;
        /* Beware size_t wrap-around. */
        if (alloc_size < size)
                return (NULL);