| author | Jason Evans <jasone@canonware.com> | 2014-10-10 00:54:06 (GMT) |
|---|---|---|
| committer | Jason Evans <jasone@canonware.com> | 2014-10-10 05:44:37 (GMT) |
| commit | fc0b3b7383373d66cfed2cd4e2faa272a6868d32 (patch) | |
| tree | 529043fc2e79c396241ad6808a92d2fddd36d21a /include/jemalloc | |
| parent | b123ddc760e5b53dde17c6a19a130173067c0e30 (diff) | |
Add configure options.
Add:
- --with-lg-page
- --with-lg-page-sizes
- --with-lg-size-class-group
- --with-lg-quantum
Get rid of STATIC_PAGE_SHIFT in favor of directly setting LG_PAGE.
Fix various edge conditions exposed by the configure options.
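
For illustration, a sketch of how the new options might be passed at build time. The flag names come from this commit; the values are hypothetical, chosen to mirror the page-size list (`12 13 16`) and group size (`2`) that were previously hard-coded in size_classes.sh (see the diff below):

```sh
# Hypothetical configure invocation exercising the new options.
# 2^12 = 4 KiB base page size, candidate page sizes of 4/8/64 KiB,
# 2^2 = 4 size classes per size doubling, and a 2^4 = 16-byte quantum.
# --with-lg-page-sizes takes a comma-separated list, matching the
# `tr ',' ' '` conversion this commit adds to size_classes.sh.
./configure \
    --with-lg-page=12 \
    --with-lg-page-sizes=12,13,16 \
    --with-lg-size-class-group=2 \
    --with-lg-quantum=4
```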
Diffstat (limited to 'include/jemalloc')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | include/jemalloc/internal/arena.h | 8 |
| -rw-r--r-- | include/jemalloc/internal/huge.h | 9 |
| -rw-r--r-- | include/jemalloc/internal/jemalloc_internal.h.in | 28 |
| -rw-r--r-- | include/jemalloc/internal/jemalloc_internal_defs.h.in | 10 |
| -rw-r--r-- | include/jemalloc/internal/private_symbols.txt | 4 |
| -rwxr-xr-x | include/jemalloc/internal/size_classes.sh | 12 |
| -rw-r--r-- | include/jemalloc/internal/tcache.h | 4 |
7 files changed, 46 insertions, 29 deletions
```diff
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 894ce9a..f5b9fc6 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -362,8 +362,8 @@ void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
 void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
 void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, arena_chunk_map_bits_t *bitselm);
 void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t pageind, arena_chunk_map_bits_t *bitselm);
 void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
@@ -371,8 +371,10 @@ void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 #ifdef JEMALLOC_JET
 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#else
+void arena_dalloc_junk_large(void *ptr, size_t usize);
 #endif
-void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 #ifdef JEMALLOC_JET
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 5d4d3a1..39d8aa5 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -9,19 +9,20 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero);
+void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    bool try_tcache);
 void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
-    bool zero);
+    bool zero, bool try_tcache);
 bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
     bool zero);
 void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t extra, size_t alignment, bool zero,
-    bool try_tcache_dalloc);
+    bool try_tcache_alloc, bool try_tcache_dalloc);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void huge_dalloc(tsd_t *tsd, void *ptr);
+void huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache);
 size_t huge_salloc(const void *ptr);
 prof_tctx_t *huge_prof_tctx_get(const void *ptr);
 void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index f4d5de6..3f65fad 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -185,7 +185,7 @@ typedef unsigned index_t;
 #define TINY_MIN (1U << LG_TINY_MIN)
 
 /*
- * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
  * classes).
  */
 #ifndef LG_QUANTUM
@@ -235,7 +235,8 @@ typedef unsigned index_t;
 #  define LG_QUANTUM 4
 #  endif
 #  ifndef LG_QUANTUM
-#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+#    error "Unknown minimum alignment for architecture; specify via "
+         "--with-lg-quantum"
 #  endif
 #endif
 
@@ -275,12 +276,11 @@ typedef unsigned index_t;
 #define CACHELINE_CEILING(s) \
 	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
 
-/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
+/* Page size. LG_PAGE is determined by the configure script. */
 #ifdef PAGE_MASK
 #  undef PAGE_MASK
 #endif
-#define LG_PAGE STATIC_PAGE_SHIFT
-#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE ((size_t)(1U << LG_PAGE))
 #define PAGE_MASK ((size_t)(PAGE - 1))
 
 /* Return the smallest pagesize multiple that is >= s. */
@@ -809,7 +809,7 @@ imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
 	if (size <= arena_maxclass)
 		return (arena_malloc(tsd, arena, size, false, try_tcache));
 	else
-		return (huge_malloc(tsd, arena, size, false));
+		return (huge_malloc(tsd, arena, size, false, try_tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -826,7 +826,7 @@ icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
 	if (size <= arena_maxclass)
 		return (arena_malloc(tsd, arena, size, true, try_tcache));
 	else
-		return (huge_malloc(tsd, arena, size, true));
+		return (huge_malloc(tsd, arena, size, true, try_tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -854,9 +854,11 @@ ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
 			return (NULL);
 		ret = arena_palloc(arena, usize, alignment, zero);
 	} else if (alignment <= chunksize)
-		ret = huge_malloc(tsd, arena, usize, zero);
-	else
-		ret = huge_palloc(tsd, arena, usize, alignment, zero);
+		ret = huge_malloc(tsd, arena, usize, zero, try_tcache);
+	else {
+		ret = huge_palloc(tsd, arena, usize, alignment, zero,
+		    try_tcache);
+	}
 	}
 
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -938,7 +940,7 @@ idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
 	if (chunk != ptr)
 		arena_dalloc(tsd, chunk, ptr, try_tcache);
 	else
-		huge_dalloc(tsd, ptr);
+		huge_dalloc(tsd, ptr, try_tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -952,7 +954,7 @@ isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
 	if (chunk != ptr)
 		arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
 	else
-		huge_dalloc(tsd, ptr);
+		huge_dalloc(tsd, ptr, try_tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -1042,7 +1044,7 @@ iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero,
 		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
 	} else {
 		return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, try_tcache_dalloc));
+		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
 	}
 }
 
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index fd85e5c..0ff939c 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -144,8 +144,14 @@
 /* Support lazy locking (avoid locking unless a second thread is launched). */
 #undef JEMALLOC_LAZY_LOCK
 
-/* One page is 2^STATIC_PAGE_SHIFT bytes. */
-#undef STATIC_PAGE_SHIFT
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#undef LG_QUANTUM
+
+/* One page is 2^LG_PAGE bytes. */
+#undef LG_PAGE
 
 /*
  * If defined, use munmap() to unmap freed chunks, rather than storing them for
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index d5e6fdc..66d4822 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -16,11 +16,11 @@ arena_chunk_dalloc_huge
 arena_cleanup
 arena_dalloc
 arena_dalloc_bin
-arena_dalloc_bin_locked
+arena_dalloc_bin_junked_locked
 arena_dalloc_junk_large
 arena_dalloc_junk_small
 arena_dalloc_large
-arena_dalloc_large_locked
+arena_dalloc_large_junked_locked
 arena_dalloc_small
 arena_dss_prec_get
 arena_dss_prec_set
diff --git a/include/jemalloc/internal/size_classes.sh b/include/jemalloc/internal/size_classes.sh
index 897570c..733338c 100755
--- a/include/jemalloc/internal/size_classes.sh
+++ b/include/jemalloc/internal/size_classes.sh
@@ -1,4 +1,6 @@
 #!/bin/sh
+#
+# Usage: size_classes.sh <lg_parr> <lg_g>
 
 # The following limits are chosen such that they cover all supported platforms.
 
@@ -15,10 +17,10 @@ lg_tmin=3
 lg_kmax=12
 
 # Page sizes.
-lg_parr="12 13 16"
+lg_parr=`echo $1 | tr ',' ' '`
 
 # Size class group size (number of size classes for each size doubling).
-lg_g=2
+lg_g=$2
 
 pow2() {
   e=$1
@@ -159,7 +161,11 @@ size_classes() {
       nbins=$((${index} + 1))
       # Final written value is correct:
       small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
-      lg_large_minclass=$((${lg_grp} + 1))
+      if [ ${lg_g} -gt 0 ] ; then
+        lg_large_minclass=$((${lg_grp} + 1))
+      else
+        lg_large_minclass=$((${lg_grp} + 2))
+      fi
     fi
     index=$((${index} + 1))
     ndelta=$((${ndelta} + 1))
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 02eec5d..fe9c47e 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -112,7 +112,7 @@ void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
 void tcache_arena_reassociate(tcache_t *tcache, arena_t *arena);
 void tcache_arena_dissociate(tcache_t *tcache);
 tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(arena_t *arena);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
 void tcache_cleanup(tsd_t *tsd);
 void tcache_enabled_cleanup(tsd_t *tsd);
 void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
@@ -363,7 +363,7 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 	binind = size2index(size);
 
 	if (config_fill && unlikely(opt_junk))
-		memset(ptr, 0x5a, size);
+		arena_dalloc_junk_large(ptr, size);
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
```
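
The new usage comment makes size_classes.sh's interface explicit. As a sketch of running it by hand (assuming the script writes the generated header to stdout, which its use from the build machinery suggests; the redirection target is illustrative):

```sh
# Regenerate the size-class header for page sizes 2^12, 2^13, and 2^16
# with 2^2 = 4 size classes per size doubling -- the same values this
# commit removes from the script's hard-coded lg_parr and lg_g.
# The first argument is comma-separated; the script expands it with
# `tr ',' ' '` as shown in the diff above.
sh include/jemalloc/internal/size_classes.sh "12,13,16" 2 > size_classes.h
```

The lg_g guard on lg_large_minclass is one of the "edge conditions" the commit message mentions: with a group size of 2^0 = 1, the smallest large size class moves up by an extra doubling (lg_grp + 2 instead of lg_grp + 1).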
