author    | Jason Evans <jasone@canonware.com> | 2011-04-01 03:36:17 (GMT)
committer | Jason Evans <jasone@canonware.com> | 2011-04-01 03:36:17 (GMT)
commit    | 7427525c28d58c423a68930160e3b0fe577fe953 (patch)
tree      | 471519cfe8e156dfaa8e3f6f4f7d92db423b81f3 /src
parent    | 64ba3d7cd9901ed00d690365b1b3da6aa7b9cd59 (diff)
Move repo contents in jemalloc/ to top level.
Diffstat (limited to 'src')
-rw-r--r-- | src/arena.c      | 2703
-rw-r--r-- | src/atomic.c     |    2
-rw-r--r-- | src/base.c       |  106
-rw-r--r-- | src/bitmap.c     |   90
-rw-r--r-- | src/chunk.c      |  171
-rw-r--r-- | src/chunk_dss.c  |  284
-rw-r--r-- | src/chunk_mmap.c |  239
-rw-r--r-- | src/chunk_swap.c |  402
-rw-r--r-- | src/ckh.c        |  619
-rw-r--r-- | src/ctl.c        | 1670
-rw-r--r-- | src/extent.c     |   41
-rw-r--r-- | src/hash.c       |    2
-rw-r--r-- | src/huge.c       |  379
-rw-r--r-- | src/jemalloc.c   | 1847
-rw-r--r-- | src/mb.c         |    2
-rw-r--r-- | src/mutex.c      |   90
-rw-r--r-- | src/prof.c       | 1243
-rw-r--r-- | src/rtree.c      |   46
-rw-r--r-- | src/stats.c      |  790
-rw-r--r-- | src/tcache.c     |  480
-rw-r--r-- | src/zone.c       |  354
21 files changed, 11560 insertions(+), 0 deletions(-)
diff --git a/src/arena.c b/src/arena.c new file mode 100644 index 0000000..9aaf47f --- /dev/null +++ b/src/arena.c @@ -0,0 +1,2703 @@ +#define JEMALLOC_ARENA_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT; +size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT; +ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; +uint8_t const *small_size2bin; +arena_bin_info_t *arena_bin_info; + +/* Various bin-related settings. */ +unsigned nqbins; +unsigned ncbins; +unsigned nsbins; +unsigned nbins; +size_t qspace_max; +size_t cspace_min; +size_t cspace_max; +size_t sspace_min; +size_t sspace_max; + +size_t lg_mspace; +size_t mspace_mask; + +/* + * const_small_size2bin is a static constant lookup table that in the common + * case can be used as-is for small_size2bin. + */ +#if (LG_TINY_MIN == 2) +#define S2B_4(i) i, +#define S2B_8(i) S2B_4(i) S2B_4(i) +#elif (LG_TINY_MIN == 3) +#define S2B_8(i) i, +#else +# error "Unsupported LG_TINY_MIN" +#endif +#define S2B_16(i) S2B_8(i) S2B_8(i) +#define S2B_32(i) S2B_16(i) S2B_16(i) +#define S2B_64(i) S2B_32(i) S2B_32(i) +#define S2B_128(i) S2B_64(i) S2B_64(i) +#define S2B_256(i) S2B_128(i) S2B_128(i) +/* + * The number of elements in const_small_size2bin is dependent on the + * definition for SUBPAGE. + */ +static JEMALLOC_ATTR(aligned(CACHELINE)) + const uint8_t const_small_size2bin[] = { +#if (LG_QUANTUM == 4) +/* 16-byte quantum **********************/ +# ifdef JEMALLOC_TINY +# if (LG_TINY_MIN == 2) + S2B_4(0) /* 4 */ + S2B_4(1) /* 8 */ + S2B_8(2) /* 16 */ +# define S2B_QMIN 2 +# elif (LG_TINY_MIN == 3) + S2B_8(0) /* 8 */ + S2B_8(1) /* 16 */ +# define S2B_QMIN 1 +# else +# error "Unsupported LG_TINY_MIN" +# endif +# else + S2B_16(0) /* 16 */ +# define S2B_QMIN 0 +# endif + S2B_16(S2B_QMIN + 1) /* 32 */ + S2B_16(S2B_QMIN + 2) /* 48 */ + S2B_16(S2B_QMIN + 3) /* 64 */ + S2B_16(S2B_QMIN + 4) /* 80 */ + S2B_16(S2B_QMIN + 5) /* 96 */ + S2B_16(S2B_QMIN + 6) /* 112 */ + S2B_16(S2B_QMIN + 7) /* 128 */ +# define S2B_CMIN (S2B_QMIN + 8) +#else +/* 8-byte quantum ***********************/ +# ifdef JEMALLOC_TINY +# if (LG_TINY_MIN == 2) + S2B_4(0) /* 4 */ + S2B_4(1) /* 8 */ +# define S2B_QMIN 1 +# else +# error "Unsupported LG_TINY_MIN" +# endif +# else + S2B_8(0) /* 8 */ +# define S2B_QMIN 0 +# endif + S2B_8(S2B_QMIN + 1) /* 16 */ + S2B_8(S2B_QMIN + 2) /* 24 */ + S2B_8(S2B_QMIN + 3) /* 32 */ + S2B_8(S2B_QMIN + 4) /* 40 */ + S2B_8(S2B_QMIN + 5) /* 48 */ + S2B_8(S2B_QMIN + 6) /* 56 */ + S2B_8(S2B_QMIN + 7) /* 64 */ + S2B_8(S2B_QMIN + 8) /* 72 */ + S2B_8(S2B_QMIN + 9) /* 80 */ + S2B_8(S2B_QMIN + 10) /* 88 */ + S2B_8(S2B_QMIN + 11) /* 96 */ + S2B_8(S2B_QMIN + 12) /* 104 */ + S2B_8(S2B_QMIN + 13) /* 112 */ + S2B_8(S2B_QMIN + 14) /* 120 */ + S2B_8(S2B_QMIN + 15) /* 128 */ +# define S2B_CMIN (S2B_QMIN + 16) +#endif +/****************************************/ + S2B_64(S2B_CMIN + 0) /* 192 */ + S2B_64(S2B_CMIN + 1) /* 256 */ + S2B_64(S2B_CMIN + 2) /* 320 */ + S2B_64(S2B_CMIN + 3) /* 384 */ + S2B_64(S2B_CMIN + 4) /* 448 */ + S2B_64(S2B_CMIN + 5) /* 512 */ +# define S2B_SMIN (S2B_CMIN + 6) + S2B_256(S2B_SMIN + 0) /* 768 */ + S2B_256(S2B_SMIN + 1) /* 1024 */ + S2B_256(S2B_SMIN + 2) /* 1280 */ + S2B_256(S2B_SMIN + 3) /* 1536 */ + S2B_256(S2B_SMIN + 4) /* 1792 */ + S2B_256(S2B_SMIN + 5) /* 2048 */ + S2B_256(S2B_SMIN + 6) /* 2304 */ + S2B_256(S2B_SMIN + 7) /* 2560 */ + S2B_256(S2B_SMIN + 8) /* 2816 */ + S2B_256(S2B_SMIN + 9) /* 3072 */ + 
S2B_256(S2B_SMIN + 10) /* 3328 */ + S2B_256(S2B_SMIN + 11) /* 3584 */ + S2B_256(S2B_SMIN + 12) /* 3840 */ +#if (STATIC_PAGE_SHIFT == 13) + S2B_256(S2B_SMIN + 13) /* 4096 */ + S2B_256(S2B_SMIN + 14) /* 4352 */ + S2B_256(S2B_SMIN + 15) /* 4608 */ + S2B_256(S2B_SMIN + 16) /* 4864 */ + S2B_256(S2B_SMIN + 17) /* 5120 */ + S2B_256(S2B_SMIN + 18) /* 5376 */ + S2B_256(S2B_SMIN + 19) /* 5632 */ + S2B_256(S2B_SMIN + 20) /* 5888 */ + S2B_256(S2B_SMIN + 21) /* 6144 */ + S2B_256(S2B_SMIN + 22) /* 6400 */ + S2B_256(S2B_SMIN + 23) /* 6656 */ + S2B_256(S2B_SMIN + 24) /* 6912 */ + S2B_256(S2B_SMIN + 25) /* 7168 */ + S2B_256(S2B_SMIN + 26) /* 7424 */ + S2B_256(S2B_SMIN + 27) /* 7680 */ + S2B_256(S2B_SMIN + 28) /* 7936 */ +#endif +}; +#undef S2B_1 +#undef S2B_2 +#undef S2B_4 +#undef S2B_8 +#undef S2B_16 +#undef S2B_32 +#undef S2B_64 +#undef S2B_128 +#undef S2B_256 +#undef S2B_QMIN +#undef S2B_CMIN +#undef S2B_SMIN + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size, + bool large, bool zero); +static arena_chunk_t *arena_chunk_alloc(arena_t *arena); +static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); +static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large, + bool zero); +static void arena_purge(arena_t *arena, bool all); +static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty); +static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize); +static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize, bool dirty); +static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin); +static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin); +static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin); +static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin); +static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin); +static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t size); +static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); +static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero); +static bool small_size2bin_init(void); +#ifdef JEMALLOC_DEBUG +static void small_size2bin_validate(void); +#endif +static bool small_size2bin_init_hard(void); +static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info, + size_t min_run_size); +static bool bin_info_init(void); + +/******************************************************************************/ + +static inline int +arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) +{ + uintptr_t a_mapelm = (uintptr_t)a; + uintptr_t b_mapelm = (uintptr_t)b; + + assert(a != NULL); + assert(b != NULL); + + return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); +} + +/* Generate red-black tree functions. 
*/ +rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t, + arena_chunk_map_t, u.rb_link, arena_run_comp) + +static inline int +arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) +{ + int ret; + size_t a_size = a->bits & ~PAGE_MASK; + size_t b_size = b->bits & ~PAGE_MASK; + + assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits & + CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY)); + + ret = (a_size > b_size) - (a_size < b_size); + if (ret == 0) { + uintptr_t a_mapelm, b_mapelm; + + if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) + a_mapelm = (uintptr_t)a; + else { + /* + * Treat keys as though they are lower than anything + * else. + */ + a_mapelm = 0; + } + b_mapelm = (uintptr_t)b; + + ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); + } + + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t, + arena_chunk_map_t, u.rb_link, arena_avail_comp) + +static inline void * +arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) +{ + void *ret; + unsigned regind; + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + + (uintptr_t)bin_info->bitmap_offset); + + dassert(run->magic == ARENA_RUN_MAGIC); + assert(run->nfree > 0); + assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); + + regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); + ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + + (uintptr_t)(bin_info->reg_size * regind)); + run->nfree--; + if (regind == run->nextind) + run->nextind++; + assert(regind < run->nextind); + return (ret); +} + +static inline void +arena_run_reg_dalloc(arena_run_t *run, void *ptr) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + size_t binind = arena_bin_index(chunk->arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + unsigned regind = arena_run_regind(run, bin_info, ptr); + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + + (uintptr_t)bin_info->bitmap_offset); + + assert(run->nfree < bin_info->nregs); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - ((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size + == 0); + assert((uintptr_t)ptr >= (uintptr_t)run + + (uintptr_t)bin_info->reg0_offset); + /* Freeing an unallocated pointer can cause assertion failure. */ + assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); + + bitmap_unset(bitmap, &bin_info->bitmap_info, regind); + run->nfree++; +} + +#ifdef JEMALLOC_DEBUG +static inline void +arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) +{ + size_t i; + size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT)); + + for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++) + assert(p[i] == 0); +} +#endif + +static void +arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, + bool zero) +{ + arena_chunk_t *chunk; + size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i; + size_t flag_dirty; + arena_avail_tree_t *runs_avail; +#ifdef JEMALLOC_STATS + size_t cactive_diff; +#endif + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + old_ndirty = chunk->ndirty; + run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) + >> PAGE_SHIFT); + flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY; + runs_avail = (flag_dirty != 0) ? 
&arena->runs_avail_dirty : + &arena->runs_avail_clean; + total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >> + PAGE_SHIFT; + assert((chunk->map[run_ind+total_pages-1-map_bias].bits & + CHUNK_MAP_DIRTY) == flag_dirty); + need_pages = (size >> PAGE_SHIFT); + assert(need_pages > 0); + assert(need_pages <= total_pages); + rem_pages = total_pages - need_pages; + + arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]); +#ifdef JEMALLOC_STATS + /* Update stats_cactive if nactive is crossing a chunk multiple. */ + cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) << + PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT); + if (cactive_diff != 0) + stats_cactive_add(cactive_diff); +#endif + arena->nactive += need_pages; + + /* Keep track of trailing unused pages for later use. */ + if (rem_pages > 0) { + if (flag_dirty != 0) { + chunk->map[run_ind+need_pages-map_bias].bits = + (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY; + chunk->map[run_ind+total_pages-1-map_bias].bits = + (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY; + } else { + chunk->map[run_ind+need_pages-map_bias].bits = + (rem_pages << PAGE_SHIFT) | + (chunk->map[run_ind+need_pages-map_bias].bits & + CHUNK_MAP_UNZEROED); + chunk->map[run_ind+total_pages-1-map_bias].bits = + (rem_pages << PAGE_SHIFT) | + (chunk->map[run_ind+total_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED); + } + arena_avail_tree_insert(runs_avail, + &chunk->map[run_ind+need_pages-map_bias]); + } + + /* Update dirty page accounting. */ + if (flag_dirty != 0) { + chunk->ndirty -= need_pages; + arena->ndirty -= need_pages; + } + + /* + * Update the page map separately for large vs. small runs, since it is + * possible to avoid iteration for large mallocs. + */ + if (large) { + if (zero) { + if (flag_dirty == 0) { + /* + * The run is clean, so some pages may be + * zeroed (i.e. never before touched). + */ + for (i = 0; i < need_pages; i++) { + if ((chunk->map[run_ind+i-map_bias].bits + & CHUNK_MAP_UNZEROED) != 0) { + memset((void *)((uintptr_t) + chunk + ((run_ind+i) << + PAGE_SHIFT)), 0, + PAGE_SIZE); + } +#ifdef JEMALLOC_DEBUG + else { + arena_chunk_validate_zeroed( + chunk, run_ind+i); + } +#endif + } + } else { + /* + * The run is dirty, so all pages must be + * zeroed. + */ + memset((void *)((uintptr_t)chunk + (run_ind << + PAGE_SHIFT)), 0, (need_pages << + PAGE_SHIFT)); + } + } + + /* + * Set the last element first, in case the run only contains one + * page (i.e. both statements set the same element). + */ + chunk->map[run_ind+need_pages-1-map_bias].bits = + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty; + chunk->map[run_ind-map_bias].bits = size | flag_dirty | + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + } else { + assert(zero == false); + /* + * Propagate the dirty and unzeroed flags to the allocated + * small run, so that arena_dalloc_bin_run() has the ability to + * conditionally trim clean pages. + */ + chunk->map[run_ind-map_bias].bits = + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) | + CHUNK_MAP_ALLOCATED | flag_dirty; +#ifdef JEMALLOC_DEBUG + /* + * The first page will always be dirtied during small run + * initialization, so a validation failure here would not + * actually cause an observable failure. 
+ */ + if (flag_dirty == 0 && + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) + == 0) + arena_chunk_validate_zeroed(chunk, run_ind); +#endif + for (i = 1; i < need_pages - 1; i++) { + chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT) + | (chunk->map[run_ind+i-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED; +#ifdef JEMALLOC_DEBUG + if (flag_dirty == 0 && + (chunk->map[run_ind+i-map_bias].bits & + CHUNK_MAP_UNZEROED) == 0) + arena_chunk_validate_zeroed(chunk, run_ind+i); +#endif + } + chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages + - 1) << PAGE_SHIFT) | + (chunk->map[run_ind+need_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty; +#ifdef JEMALLOC_DEBUG + if (flag_dirty == 0 && + (chunk->map[run_ind+need_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) == 0) { + arena_chunk_validate_zeroed(chunk, + run_ind+need_pages-1); + } +#endif + } +} + +static arena_chunk_t * +arena_chunk_alloc(arena_t *arena) +{ + arena_chunk_t *chunk; + size_t i; + + if (arena->spare != NULL) { + arena_avail_tree_t *runs_avail; + + chunk = arena->spare; + arena->spare = NULL; + + /* Insert the run into the appropriate runs_avail_* tree. */ + if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) + runs_avail = &arena->runs_avail_clean; + else + runs_avail = &arena->runs_avail_dirty; + assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass); + assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK) + == arena_maxclass); + assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) == + (chunk->map[chunk_npages-1-map_bias].bits & + CHUNK_MAP_DIRTY)); + arena_avail_tree_insert(runs_avail, &chunk->map[0]); + } else { + bool zero; + size_t unzeroed; + + zero = false; + malloc_mutex_unlock(&arena->lock); + chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero); + malloc_mutex_lock(&arena->lock); + if (chunk == NULL) + return (NULL); +#ifdef JEMALLOC_STATS + arena->stats.mapped += chunksize; +#endif + + chunk->arena = arena; + ql_elm_new(chunk, link_dirty); + chunk->dirtied = false; + + /* + * Claim that no pages are in use, since the header is merely + * overhead. + */ + chunk->ndirty = 0; + + /* + * Initialize the map to contain one maximal free untouched run. + * Mark the pages as zeroed iff chunk_alloc() returned a zeroed + * chunk. + */ + unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; + chunk->map[0].bits = arena_maxclass | unzeroed; + /* + * There is no need to initialize the internal page map entries + * unless the chunk is not zeroed. + */ + if (zero == false) { + for (i = map_bias+1; i < chunk_npages-1; i++) + chunk->map[i-map_bias].bits = unzeroed; + } +#ifdef JEMALLOC_DEBUG + else { + for (i = map_bias+1; i < chunk_npages-1; i++) + assert(chunk->map[i-map_bias].bits == unzeroed); + } +#endif + chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass | + unzeroed; + + /* Insert the run into the runs_avail_clean tree. */ + arena_avail_tree_insert(&arena->runs_avail_clean, + &chunk->map[0]); + } + + return (chunk); +} + +static void +arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) +{ + arena_avail_tree_t *runs_avail; + + /* + * Remove run from the appropriate runs_avail_* tree, so that the arena + * does not use it. 
+ */ + if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) + runs_avail = &arena->runs_avail_clean; + else + runs_avail = &arena->runs_avail_dirty; + arena_avail_tree_remove(runs_avail, &chunk->map[0]); + + if (arena->spare != NULL) { + arena_chunk_t *spare = arena->spare; + + arena->spare = chunk; + if (spare->dirtied) { + ql_remove(&chunk->arena->chunks_dirty, spare, + link_dirty); + arena->ndirty -= spare->ndirty; + } + malloc_mutex_unlock(&arena->lock); + chunk_dealloc((void *)spare, chunksize); + malloc_mutex_lock(&arena->lock); +#ifdef JEMALLOC_STATS + arena->stats.mapped -= chunksize; +#endif + } else + arena->spare = chunk; +} + +static arena_run_t * +arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero) +{ + arena_chunk_t *chunk; + arena_run_t *run; + arena_chunk_map_t *mapelm, key; + + assert(size <= arena_maxclass); + assert((size & PAGE_MASK) == 0); + + /* Search the arena's chunks for the lowest best fit. */ + key.bits = size | CHUNK_MAP_KEY; + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + PAGE_SHIFT)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + PAGE_SHIFT)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + + /* + * No usable runs. Create a new chunk from which to allocate the run. + */ + chunk = arena_chunk_alloc(arena); + if (chunk != NULL) { + run = (arena_run_t *)((uintptr_t)chunk + (map_bias << + PAGE_SHIFT)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + + /* + * arena_chunk_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped arena->lock in + * arena_chunk_alloc(), so search one more time. + */ + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + PAGE_SHIFT)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + PAGE_SHIFT)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + + return (NULL); +} + +static inline void +arena_maybe_purge(arena_t *arena) +{ + + /* Enforce opt_lg_dirty_mult. 
*/ + if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory && + (arena->ndirty - arena->npurgatory) > chunk_npages && + (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - + arena->npurgatory)) + arena_purge(arena, false); +} + +static inline void +arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk) +{ + ql_head(arena_chunk_map_t) mapelms; + arena_chunk_map_t *mapelm; + size_t pageind, flag_unzeroed; +#ifdef JEMALLOC_DEBUG + size_t ndirty; +#endif +#ifdef JEMALLOC_STATS + size_t nmadvise; +#endif + + ql_new(&mapelms); + + flag_unzeroed = +#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED + /* + * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous + * mappings, but not for file-backed mappings. + */ +# ifdef JEMALLOC_SWAP + swap_enabled ? CHUNK_MAP_UNZEROED : +# endif + 0; +#else + CHUNK_MAP_UNZEROED; +#endif + + /* + * If chunk is the spare, temporarily re-allocate it, 1) so that its + * run is reinserted into runs_avail_dirty, and 2) so that it cannot be + * completely discarded by another thread while arena->lock is dropped + * by this thread. Note that the arena_run_dalloc() call will + * implicitly deallocate the chunk, so no explicit action is required + * in this function to deallocate the chunk. + * + * Note that once a chunk contains dirty pages, it cannot again contain + * a single run unless 1) it is a dirty run, or 2) this function purges + * dirty pages and causes the transition to a single clean run. Thus + * (chunk == arena->spare) is possible, but it is not possible for + * this function to be called on the spare unless it contains a dirty + * run. + */ + if (chunk == arena->spare) { + assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0); + arena_chunk_alloc(arena); + } + + /* Temporarily allocate all free dirty runs within chunk. */ + for (pageind = map_bias; pageind < chunk_npages;) { + mapelm = &chunk->map[pageind-map_bias]; + if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) { + size_t npages; + + npages = mapelm->bits >> PAGE_SHIFT; + assert(pageind + npages <= chunk_npages); + if (mapelm->bits & CHUNK_MAP_DIRTY) { + size_t i; +#ifdef JEMALLOC_STATS + size_t cactive_diff; +#endif + + arena_avail_tree_remove( + &arena->runs_avail_dirty, mapelm); + + mapelm->bits = (npages << PAGE_SHIFT) | + flag_unzeroed | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED; + /* + * Update internal elements in the page map, so + * that CHUNK_MAP_UNZEROED is properly set. + */ + for (i = 1; i < npages - 1; i++) { + chunk->map[pageind+i-map_bias].bits = + flag_unzeroed; + } + if (npages > 1) { + chunk->map[ + pageind+npages-1-map_bias].bits = + flag_unzeroed | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED; + } + +#ifdef JEMALLOC_STATS + /* + * Update stats_cactive if nactive is crossing a + * chunk multiple. + */ + cactive_diff = CHUNK_CEILING((arena->nactive + + npages) << PAGE_SHIFT) - + CHUNK_CEILING(arena->nactive << PAGE_SHIFT); + if (cactive_diff != 0) + stats_cactive_add(cactive_diff); +#endif + arena->nactive += npages; + /* Append to list for later processing. */ + ql_elm_new(mapelm, u.ql_link); + ql_tail_insert(&mapelms, mapelm, u.ql_link); + } + + pageind += npages; + } else { + /* Skip allocated run. 
*/ + if (mapelm->bits & CHUNK_MAP_LARGE) + pageind += mapelm->bits >> PAGE_SHIFT; + else { + arena_run_t *run = (arena_run_t *)((uintptr_t) + chunk + (uintptr_t)(pageind << PAGE_SHIFT)); + + assert((mapelm->bits >> PAGE_SHIFT) == 0); + dassert(run->magic == ARENA_RUN_MAGIC); + size_t binind = arena_bin_index(arena, + run->bin); + arena_bin_info_t *bin_info = + &arena_bin_info[binind]; + pageind += bin_info->run_size >> PAGE_SHIFT; + } + } + } + assert(pageind == chunk_npages); + +#ifdef JEMALLOC_DEBUG + ndirty = chunk->ndirty; +#endif +#ifdef JEMALLOC_STATS + arena->stats.purged += chunk->ndirty; +#endif + arena->ndirty -= chunk->ndirty; + chunk->ndirty = 0; + ql_remove(&arena->chunks_dirty, chunk, link_dirty); + chunk->dirtied = false; + + malloc_mutex_unlock(&arena->lock); +#ifdef JEMALLOC_STATS + nmadvise = 0; +#endif + ql_foreach(mapelm, &mapelms, u.ql_link) { + size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t)) + map_bias; + size_t npages = mapelm->bits >> PAGE_SHIFT; + + assert(pageind + npages <= chunk_npages); +#ifdef JEMALLOC_DEBUG + assert(ndirty >= npages); + ndirty -= npages; +#endif + +#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED + madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)), + (npages << PAGE_SHIFT), MADV_DONTNEED); +#elif defined(JEMALLOC_PURGE_MADVISE_FREE) + madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)), + (npages << PAGE_SHIFT), MADV_FREE); +#else +# error "No method defined for purging unused dirty pages." +#endif + +#ifdef JEMALLOC_STATS + nmadvise++; +#endif + } +#ifdef JEMALLOC_DEBUG + assert(ndirty == 0); +#endif + malloc_mutex_lock(&arena->lock); +#ifdef JEMALLOC_STATS + arena->stats.nmadvise += nmadvise; +#endif + + /* Deallocate runs. */ + for (mapelm = ql_first(&mapelms); mapelm != NULL; + mapelm = ql_first(&mapelms)) { + size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t)) + map_bias; + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)(pageind << PAGE_SHIFT)); + + ql_remove(&mapelms, mapelm, u.ql_link); + arena_run_dalloc(arena, run, false); + } +} + +static void +arena_purge(arena_t *arena, bool all) +{ + arena_chunk_t *chunk; + size_t npurgatory; +#ifdef JEMALLOC_DEBUG + size_t ndirty = 0; + + ql_foreach(chunk, &arena->chunks_dirty, link_dirty) { + assert(chunk->dirtied); + ndirty += chunk->ndirty; + } + assert(ndirty == arena->ndirty); +#endif + assert(arena->ndirty > arena->npurgatory || all); + assert(arena->ndirty > chunk_npages || all); + assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - + npurgatory) || all); + +#ifdef JEMALLOC_STATS + arena->stats.npurge++; +#endif + + /* + * Compute the minimum number of pages that this thread should try to + * purge, and add the result to arena->npurgatory. This will keep + * multiple threads from racing to reduce ndirty below the threshold. + */ + npurgatory = arena->ndirty - arena->npurgatory; + if (all == false) { + assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult); + npurgatory -= arena->nactive >> opt_lg_dirty_mult; + } + arena->npurgatory += npurgatory; + + while (npurgatory > 0) { + /* Get next chunk with dirty pages. */ + chunk = ql_first(&arena->chunks_dirty); + if (chunk == NULL) { + /* + * This thread was unable to purge as many pages as + * originally intended, due to races with other threads + * that either did some of the purging work, or re-used + * dirty pages. 
+ */ + arena->npurgatory -= npurgatory; + return; + } + while (chunk->ndirty == 0) { + ql_remove(&arena->chunks_dirty, chunk, link_dirty); + chunk->dirtied = false; + chunk = ql_first(&arena->chunks_dirty); + if (chunk == NULL) { + /* Same logic as for above. */ + arena->npurgatory -= npurgatory; + return; + } + } + + if (chunk->ndirty > npurgatory) { + /* + * This thread will, at a minimum, purge all the dirty + * pages in chunk, so set npurgatory to reflect this + * thread's commitment to purge the pages. This tends + * to reduce the chances of the following scenario: + * + * 1) This thread sets arena->npurgatory such that + * (arena->ndirty - arena->npurgatory) is at the + * threshold. + * 2) This thread drops arena->lock. + * 3) Another thread causes one or more pages to be + * dirtied, and immediately determines that it must + * purge dirty pages. + * + * If this scenario *does* play out, that's okay, + * because all of the purging work being done really + * needs to happen. + */ + arena->npurgatory += chunk->ndirty - npurgatory; + npurgatory = chunk->ndirty; + } + + arena->npurgatory -= chunk->ndirty; + npurgatory -= chunk->ndirty; + arena_chunk_purge(arena, chunk); + } +} + +void +arena_purge_all(arena_t *arena) +{ + + malloc_mutex_lock(&arena->lock); + arena_purge(arena, true); + malloc_mutex_unlock(&arena->lock); +} + +static void +arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty) +{ + arena_chunk_t *chunk; + size_t size, run_ind, run_pages, flag_dirty; + arena_avail_tree_t *runs_avail; +#ifdef JEMALLOC_STATS + size_t cactive_diff; +#endif + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) + >> PAGE_SHIFT); + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) { + size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK; + assert(size == PAGE_SIZE || + (chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits & + ~PAGE_MASK) == 0); + assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits & + CHUNK_MAP_LARGE) != 0); + assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits & + CHUNK_MAP_ALLOCATED) != 0); + } else { + size_t binind = arena_bin_index(arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + size = bin_info->run_size; + } + run_pages = (size >> PAGE_SHIFT); +#ifdef JEMALLOC_STATS + /* Update stats_cactive if nactive is crossing a chunk multiple. */ + cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) - + CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT); + if (cactive_diff != 0) + stats_cactive_sub(cactive_diff); +#endif + arena->nactive -= run_pages; + + /* + * The run is dirty if the caller claims to have dirtied it, as well as + * if it was already dirty before being allocated. + */ + if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0) + dirty = true; + flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; + runs_avail = dirty ? &arena->runs_avail_dirty : + &arena->runs_avail_clean; + + /* Mark pages as unallocated in the chunk map. 
*/ + if (dirty) { + chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY; + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + CHUNK_MAP_DIRTY; + + chunk->ndirty += run_pages; + arena->ndirty += run_pages; + } else { + chunk->map[run_ind-map_bias].bits = size | + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED); + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + (chunk->map[run_ind+run_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED); + } + + /* Try to coalesce forward. */ + if (run_ind + run_pages < chunk_npages && + (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED) + == 0 && (chunk->map[run_ind+run_pages-map_bias].bits & + CHUNK_MAP_DIRTY) == flag_dirty) { + size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits & + ~PAGE_MASK; + size_t nrun_pages = nrun_size >> PAGE_SHIFT; + + /* + * Remove successor from runs_avail; the coalesced run is + * inserted later. + */ + assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits + & ~PAGE_MASK) == nrun_size); + assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits + & CHUNK_MAP_ALLOCATED) == 0); + assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits + & CHUNK_MAP_DIRTY) == flag_dirty); + arena_avail_tree_remove(runs_avail, + &chunk->map[run_ind+run_pages-map_bias]); + + size += nrun_size; + run_pages += nrun_pages; + + chunk->map[run_ind-map_bias].bits = size | + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + (chunk->map[run_ind+run_pages-1-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + } + + /* Try to coalesce backward. */ + if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits & + CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits & + CHUNK_MAP_DIRTY) == flag_dirty) { + size_t prun_size = chunk->map[run_ind-1-map_bias].bits & + ~PAGE_MASK; + size_t prun_pages = prun_size >> PAGE_SHIFT; + + run_ind -= prun_pages; + + /* + * Remove predecessor from runs_avail; the coalesced run is + * inserted later. + */ + assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) + == prun_size); + assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED) + == 0); + assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) + == flag_dirty); + arena_avail_tree_remove(runs_avail, + &chunk->map[run_ind-map_bias]); + + size += prun_size; + run_pages += prun_pages; + + chunk->map[run_ind-map_bias].bits = size | + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + (chunk->map[run_ind+run_pages-1-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + } + + /* Insert into runs_avail, now that coalescing is complete. */ + assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) == + (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK)); + assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == + (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY)); + arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]); + + if (dirty) { + /* + * Insert into chunks_dirty before potentially calling + * arena_chunk_dealloc(), so that chunks_dirty and + * arena->ndirty are consistent. + */ + if (chunk->dirtied == false) { + ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty); + chunk->dirtied = true; + } + } + + /* + * Deallocate chunk if it is now completely unused. The bit + * manipulation checks whether the first run is unallocated and extends + * to the end of the chunk. 
+ */ + if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) == + arena_maxclass) + arena_chunk_dealloc(arena, chunk); + + /* + * It is okay to do dirty page processing here even if the chunk was + * deallocated above, since in that case it is the spare. Waiting + * until after possible chunk deallocation to do dirty processing + * allows for an old spare to be fully deallocated, thus decreasing the + * chances of spuriously crossing the dirty page purging threshold. + */ + if (dirty) + arena_maybe_purge(arena); +} + +static void +arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t oldsize, size_t newsize) +{ + size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT; + size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT; + size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * leading run as separately allocated. Set the last element of each + * run first, in case of single-page runs. + */ + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0); + chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty | + (chunk->map[pageind+head_npages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind-map_bias].bits = (oldsize - newsize) + | flag_dirty | (chunk->map[pageind-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + +#ifdef JEMALLOC_DEBUG + { + size_t tail_npages = newsize >> PAGE_SHIFT; + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & ~PAGE_MASK) == 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & CHUNK_MAP_DIRTY) == flag_dirty); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & CHUNK_MAP_ALLOCATED) != 0); + } +#endif + chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty | + (chunk->map[pageind+head_npages-map_bias].bits & + CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + arena_run_dalloc(arena, run, false); +} + +static void +arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t oldsize, size_t newsize, bool dirty) +{ + size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT; + size_t head_npages = newsize >> PAGE_SHIFT; + size_t tail_npages = (oldsize - newsize) >> PAGE_SHIFT; + size_t flag_dirty = chunk->map[pageind-map_bias].bits & + CHUNK_MAP_DIRTY; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * trailing run as separately allocated. Set the last element of each + * run first, in case of single-page runs. 
+ */ + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0); + chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty | + (chunk->map[pageind+head_npages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind-map_bias].bits = newsize | flag_dirty | + (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) | + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + ~PAGE_MASK) == 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + CHUNK_MAP_ALLOCATED) != 0); + chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits = + flag_dirty | + (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) | + flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), + dirty); +} + +static arena_run_t * +arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) +{ + arena_chunk_map_t *mapelm; + arena_run_t *run; + size_t binind; + arena_bin_info_t *bin_info; + + /* Look for a usable run. */ + mapelm = arena_run_tree_first(&bin->runs); + if (mapelm != NULL) { + arena_chunk_t *chunk; + size_t pageind; + + /* run is guaranteed to have available space. */ + arena_run_tree_remove(&bin->runs, mapelm); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); + pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t))) + map_bias; + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + (mapelm->bits >> PAGE_SHIFT)) + << PAGE_SHIFT)); +#ifdef JEMALLOC_STATS + bin->stats.reruns++; +#endif + return (run); + } + /* No existing runs have any space available. */ + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + + /* Allocate a new run. */ + malloc_mutex_unlock(&bin->lock); + /******************************/ + malloc_mutex_lock(&arena->lock); + run = arena_run_alloc(arena, bin_info->run_size, false, false); + if (run != NULL) { + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + + (uintptr_t)bin_info->bitmap_offset); + + /* Initialize run internals. */ + run->bin = bin; + run->nextind = 0; + run->nfree = bin_info->nregs; + bitmap_init(bitmap, &bin_info->bitmap_info); +#ifdef JEMALLOC_DEBUG + run->magic = ARENA_RUN_MAGIC; +#endif + } + malloc_mutex_unlock(&arena->lock); + /********************************/ + malloc_mutex_lock(&bin->lock); + if (run != NULL) { +#ifdef JEMALLOC_STATS + bin->stats.nruns++; + bin->stats.curruns++; + if (bin->stats.curruns > bin->stats.highruns) + bin->stats.highruns = bin->stats.curruns; +#endif + return (run); + } + + /* + * arena_run_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped bin->lock above, + * so search one more time. + */ + mapelm = arena_run_tree_first(&bin->runs); + if (mapelm != NULL) { + arena_chunk_t *chunk; + size_t pageind; + + /* run is guaranteed to have available space. 
*/ + arena_run_tree_remove(&bin->runs, mapelm); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); + pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t))) + map_bias; + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + (mapelm->bits >> PAGE_SHIFT)) + << PAGE_SHIFT)); +#ifdef JEMALLOC_STATS + bin->stats.reruns++; +#endif + return (run); + } + + return (NULL); +} + +/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ +static void * +arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) +{ + void *ret; + size_t binind; + arena_bin_info_t *bin_info; + arena_run_t *run; + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + bin->runcur = NULL; + run = arena_bin_nonfull_run_get(arena, bin); + if (bin->runcur != NULL && bin->runcur->nfree > 0) { + /* + * Another thread updated runcur while this one ran without the + * bin lock in arena_bin_nonfull_run_get(). + */ + dassert(bin->runcur->magic == ARENA_RUN_MAGIC); + assert(bin->runcur->nfree > 0); + ret = arena_run_reg_alloc(bin->runcur, bin_info); + if (run != NULL) { + arena_chunk_t *chunk; + + /* + * arena_run_alloc() may have allocated run, or it may + * have pulled run from the bin's run tree. Therefore + * it is unsafe to make any assumptions about how run + * has previously been used, and arena_bin_lower_run() + * must be called, as if a region were just deallocated + * from the run. + */ + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + if (run->nfree == bin_info->nregs) + arena_dalloc_bin_run(arena, chunk, run, bin); + else + arena_bin_lower_run(arena, chunk, run, bin); + } + return (ret); + } + + if (run == NULL) + return (NULL); + + bin->runcur = run; + + dassert(bin->runcur->magic == ARENA_RUN_MAGIC); + assert(bin->runcur->nfree > 0); + + return (arena_run_reg_alloc(bin->runcur, bin_info)); +} + +#ifdef JEMALLOC_PROF +void +arena_prof_accum(arena_t *arena, uint64_t accumbytes) +{ + + if (prof_interval != 0) { + arena->prof_accumbytes += accumbytes; + if (arena->prof_accumbytes >= prof_interval) { + prof_idump(); + arena->prof_accumbytes -= prof_interval; + } + } +} +#endif + +#ifdef JEMALLOC_TCACHE +void +arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind +# ifdef JEMALLOC_PROF + , uint64_t prof_accumbytes +# endif + ) +{ + unsigned i, nfill; + arena_bin_t *bin; + arena_run_t *run; + void *ptr; + + assert(tbin->ncached == 0); + +#ifdef JEMALLOC_PROF + malloc_mutex_lock(&arena->lock); + arena_prof_accum(arena, prof_accumbytes); + malloc_mutex_unlock(&arena->lock); +#endif + bin = &arena->bins[binind]; + malloc_mutex_lock(&bin->lock); + for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> + tbin->lg_fill_div); i < nfill; i++) { + if ((run = bin->runcur) != NULL && run->nfree > 0) + ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); + else + ptr = arena_bin_malloc_hard(arena, bin); + if (ptr == NULL) + break; + /* Insert such that low regions get used first. 
*/ + tbin->avail[nfill - 1 - i] = ptr; + } +#ifdef JEMALLOC_STATS + bin->stats.allocated += i * arena_bin_info[binind].reg_size; + bin->stats.nmalloc += i; + bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.nfills++; + tbin->tstats.nrequests = 0; +#endif + malloc_mutex_unlock(&bin->lock); + tbin->ncached = i; +} +#endif + +void * +arena_malloc_small(arena_t *arena, size_t size, bool zero) +{ + void *ret; + arena_bin_t *bin; + arena_run_t *run; + size_t binind; + + binind = SMALL_SIZE2BIN(size); + assert(binind < nbins); + bin = &arena->bins[binind]; + size = arena_bin_info[binind].reg_size; + + malloc_mutex_lock(&bin->lock); + if ((run = bin->runcur) != NULL && run->nfree > 0) + ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); + else + ret = arena_bin_malloc_hard(arena, bin); + + if (ret == NULL) { + malloc_mutex_unlock(&bin->lock); + return (NULL); + } + +#ifdef JEMALLOC_STATS + bin->stats.allocated += size; + bin->stats.nmalloc++; + bin->stats.nrequests++; +#endif + malloc_mutex_unlock(&bin->lock); +#ifdef JEMALLOC_PROF + if (isthreaded == false) { + malloc_mutex_lock(&arena->lock); + arena_prof_accum(arena, size); + malloc_mutex_unlock(&arena->lock); + } +#endif + + if (zero == false) { +#ifdef JEMALLOC_FILL + if (opt_junk) + memset(ret, 0xa5, size); + else if (opt_zero) + memset(ret, 0, size); +#endif + } else + memset(ret, 0, size); + + return (ret); +} + +void * +arena_malloc_large(arena_t *arena, size_t size, bool zero) +{ + void *ret; + + /* Large allocation. */ + size = PAGE_CEILING(size); + malloc_mutex_lock(&arena->lock); + ret = (void *)arena_run_alloc(arena, size, true, zero); + if (ret == NULL) { + malloc_mutex_unlock(&arena->lock); + return (NULL); + } +#ifdef JEMALLOC_STATS + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; + if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; + } +#endif +#ifdef JEMALLOC_PROF + arena_prof_accum(arena, size); +#endif + malloc_mutex_unlock(&arena->lock); + + if (zero == false) { +#ifdef JEMALLOC_FILL + if (opt_junk) + memset(ret, 0xa5, size); + else if (opt_zero) + memset(ret, 0, size); +#endif + } + + return (ret); +} + +void * +arena_malloc(size_t size, bool zero) +{ + + assert(size != 0); + assert(QUANTUM_CEILING(size) <= arena_maxclass); + + if (size <= small_maxclass) { +#ifdef JEMALLOC_TCACHE + tcache_t *tcache; + + if ((tcache = tcache_get()) != NULL) + return (tcache_alloc_small(tcache, size, zero)); + else + +#endif + return (arena_malloc_small(choose_arena(), size, zero)); + } else { +#ifdef JEMALLOC_TCACHE + if (size <= tcache_maxclass) { + tcache_t *tcache; + + if ((tcache = tcache_get()) != NULL) + return (tcache_alloc_large(tcache, size, zero)); + else { + return (arena_malloc_large(choose_arena(), + size, zero)); + } + } else +#endif + return (arena_malloc_large(choose_arena(), size, zero)); + } +} + +/* Only handles large allocations that require more than page alignment. 
*/ +void * +arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment, + bool zero) +{ + void *ret; + size_t offset; + arena_chunk_t *chunk; + + assert((size & PAGE_MASK) == 0); + + alignment = PAGE_CEILING(alignment); + + malloc_mutex_lock(&arena->lock); + ret = (void *)arena_run_alloc(arena, alloc_size, true, zero); + if (ret == NULL) { + malloc_mutex_unlock(&arena->lock); + return (NULL); + } + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret); + + offset = (uintptr_t)ret & (alignment - 1); + assert((offset & PAGE_MASK) == 0); + assert(offset < alloc_size); + if (offset == 0) + arena_run_trim_tail(arena, chunk, ret, alloc_size, size, false); + else { + size_t leadsize, trailsize; + + leadsize = alignment - offset; + if (leadsize > 0) { + arena_run_trim_head(arena, chunk, ret, alloc_size, + alloc_size - leadsize); + ret = (void *)((uintptr_t)ret + leadsize); + } + + trailsize = alloc_size - leadsize - size; + if (trailsize != 0) { + /* Trim trailing space. */ + assert(trailsize < alloc_size); + arena_run_trim_tail(arena, chunk, ret, size + trailsize, + size, false); + } + } + +#ifdef JEMALLOC_STATS + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; + if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; + } +#endif + malloc_mutex_unlock(&arena->lock); + +#ifdef JEMALLOC_FILL + if (zero == false) { + if (opt_junk) + memset(ret, 0xa5, size); + else if (opt_zero) + memset(ret, 0, size); + } +#endif + return (ret); +} + +/* Return the size of the allocation pointed to by ptr. 
*/ +size_t +arena_salloc(const void *ptr) +{ + size_t ret; + arena_chunk_t *chunk; + size_t pageind, mapbits; + + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; + mapbits = chunk->map[pageind-map_bias].bits; + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if ((mapbits & CHUNK_MAP_LARGE) == 0) { + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) << + PAGE_SHIFT)); + dassert(run->magic == ARENA_RUN_MAGIC); + size_t binind = arena_bin_index(chunk->arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + assert(((uintptr_t)ptr - ((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size == + 0); + ret = bin_info->reg_size; + } else { + assert(((uintptr_t)ptr & PAGE_MASK) == 0); + ret = mapbits & ~PAGE_MASK; + assert(ret != 0); + } + + return (ret); +} + +#ifdef JEMALLOC_PROF +void +arena_prof_promoted(const void *ptr, size_t size) +{ + arena_chunk_t *chunk; + size_t pageind, binind; + + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + assert(isalloc(ptr) == PAGE_SIZE); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; + binind = SMALL_SIZE2BIN(size); + assert(binind < nbins); + chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits & + ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT); +} + +size_t +arena_salloc_demote(const void *ptr) +{ + size_t ret; + arena_chunk_t *chunk; + size_t pageind, mapbits; + + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; + mapbits = chunk->map[pageind-map_bias].bits; + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if ((mapbits & CHUNK_MAP_LARGE) == 0) { + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) << + PAGE_SHIFT)); + dassert(run->magic == ARENA_RUN_MAGIC); + size_t binind = arena_bin_index(chunk->arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + assert(((uintptr_t)ptr - ((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size == + 0); + ret = bin_info->reg_size; + } else { + assert(((uintptr_t)ptr & PAGE_MASK) == 0); + ret = mapbits & ~PAGE_MASK; + if (prof_promote && ret == PAGE_SIZE && (mapbits & + CHUNK_MAP_CLASS_MASK) != 0) { + size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >> + CHUNK_MAP_CLASS_SHIFT) - 1; + assert(binind < nbins); + ret = arena_bin_info[binind].reg_size; + } + assert(ret != 0); + } + + return (ret); +} +#endif + +static void +arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + /* Dissociate run from bin. */ + if (run == bin->runcur) + bin->runcur = NULL; + else { + size_t binind = arena_bin_index(chunk->arena, bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + if (bin_info->nregs != 1) { + size_t run_pageind = (((uintptr_t)run - + (uintptr_t)chunk)) >> PAGE_SHIFT; + arena_chunk_map_t *run_mapelm = + &chunk->map[run_pageind-map_bias]; + /* + * This block's conditional is necessary because if the + * run only contains one region, then it never gets + * inserted into the non-full runs tree. 
+ */ + arena_run_tree_remove(&bin->runs, run_mapelm); + } + } +} + +static void +arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + size_t binind; + arena_bin_info_t *bin_info; + size_t npages, run_ind, past; + + assert(run != bin->runcur); + assert(arena_run_tree_search(&bin->runs, &chunk->map[ + (((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL); + + binind = arena_bin_index(chunk->arena, run->bin); + bin_info = &arena_bin_info[binind]; + + malloc_mutex_unlock(&bin->lock); + /******************************/ + npages = bin_info->run_size >> PAGE_SHIFT; + run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT); + past = (size_t)(PAGE_CEILING((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * + bin_info->reg_size) - (uintptr_t)chunk) >> PAGE_SHIFT); + malloc_mutex_lock(&arena->lock); + + /* + * If the run was originally clean, and some pages were never touched, + * trim the clean pages before deallocating the dirty portion of the + * run. + */ + if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past + - run_ind < npages) { + /* + * Trim clean pages. Convert to large run beforehand. Set the + * last map element first, in case this is a one-page run. + */ + chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE | + (chunk->map[run_ind+npages-1-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + chunk->map[run_ind-map_bias].bits = bin_info->run_size | + CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + arena_run_trim_tail(arena, chunk, run, (npages << PAGE_SHIFT), + ((past - run_ind) << PAGE_SHIFT), false); + /* npages = past - run_ind; */ + } +#ifdef JEMALLOC_DEBUG + run->magic = 0; +#endif + arena_run_dalloc(arena, run, true); + malloc_mutex_unlock(&arena->lock); + /****************************/ + malloc_mutex_lock(&bin->lock); +#ifdef JEMALLOC_STATS + bin->stats.curruns--; +#endif +} + +static void +arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + /* + * Make sure that bin->runcur always refers to the lowest non-full run, + * if one exists. + */ + if (bin->runcur == NULL) + bin->runcur = run; + else if ((uintptr_t)run < (uintptr_t)bin->runcur) { + /* Switch runcur. */ + if (bin->runcur->nfree > 0) { + arena_chunk_t *runcur_chunk = + CHUNK_ADDR2BASE(bin->runcur); + size_t runcur_pageind = (((uintptr_t)bin->runcur - + (uintptr_t)runcur_chunk)) >> PAGE_SHIFT; + arena_chunk_map_t *runcur_mapelm = + &runcur_chunk->map[runcur_pageind-map_bias]; + + /* Insert runcur. 
*/ + arena_run_tree_insert(&bin->runs, runcur_mapelm); + } + bin->runcur = run; + } else { + size_t run_pageind = (((uintptr_t)run - + (uintptr_t)chunk)) >> PAGE_SHIFT; + arena_chunk_map_t *run_mapelm = + &chunk->map[run_pageind-map_bias]; + + assert(arena_run_tree_search(&bin->runs, run_mapelm) == NULL); + arena_run_tree_insert(&bin->runs, run_mapelm); + } +} + +void +arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, + arena_chunk_map_t *mapelm) +{ + size_t pageind; + arena_run_t *run; + arena_bin_t *bin; +#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS)) + size_t size; +#endif + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT)); + dassert(run->magic == ARENA_RUN_MAGIC); + bin = run->bin; + size_t binind = arena_bin_index(arena, bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; +#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS)) + size = bin_info->reg_size; +#endif + +#ifdef JEMALLOC_FILL + if (opt_junk) + memset(ptr, 0x5a, size); +#endif + + arena_run_reg_dalloc(run, ptr); + if (run->nfree == bin_info->nregs) { + arena_dissociate_bin_run(chunk, run, bin); + arena_dalloc_bin_run(arena, chunk, run, bin); + } else if (run->nfree == 1 && run != bin->runcur) + arena_bin_lower_run(arena, chunk, run, bin); + +#ifdef JEMALLOC_STATS + bin->stats.allocated -= size; + bin->stats.ndalloc++; +#endif +} + +#ifdef JEMALLOC_STATS +void +arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, + arena_stats_t *astats, malloc_bin_stats_t *bstats, + malloc_large_stats_t *lstats) +{ + unsigned i; + + malloc_mutex_lock(&arena->lock); + *nactive += arena->nactive; + *ndirty += arena->ndirty; + + astats->mapped += arena->stats.mapped; + astats->npurge += arena->stats.npurge; + astats->nmadvise += arena->stats.nmadvise; + astats->purged += arena->stats.purged; + astats->allocated_large += arena->stats.allocated_large; + astats->nmalloc_large += arena->stats.nmalloc_large; + astats->ndalloc_large += arena->stats.ndalloc_large; + astats->nrequests_large += arena->stats.nrequests_large; + + for (i = 0; i < nlclasses; i++) { + lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; + lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; + lstats[i].nrequests += arena->stats.lstats[i].nrequests; + lstats[i].highruns += arena->stats.lstats[i].highruns; + lstats[i].curruns += arena->stats.lstats[i].curruns; + } + malloc_mutex_unlock(&arena->lock); + + for (i = 0; i < nbins; i++) { + arena_bin_t *bin = &arena->bins[i]; + + malloc_mutex_lock(&bin->lock); + bstats[i].allocated += bin->stats.allocated; + bstats[i].nmalloc += bin->stats.nmalloc; + bstats[i].ndalloc += bin->stats.ndalloc; + bstats[i].nrequests += bin->stats.nrequests; +#ifdef JEMALLOC_TCACHE + bstats[i].nfills += bin->stats.nfills; + bstats[i].nflushes += bin->stats.nflushes; +#endif + bstats[i].nruns += bin->stats.nruns; + bstats[i].reruns += bin->stats.reruns; + bstats[i].highruns += bin->stats.highruns; + bstats[i].curruns += bin->stats.curruns; + malloc_mutex_unlock(&bin->lock); + } +} +#endif + +void +arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) +{ + + /* Large allocation. 
*/ +#ifdef JEMALLOC_FILL +# ifndef JEMALLOC_STATS + if (opt_junk) +# endif +#endif + { +#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS)) + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> + PAGE_SHIFT; + size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK; +#endif + +#ifdef JEMALLOC_FILL +# ifdef JEMALLOC_STATS + if (opt_junk) +# endif + memset(ptr, 0x5a, size); +#endif +#ifdef JEMALLOC_STATS + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= size; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--; +#endif + } + + arena_run_dalloc(arena, (arena_run_t *)ptr, true); +} + +static void +arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t oldsize, size_t size) +{ + + assert(size < oldsize); + + /* + * Shrink the run, and make trailing pages available for other + * allocations. + */ + malloc_mutex_lock(&arena->lock); + arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, + true); +#ifdef JEMALLOC_STATS + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= oldsize; + arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++; + arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; + if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; + } +#endif + malloc_mutex_unlock(&arena->lock); +} + +static bool +arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t oldsize, size_t size, size_t extra, bool zero) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; + size_t npages = oldsize >> PAGE_SHIFT; + size_t followsize; + + assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK)); + + /* Try to extend the run. */ + assert(size + extra > oldsize); + malloc_mutex_lock(&arena->lock); + if (pageind + npages < chunk_npages && + (chunk->map[pageind+npages-map_bias].bits + & CHUNK_MAP_ALLOCATED) == 0 && (followsize = + chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size - + oldsize) { + /* + * The next run is available and sufficiently large. Split the + * following run, then merge the first part with the existing + * allocation. + */ + size_t flag_dirty; + size_t splitsize = (oldsize + followsize <= size + extra) + ? followsize : size + extra - oldsize; + arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + + ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero); + + size = oldsize + splitsize; + npages = size >> PAGE_SHIFT; + + /* + * Mark the extended run as dirty if either portion of the run + * was dirty before allocation. This is rather pedantic, + * because there's not actually any sequence of events that + * could cause the resulting run to be passed to + * arena_run_dalloc() with the dirty argument set to false + * (which is when dirty flag consistency would really matter). 
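+		 *
+		 * As a concrete illustration of the split above (assuming
+		 * 4KiB pages): growing a 3-page run to a requested 5 pages
+		 * when the following free run spans 4 pages yields
+		 * splitsize == 2 pages; the split consumes the first two
+		 * pages of the neighboring run and leaves the remaining two
+		 * pages available to other allocations.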
+ */ + flag_dirty = (chunk->map[pageind-map_bias].bits & + CHUNK_MAP_DIRTY) | + (chunk->map[pageind+npages-1-map_bias].bits & + CHUNK_MAP_DIRTY); + chunk->map[pageind-map_bias].bits = size | flag_dirty + | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind+npages-1-map_bias].bits = flag_dirty | + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + +#ifdef JEMALLOC_STATS + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= oldsize; + arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++; + arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; + if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { + arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = + arena->stats.lstats[(size >> PAGE_SHIFT) - + 1].curruns; + } +#endif + malloc_mutex_unlock(&arena->lock); + return (false); + } + malloc_mutex_unlock(&arena->lock); + + return (true); +} + +/* + * Try to resize a large allocation, in order to avoid copying. This will + * always fail if growing an object, and the following run is already in use. + */ +static bool +arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, + bool zero) +{ + size_t psize; + + psize = PAGE_CEILING(size + extra); + if (psize == oldsize) { + /* Same size class. */ +#ifdef JEMALLOC_FILL + if (opt_junk && size < oldsize) { + memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - + size); + } +#endif + return (false); + } else { + arena_chunk_t *chunk; + arena_t *arena; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = chunk->arena; + dassert(arena->magic == ARENA_MAGIC); + + if (psize < oldsize) { +#ifdef JEMALLOC_FILL + /* Fill before shrinking in order avoid a race. */ + if (opt_junk) { + memset((void *)((uintptr_t)ptr + size), 0x5a, + oldsize - size); + } +#endif + arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, + psize); + return (false); + } else { + bool ret = arena_ralloc_large_grow(arena, chunk, ptr, + oldsize, PAGE_CEILING(size), + psize - PAGE_CEILING(size), zero); +#ifdef JEMALLOC_FILL + if (ret == false && zero == false && opt_zero) { + memset((void *)((uintptr_t)ptr + oldsize), 0, + size - oldsize); + } +#endif + return (ret); + } + } +} + +void * +arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, + bool zero) +{ + + /* + * Avoid moving the allocation if the size class can be left the same. + */ + if (oldsize <= arena_maxclass) { + if (oldsize <= small_maxclass) { + assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size + == oldsize); + if ((size + extra <= small_maxclass && + SMALL_SIZE2BIN(size + extra) == + SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && + size + extra >= oldsize)) { +#ifdef JEMALLOC_FILL + if (opt_junk && size < oldsize) { + memset((void *)((uintptr_t)ptr + size), + 0x5a, oldsize - size); + } +#endif + return (ptr); + } + } else { + assert(size <= arena_maxclass); + if (size + extra > small_maxclass) { + if (arena_ralloc_large(ptr, oldsize, size, + extra, zero) == false) + return (ptr); + } + } + } + + /* Reallocation would require a move. 
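+	 * For illustration (assuming a 16-byte quantum): shrinking a
+	 * 96-byte region to a requested 90 bytes stays in the 96-byte bin,
+	 * so ptr is returned above, whereas growing it to 100 bytes maps
+	 * to the 112-byte bin and neither test passes, so control reaches
+	 * this point and the caller must allocate and copy.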
*/ + return (NULL); +} + +void * +arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero) +{ + void *ret; + size_t copysize; + + /* Try to avoid moving the allocation. */ + ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero); + if (ret != NULL) + return (ret); + + /* + * size and oldsize are different enough that we need to move the + * object. In that case, fall back to allocating new space and + * copying. + */ + if (alignment != 0) { + size_t usize = sa2u(size + extra, alignment, NULL); + if (usize == 0) + return (NULL); + ret = ipalloc(usize, alignment, zero); + } else + ret = arena_malloc(size + extra, zero); + + if (ret == NULL) { + if (extra == 0) + return (NULL); + /* Try again, this time without extra. */ + if (alignment != 0) { + size_t usize = sa2u(size, alignment, NULL); + if (usize == 0) + return (NULL); + ret = ipalloc(usize, alignment, zero); + } else + ret = arena_malloc(size, zero); + + if (ret == NULL) + return (NULL); + } + + /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ + + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(ret, ptr, copysize); + idalloc(ptr); + return (ret); +} + +bool +arena_new(arena_t *arena, unsigned ind) +{ + unsigned i; + arena_bin_t *bin; + + arena->ind = ind; + arena->nthreads = 0; + + if (malloc_mutex_init(&arena->lock)) + return (true); + +#ifdef JEMALLOC_STATS + memset(&arena->stats, 0, sizeof(arena_stats_t)); + arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses * + sizeof(malloc_large_stats_t)); + if (arena->stats.lstats == NULL) + return (true); + memset(arena->stats.lstats, 0, nlclasses * + sizeof(malloc_large_stats_t)); +# ifdef JEMALLOC_TCACHE + ql_new(&arena->tcache_ql); +# endif +#endif + +#ifdef JEMALLOC_PROF + arena->prof_accumbytes = 0; +#endif + + /* Initialize chunks. */ + ql_new(&arena->chunks_dirty); + arena->spare = NULL; + + arena->nactive = 0; + arena->ndirty = 0; + arena->npurgatory = 0; + + arena_avail_tree_new(&arena->runs_avail_clean); + arena_avail_tree_new(&arena->runs_avail_dirty); + + /* Initialize bins. */ + i = 0; +#ifdef JEMALLOC_TINY + /* (2^n)-spaced tiny bins. */ + for (; i < ntbins; i++) { + bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock)) + return (true); + bin->runcur = NULL; + arena_run_tree_new(&bin->runs); +#ifdef JEMALLOC_STATS + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); +#endif + } +#endif + + /* Quantum-spaced bins. */ + for (; i < ntbins + nqbins; i++) { + bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock)) + return (true); + bin->runcur = NULL; + arena_run_tree_new(&bin->runs); +#ifdef JEMALLOC_STATS + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); +#endif + } + + /* Cacheline-spaced bins. */ + for (; i < ntbins + nqbins + ncbins; i++) { + bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock)) + return (true); + bin->runcur = NULL; + arena_run_tree_new(&bin->runs); +#ifdef JEMALLOC_STATS + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); +#endif + } + + /* Subpage-spaced bins. 
*/ + for (; i < nbins; i++) { + bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock)) + return (true); + bin->runcur = NULL; + arena_run_tree_new(&bin->runs); +#ifdef JEMALLOC_STATS + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); +#endif + } + +#ifdef JEMALLOC_DEBUG + arena->magic = ARENA_MAGIC; +#endif + + return (false); +} + +#ifdef JEMALLOC_DEBUG +static void +small_size2bin_validate(void) +{ + size_t i, size, binind; + + i = 1; +# ifdef JEMALLOC_TINY + /* Tiny. */ + for (; i < (1U << LG_TINY_MIN); i++) { + size = pow2_ceil(1U << LG_TINY_MIN); + binind = ffs((int)(size >> (LG_TINY_MIN + 1))); + assert(SMALL_SIZE2BIN(i) == binind); + } + for (; i < qspace_min; i++) { + size = pow2_ceil(i); + binind = ffs((int)(size >> (LG_TINY_MIN + 1))); + assert(SMALL_SIZE2BIN(i) == binind); + } +# endif + /* Quantum-spaced. */ + for (; i <= qspace_max; i++) { + size = QUANTUM_CEILING(i); + binind = ntbins + (size >> LG_QUANTUM) - 1; + assert(SMALL_SIZE2BIN(i) == binind); + } + /* Cacheline-spaced. */ + for (; i <= cspace_max; i++) { + size = CACHELINE_CEILING(i); + binind = ntbins + nqbins + ((size - cspace_min) >> + LG_CACHELINE); + assert(SMALL_SIZE2BIN(i) == binind); + } + /* Sub-page. */ + for (; i <= sspace_max; i++) { + size = SUBPAGE_CEILING(i); + binind = ntbins + nqbins + ncbins + ((size - sspace_min) + >> LG_SUBPAGE); + assert(SMALL_SIZE2BIN(i) == binind); + } +} +#endif + +static bool +small_size2bin_init(void) +{ + + if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT + || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT + || (sizeof(const_small_size2bin) != ((small_maxclass-1) >> + LG_TINY_MIN) + 1)) + return (small_size2bin_init_hard()); + + small_size2bin = const_small_size2bin; +#ifdef JEMALLOC_DEBUG + small_size2bin_validate(); +#endif + return (false); +} + +static bool +small_size2bin_init_hard(void) +{ + size_t i, size, binind; + uint8_t *custom_small_size2bin; +#define CUSTOM_SMALL_SIZE2BIN(s) \ + custom_small_size2bin[(s-1) >> LG_TINY_MIN] + + assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT + || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT + || (sizeof(const_small_size2bin) != ((small_maxclass-1) >> + LG_TINY_MIN) + 1)); + + custom_small_size2bin = (uint8_t *) + base_alloc(small_maxclass >> LG_TINY_MIN); + if (custom_small_size2bin == NULL) + return (true); + + i = 1; +#ifdef JEMALLOC_TINY + /* Tiny. */ + for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) { + size = pow2_ceil(1U << LG_TINY_MIN); + binind = ffs((int)(size >> (LG_TINY_MIN + 1))); + CUSTOM_SMALL_SIZE2BIN(i) = binind; + } + for (; i < qspace_min; i += TINY_MIN) { + size = pow2_ceil(i); + binind = ffs((int)(size >> (LG_TINY_MIN + 1))); + CUSTOM_SMALL_SIZE2BIN(i) = binind; + } +#endif + /* Quantum-spaced. */ + for (; i <= qspace_max; i += TINY_MIN) { + size = QUANTUM_CEILING(i); + binind = ntbins + (size >> LG_QUANTUM) - 1; + CUSTOM_SMALL_SIZE2BIN(i) = binind; + } + /* Cacheline-spaced. */ + for (; i <= cspace_max; i += TINY_MIN) { + size = CACHELINE_CEILING(i); + binind = ntbins + nqbins + ((size - cspace_min) >> + LG_CACHELINE); + CUSTOM_SMALL_SIZE2BIN(i) = binind; + } + /* Sub-page. 
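+	 * As a worked example of the encoding (assuming LG_TINY_MIN == 3
+	 * and a 16-byte quantum): the table is indexed by
+	 * (size-1) >> LG_TINY_MIN, so a 100-byte request reads entry
+	 * (99 >> 3) == 12, which holds the bin index of the 112-byte
+	 * quantum-spaced class; entries 12 and 13 (sizes 97..112) share
+	 * that index.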
*/ + for (; i <= sspace_max; i += TINY_MIN) { + size = SUBPAGE_CEILING(i); + binind = ntbins + nqbins + ncbins + ((size - sspace_min) >> + LG_SUBPAGE); + CUSTOM_SMALL_SIZE2BIN(i) = binind; + } + + small_size2bin = custom_small_size2bin; +#ifdef JEMALLOC_DEBUG + small_size2bin_validate(); +#endif + return (false); +#undef CUSTOM_SMALL_SIZE2BIN +} + +/* + * Calculate bin_info->run_size such that it meets the following constraints: + * + * *) bin_info->run_size >= min_run_size + * *) bin_info->run_size <= arena_maxclass + * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). + * *) bin_info->nregs <= RUN_MAXREGS + * + * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also + * calculated here, since these settings are all interdependent. + */ +static size_t +bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) +{ + size_t try_run_size, good_run_size; + uint32_t try_nregs, good_nregs; + uint32_t try_hdr_size, good_hdr_size; + uint32_t try_bitmap_offset, good_bitmap_offset; +#ifdef JEMALLOC_PROF + uint32_t try_ctx0_offset, good_ctx0_offset; +#endif + uint32_t try_reg0_offset, good_reg0_offset; + + assert(min_run_size >= PAGE_SIZE); + assert(min_run_size <= arena_maxclass); + + /* + * Calculate known-valid settings before entering the run_size + * expansion loop, so that the first part of the loop always copies + * valid settings. + * + * The do..while loop iteratively reduces the number of regions until + * the run header and the regions no longer overlap. A closed formula + * would be quite messy, since there is an interdependency between the + * header's mask length and the number of regions. + */ + try_run_size = min_run_size; + try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size) + + 1; /* Counter-act try_nregs-- in loop. */ + if (try_nregs > RUN_MAXREGS) { + try_nregs = RUN_MAXREGS + + 1; /* Counter-act try_nregs-- in loop. */ + } + do { + try_nregs--; + try_hdr_size = sizeof(arena_run_t); + /* Pad to a long boundary. */ + try_hdr_size = LONG_CEILING(try_hdr_size); + try_bitmap_offset = try_hdr_size; + /* Add space for bitmap. */ + try_hdr_size += bitmap_size(try_nregs); +#ifdef JEMALLOC_PROF + if (opt_prof && prof_promote == false) { + /* Pad to a quantum boundary. */ + try_hdr_size = QUANTUM_CEILING(try_hdr_size); + try_ctx0_offset = try_hdr_size; + /* Add space for one (prof_ctx_t *) per region. */ + try_hdr_size += try_nregs * sizeof(prof_ctx_t *); + } else + try_ctx0_offset = 0; +#endif + try_reg0_offset = try_run_size - (try_nregs * + bin_info->reg_size); + } while (try_hdr_size > try_reg0_offset); + + /* run_size expansion loop. */ + do { + /* + * Copy valid settings before trying more aggressive settings. + */ + good_run_size = try_run_size; + good_nregs = try_nregs; + good_hdr_size = try_hdr_size; + good_bitmap_offset = try_bitmap_offset; +#ifdef JEMALLOC_PROF + good_ctx0_offset = try_ctx0_offset; +#endif + good_reg0_offset = try_reg0_offset; + + /* Try more aggressive settings. */ + try_run_size += PAGE_SIZE; + try_nregs = ((try_run_size - sizeof(arena_run_t)) / + bin_info->reg_size) + + 1; /* Counter-act try_nregs-- in loop. */ + if (try_nregs > RUN_MAXREGS) { + try_nregs = RUN_MAXREGS + + 1; /* Counter-act try_nregs-- in loop. */ + } + do { + try_nregs--; + try_hdr_size = sizeof(arena_run_t); + /* Pad to a long boundary. */ + try_hdr_size = LONG_CEILING(try_hdr_size); + try_bitmap_offset = try_hdr_size; + /* Add space for bitmap. 
*/ + try_hdr_size += bitmap_size(try_nregs); +#ifdef JEMALLOC_PROF + if (opt_prof && prof_promote == false) { + /* Pad to a quantum boundary. */ + try_hdr_size = QUANTUM_CEILING(try_hdr_size); + try_ctx0_offset = try_hdr_size; + /* + * Add space for one (prof_ctx_t *) per region. + */ + try_hdr_size += try_nregs * + sizeof(prof_ctx_t *); + } +#endif + try_reg0_offset = try_run_size - (try_nregs * + bin_info->reg_size); + } while (try_hdr_size > try_reg0_offset); + } while (try_run_size <= arena_maxclass + && try_run_size <= arena_maxclass + && RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX + && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size + && try_nregs < RUN_MAXREGS); + + assert(good_hdr_size <= good_reg0_offset); + + /* Copy final settings. */ + bin_info->run_size = good_run_size; + bin_info->nregs = good_nregs; + bin_info->bitmap_offset = good_bitmap_offset; +#ifdef JEMALLOC_PROF + bin_info->ctx0_offset = good_ctx0_offset; +#endif + bin_info->reg0_offset = good_reg0_offset; + + return (good_run_size); +} + +static bool +bin_info_init(void) +{ + arena_bin_info_t *bin_info; + unsigned i; + size_t prev_run_size; + + arena_bin_info = base_alloc(sizeof(arena_bin_info_t) * nbins); + if (arena_bin_info == NULL) + return (true); + + prev_run_size = PAGE_SIZE; + i = 0; +#ifdef JEMALLOC_TINY + /* (2^n)-spaced tiny bins. */ + for (; i < ntbins; i++) { + bin_info = &arena_bin_info[i]; + bin_info->reg_size = (1U << (LG_TINY_MIN + i)); + prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); + bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); + } +#endif + + /* Quantum-spaced bins. */ + for (; i < ntbins + nqbins; i++) { + bin_info = &arena_bin_info[i]; + bin_info->reg_size = (i - ntbins + 1) << LG_QUANTUM; + prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); + bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); + } + + /* Cacheline-spaced bins. */ + for (; i < ntbins + nqbins + ncbins; i++) { + bin_info = &arena_bin_info[i]; + bin_info->reg_size = cspace_min + ((i - (ntbins + nqbins)) << + LG_CACHELINE); + prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); + bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); + } + + /* Subpage-spaced bins. */ + for (; i < nbins; i++) { + bin_info = &arena_bin_info[i]; + bin_info->reg_size = sspace_min + ((i - (ntbins + nqbins + + ncbins)) << LG_SUBPAGE); + prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size); + bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); + } + + return (false); +} + +bool +arena_boot(void) +{ + size_t header_size; + unsigned i; + + /* Set variables according to the value of opt_lg_[qc]space_max. */ + qspace_max = (1U << opt_lg_qspace_max); + cspace_min = CACHELINE_CEILING(qspace_max); + if (cspace_min == qspace_max) + cspace_min += CACHELINE; + cspace_max = (1U << opt_lg_cspace_max); + sspace_min = SUBPAGE_CEILING(cspace_max); + if (sspace_min == cspace_max) + sspace_min += SUBPAGE; + assert(sspace_min < PAGE_SIZE); + sspace_max = PAGE_SIZE - SUBPAGE; + +#ifdef JEMALLOC_TINY + assert(LG_QUANTUM >= LG_TINY_MIN); +#endif + assert(ntbins <= LG_QUANTUM); + nqbins = qspace_max >> LG_QUANTUM; + ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1; + nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1; + nbins = ntbins + nqbins + ncbins + nsbins; + + /* + * The small_size2bin lookup table uses uint8_t to encode each bin + * index, so we cannot support more than 256 small size classes. 
This + * limit is difficult to exceed (not even possible with 16B quantum and + * 4KiB pages), and such configurations are impractical, but + * nonetheless we need to protect against this case in order to avoid + * undefined behavior. + * + * Further constrain nbins to 255 if prof_promote is true, since all + * small size classes, plus a "not small" size class must be stored in + * 8 bits of arena_chunk_map_t's bits field. + */ +#ifdef JEMALLOC_PROF + if (opt_prof && prof_promote) { + if (nbins > 255) { + char line_buf[UMAX2S_BUFSIZE]; + malloc_write("<jemalloc>: Too many small size classes ("); + malloc_write(u2s(nbins, 10, line_buf)); + malloc_write(" > max 255)\n"); + abort(); + } + } else +#endif + if (nbins > 256) { + char line_buf[UMAX2S_BUFSIZE]; + malloc_write("<jemalloc>: Too many small size classes ("); + malloc_write(u2s(nbins, 10, line_buf)); + malloc_write(" > max 256)\n"); + abort(); + } + + /* + * Compute the header size such that it is large enough to contain the + * page map. The page map is biased to omit entries for the header + * itself, so some iteration is necessary to compute the map bias. + * + * 1) Compute safe header_size and map_bias values that include enough + * space for an unbiased page map. + * 2) Refine map_bias based on (1) to omit the header pages in the page + * map. The resulting map_bias may be one too small. + * 3) Refine map_bias based on (2). The result will be >= the result + * from (2), and will always be correct. + */ + map_bias = 0; + for (i = 0; i < 3; i++) { + header_size = offsetof(arena_chunk_t, map) + + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); + map_bias = (header_size >> PAGE_SHIFT) + ((header_size & + PAGE_MASK) != 0); + } + assert(map_bias > 0); + + arena_maxclass = chunksize - (map_bias << PAGE_SHIFT); + + if (small_size2bin_init()) + return (true); + + if (bin_info_init()) + return (true); + + return (false); +} diff --git a/src/atomic.c b/src/atomic.c new file mode 100644 index 0000000..77ee313 --- /dev/null +++ b/src/atomic.c @@ -0,0 +1,2 @@ +#define JEMALLOC_ATOMIC_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/src/base.c b/src/base.c new file mode 100644 index 0000000..cc85e84 --- /dev/null +++ b/src/base.c @@ -0,0 +1,106 @@ +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +malloc_mutex_t base_mtx; + +/* + * Current pages that are being used for internal memory allocations. These + * pages are carved up in cacheline-size quanta, so that there is no chance of + * false cache line sharing. + */ +static void *base_pages; +static void *base_next_addr; +static void *base_past_addr; /* Addr immediately past base_pages. */ +static extent_node_t *base_nodes; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +static bool base_pages_alloc(size_t minsize); + +/******************************************************************************/ + +static bool +base_pages_alloc(size_t minsize) +{ + size_t csize; + bool zero; + + assert(minsize != 0); + csize = CHUNK_CEILING(minsize); + zero = false; + base_pages = chunk_alloc(csize, true, &zero); + if (base_pages == NULL) + return (true); + base_next_addr = base_pages; + base_past_addr = (void *)((uintptr_t)base_pages + csize); + + return (false); +} + +void * +base_alloc(size_t size) +{ + void *ret; + size_t csize; + + /* Round size up to nearest multiple of the cacheline size. */ + csize = CACHELINE_CEILING(size); + + malloc_mutex_lock(&base_mtx); + /* Make sure there's enough space for the allocation. */ + if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { + if (base_pages_alloc(csize)) { + malloc_mutex_unlock(&base_mtx); + return (NULL); + } + } + /* Allocate. */ + ret = base_next_addr; + base_next_addr = (void *)((uintptr_t)base_next_addr + csize); + malloc_mutex_unlock(&base_mtx); + + return (ret); +} + +extent_node_t * +base_node_alloc(void) +{ + extent_node_t *ret; + + malloc_mutex_lock(&base_mtx); + if (base_nodes != NULL) { + ret = base_nodes; + base_nodes = *(extent_node_t **)ret; + malloc_mutex_unlock(&base_mtx); + } else { + malloc_mutex_unlock(&base_mtx); + ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); + } + + return (ret); +} + +void +base_node_dealloc(extent_node_t *node) +{ + + malloc_mutex_lock(&base_mtx); + *(extent_node_t **)node = base_nodes; + base_nodes = node; + malloc_mutex_unlock(&base_mtx); +} + +bool +base_boot(void) +{ + + base_nodes = NULL; + if (malloc_mutex_init(&base_mtx)) + return (true); + + return (false); +} diff --git a/src/bitmap.c b/src/bitmap.c new file mode 100644 index 0000000..b47e262 --- /dev/null +++ b/src/bitmap.c @@ -0,0 +1,90 @@ +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static size_t bits2groups(size_t nbits); + +/******************************************************************************/ + +static size_t +bits2groups(size_t nbits) +{ + + return ((nbits >> LG_BITMAP_GROUP_NBITS) + + !!(nbits & BITMAP_GROUP_NBITS_MASK)); +} + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) +{ + unsigned i; + size_t group_count; + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + /* + * Compute the number of groups necessary to store nbits bits, and + * progressively work upward through the levels until reaching a level + * that requires only one group. 
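+	 * For example, with 64-bit groups (LG_BITMAP_GROUP_NBITS == 6) and
+	 * nbits == 1000: level 0 needs bits2groups(1000) == 16 groups and
+	 * level 1 needs bits2groups(16) == 1 group, so nlevels == 2 and
+	 * the level-2 group_offset (17) gives the total group count.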
+ */ + binfo->levels[0].group_offset = 0; + group_count = bits2groups(nbits); + for (i = 1; group_count > 1; i++) { + assert(i < BITMAP_MAX_LEVELS); + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + group_count = bits2groups(group_count); + } + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + binfo->nlevels = i; + binfo->nbits = nbits; +} + +size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) +{ + + return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); +} + +size_t +bitmap_size(size_t nbits) +{ + bitmap_info_t binfo; + + bitmap_info_init(&binfo, nbits); + return (bitmap_info_ngroups(&binfo)); +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t extra; + unsigned i; + + /* + * Bits are actually inverted with regard to the external bitmap + * interface, so the bitmap starts out with all 1 bits, except for + * trailing unused bits (if any). Note that each group uses bit 0 to + * correspond to the first logical bit in the group, so extra bits + * are the most significant bits of the last group. + */ + memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << + LG_SIZEOF_BITMAP); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[1].group_offset - 1] >>= extra; + for (i = 1; i < binfo->nlevels; i++) { + size_t group_count = binfo->levels[i].group_offset - + binfo->levels[i-1].group_offset; + extra = (BITMAP_GROUP_NBITS - (group_count & + BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } +} diff --git a/src/chunk.c b/src/chunk.c new file mode 100644 index 0000000..301519e --- /dev/null +++ b/src/chunk.c @@ -0,0 +1,171 @@ +#define JEMALLOC_CHUNK_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +size_t opt_lg_chunk = LG_CHUNK_DEFAULT; +#ifdef JEMALLOC_SWAP +bool opt_overcommit = true; +#endif + +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) +malloc_mutex_t chunks_mtx; +chunk_stats_t stats_chunks; +#endif + +#ifdef JEMALLOC_IVSALLOC +rtree_t *chunks_rtree; +#endif + +/* Various chunk-related settings. */ +size_t chunksize; +size_t chunksize_mask; /* (chunksize - 1). */ +size_t chunk_npages; +size_t map_bias; +size_t arena_maxclass; /* Max size class for arenas. */ + +/******************************************************************************/ + +/* + * If the caller specifies (*zero == false), it is still possible to receive + * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc() + * takes advantage of this to avoid demanding zeroed chunks, but taking + * advantage of them if they are returned. + */ +void * +chunk_alloc(size_t size, bool base, bool *zero) +{ + void *ret; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + +#ifdef JEMALLOC_SWAP + if (swap_enabled) { + ret = chunk_alloc_swap(size, zero); + if (ret != NULL) + goto RETURN; + } + + if (swap_enabled == false || opt_overcommit) { +#endif +#ifdef JEMALLOC_DSS + ret = chunk_alloc_dss(size, zero); + if (ret != NULL) + goto RETURN; +#endif + ret = chunk_alloc_mmap(size); + if (ret != NULL) { + *zero = true; + goto RETURN; + } +#ifdef JEMALLOC_SWAP + } +#endif + + /* All strategies for allocation failed. 
*/ + ret = NULL; +RETURN: +#ifdef JEMALLOC_IVSALLOC + if (base == false && ret != NULL) { + if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) { + chunk_dealloc(ret, size); + return (NULL); + } + } +#endif +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) + if (ret != NULL) { +# ifdef JEMALLOC_PROF + bool gdump; +# endif + malloc_mutex_lock(&chunks_mtx); +# ifdef JEMALLOC_STATS + stats_chunks.nchunks += (size / chunksize); +# endif + stats_chunks.curchunks += (size / chunksize); + if (stats_chunks.curchunks > stats_chunks.highchunks) { + stats_chunks.highchunks = stats_chunks.curchunks; +# ifdef JEMALLOC_PROF + gdump = true; +# endif + } +# ifdef JEMALLOC_PROF + else + gdump = false; +# endif + malloc_mutex_unlock(&chunks_mtx); +# ifdef JEMALLOC_PROF + if (opt_prof && opt_prof_gdump && gdump) + prof_gdump(); +# endif + } +#endif + + assert(CHUNK_ADDR2BASE(ret) == ret); + return (ret); +} + +void +chunk_dealloc(void *chunk, size_t size) +{ + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert(size != 0); + assert((size & chunksize_mask) == 0); + +#ifdef JEMALLOC_IVSALLOC + rtree_set(chunks_rtree, (uintptr_t)chunk, NULL); +#endif +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) + malloc_mutex_lock(&chunks_mtx); + stats_chunks.curchunks -= (size / chunksize); + malloc_mutex_unlock(&chunks_mtx); +#endif + +#ifdef JEMALLOC_SWAP + if (swap_enabled && chunk_dealloc_swap(chunk, size) == false) + return; +#endif +#ifdef JEMALLOC_DSS + if (chunk_dealloc_dss(chunk, size) == false) + return; +#endif + chunk_dealloc_mmap(chunk, size); +} + +bool +chunk_boot(void) +{ + + /* Set variables according to the value of opt_lg_chunk. */ + chunksize = (ZU(1) << opt_lg_chunk); + assert(chunksize >= PAGE_SIZE); + chunksize_mask = chunksize - 1; + chunk_npages = (chunksize >> PAGE_SHIFT); + +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) + if (malloc_mutex_init(&chunks_mtx)) + return (true); + memset(&stats_chunks, 0, sizeof(chunk_stats_t)); +#endif +#ifdef JEMALLOC_SWAP + if (chunk_swap_boot()) + return (true); +#endif + if (chunk_mmap_boot()) + return (true); +#ifdef JEMALLOC_DSS + if (chunk_dss_boot()) + return (true); +#endif +#ifdef JEMALLOC_IVSALLOC + chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk); + if (chunks_rtree == NULL) + return (true); +#endif + + return (false); +} diff --git a/src/chunk_dss.c b/src/chunk_dss.c new file mode 100644 index 0000000..5c0e290 --- /dev/null +++ b/src/chunk_dss.c @@ -0,0 +1,284 @@ +#define JEMALLOC_CHUNK_DSS_C_ +#include "jemalloc/internal/jemalloc_internal.h" +#ifdef JEMALLOC_DSS +/******************************************************************************/ +/* Data. */ + +malloc_mutex_t dss_mtx; + +/* Base address of the DSS. */ +static void *dss_base; +/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ +static void *dss_prev; +/* Current upper limit on DSS addresses. */ +static void *dss_max; + +/* + * Trees of chunks that were previously allocated (trees differ only in node + * ordering). These are used when allocating chunks, in an attempt to re-use + * address space. Depending on function, different tree orderings are needed, + * which is why there are two trees with the same contents. + */ +static extent_tree_t dss_chunks_szad; +static extent_tree_t dss_chunks_ad; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +static void *chunk_recycle_dss(size_t size, bool *zero); +static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size); + +/******************************************************************************/ + +static void * +chunk_recycle_dss(size_t size, bool *zero) +{ + extent_node_t *node, key; + + key.addr = NULL; + key.size = size; + malloc_mutex_lock(&dss_mtx); + node = extent_tree_szad_nsearch(&dss_chunks_szad, &key); + if (node != NULL) { + void *ret = node->addr; + + /* Remove node from the tree. */ + extent_tree_szad_remove(&dss_chunks_szad, node); + if (node->size == size) { + extent_tree_ad_remove(&dss_chunks_ad, node); + base_node_dealloc(node); + } else { + /* + * Insert the remainder of node's address range as a + * smaller chunk. Its position within dss_chunks_ad + * does not change. + */ + assert(node->size > size); + node->addr = (void *)((uintptr_t)node->addr + size); + node->size -= size; + extent_tree_szad_insert(&dss_chunks_szad, node); + } + malloc_mutex_unlock(&dss_mtx); + + if (*zero) + memset(ret, 0, size); + return (ret); + } + malloc_mutex_unlock(&dss_mtx); + + return (NULL); +} + +void * +chunk_alloc_dss(size_t size, bool *zero) +{ + void *ret; + + ret = chunk_recycle_dss(size, zero); + if (ret != NULL) + return (ret); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a huge allocation request as a negative increment. + */ + if ((intptr_t)size < 0) + return (NULL); + + malloc_mutex_lock(&dss_mtx); + if (dss_prev != (void *)-1) { + intptr_t incr; + + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. + */ + do { + /* Get the current end of the DSS. */ + dss_max = sbrk(0); + + /* + * Calculate how much padding is necessary to + * chunk-align the end of the DSS. + */ + incr = (intptr_t)size + - (intptr_t)CHUNK_ADDR2OFFSET(dss_max); + if (incr == (intptr_t)size) + ret = dss_max; + else { + ret = (void *)((intptr_t)dss_max + incr); + incr += size; + } + + dss_prev = sbrk(incr); + if (dss_prev == dss_max) { + /* Success. */ + dss_max = (void *)((intptr_t)dss_prev + incr); + malloc_mutex_unlock(&dss_mtx); + *zero = true; + return (ret); + } + } while (dss_prev != (void *)-1); + } + malloc_mutex_unlock(&dss_mtx); + + return (NULL); +} + +static extent_node_t * +chunk_dealloc_dss_record(void *chunk, size_t size) +{ + extent_node_t *xnode, *node, *prev, key; + + xnode = NULL; + while (true) { + key.addr = (void *)((uintptr_t)chunk + size); + node = extent_tree_ad_nsearch(&dss_chunks_ad, &key); + /* Try to coalesce forward. */ + if (node != NULL && node->addr == key.addr) { + /* + * Coalesce chunk with the following address range. + * This does not change the position within + * dss_chunks_ad, so only remove/insert from/into + * dss_chunks_szad. + */ + extent_tree_szad_remove(&dss_chunks_szad, node); + node->addr = chunk; + node->size += size; + extent_tree_szad_insert(&dss_chunks_szad, node); + break; + } else if (xnode == NULL) { + /* + * It is possible that base_node_alloc() will cause a + * new base chunk to be allocated, so take care not to + * deadlock on dss_mtx, and recover if another thread + * deallocates an adjacent chunk while this one is busy + * allocating xnode. + */ + malloc_mutex_unlock(&dss_mtx); + xnode = base_node_alloc(); + malloc_mutex_lock(&dss_mtx); + if (xnode == NULL) + return (NULL); + } else { + /* Coalescing forward failed, so insert a new node. 
*/ + node = xnode; + xnode = NULL; + node->addr = chunk; + node->size = size; + extent_tree_ad_insert(&dss_chunks_ad, node); + extent_tree_szad_insert(&dss_chunks_szad, node); + break; + } + } + /* Discard xnode if it ended up unused do to a race. */ + if (xnode != NULL) + base_node_dealloc(xnode); + + /* Try to coalesce backward. */ + prev = extent_tree_ad_prev(&dss_chunks_ad, node); + if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == + chunk) { + /* + * Coalesce chunk with the previous address range. This does + * not change the position within dss_chunks_ad, so only + * remove/insert node from/into dss_chunks_szad. + */ + extent_tree_szad_remove(&dss_chunks_szad, prev); + extent_tree_ad_remove(&dss_chunks_ad, prev); + + extent_tree_szad_remove(&dss_chunks_szad, node); + node->addr = prev->addr; + node->size += prev->size; + extent_tree_szad_insert(&dss_chunks_szad, node); + + base_node_dealloc(prev); + } + + return (node); +} + +bool +chunk_in_dss(void *chunk) +{ + bool ret; + + malloc_mutex_lock(&dss_mtx); + if ((uintptr_t)chunk >= (uintptr_t)dss_base + && (uintptr_t)chunk < (uintptr_t)dss_max) + ret = true; + else + ret = false; + malloc_mutex_unlock(&dss_mtx); + + return (ret); +} + +bool +chunk_dealloc_dss(void *chunk, size_t size) +{ + bool ret; + + malloc_mutex_lock(&dss_mtx); + if ((uintptr_t)chunk >= (uintptr_t)dss_base + && (uintptr_t)chunk < (uintptr_t)dss_max) { + extent_node_t *node; + + /* Try to coalesce with other unused chunks. */ + node = chunk_dealloc_dss_record(chunk, size); + if (node != NULL) { + chunk = node->addr; + size = node->size; + } + + /* Get the current end of the DSS. */ + dss_max = sbrk(0); + + /* + * Try to shrink the DSS if this chunk is at the end of the + * DSS. The sbrk() call here is subject to a race condition + * with threads that use brk(2) or sbrk(2) directly, but the + * alternative would be to leak memory for the sake of poorly + * designed multi-threaded programs. + */ + if ((void *)((uintptr_t)chunk + size) == dss_max + && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) { + /* Success. */ + dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size); + + if (node != NULL) { + extent_tree_szad_remove(&dss_chunks_szad, node); + extent_tree_ad_remove(&dss_chunks_ad, node); + base_node_dealloc(node); + } + } else + madvise(chunk, size, MADV_DONTNEED); + + ret = false; + goto RETURN; + } + + ret = true; +RETURN: + malloc_mutex_unlock(&dss_mtx); + return (ret); +} + +bool +chunk_dss_boot(void) +{ + + if (malloc_mutex_init(&dss_mtx)) + return (true); + dss_base = sbrk(0); + dss_prev = dss_base; + dss_max = dss_base; + extent_tree_szad_new(&dss_chunks_szad); + extent_tree_ad_new(&dss_chunks_ad); + + return (false); +} + +/******************************************************************************/ +#endif /* JEMALLOC_DSS */ diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c new file mode 100644 index 0000000..164e86e --- /dev/null +++ b/src/chunk_mmap.c @@ -0,0 +1,239 @@ +#define JEMALLOC_CHUNK_MMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* + * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and + * potentially avoid some system calls. 
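+ * For example, the fast path mmap()s exactly the requested size; if the
+ * returned address happens to be chunk-aligned, no further system calls
+ * are needed, otherwise the mapping is extended by (chunksize - offset)
+ * bytes or, failing that, the slow over-allocate-and-trim path below is
+ * taken.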
+ */ +#ifndef NO_TLS +static __thread bool mmap_unaligned_tls + JEMALLOC_ATTR(tls_model("initial-exec")); +#define MMAP_UNALIGNED_GET() mmap_unaligned_tls +#define MMAP_UNALIGNED_SET(v) do { \ + mmap_unaligned_tls = (v); \ +} while (0) +#else +static pthread_key_t mmap_unaligned_tsd; +#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd)) +#define MMAP_UNALIGNED_SET(v) do { \ + pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \ +} while (0) +#endif + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void *pages_map(void *addr, size_t size, bool noreserve); +static void pages_unmap(void *addr, size_t size); +static void *chunk_alloc_mmap_slow(size_t size, bool unaligned, + bool noreserve); +static void *chunk_alloc_mmap_internal(size_t size, bool noreserve); + +/******************************************************************************/ + +static void * +pages_map(void *addr, size_t size, bool noreserve) +{ + void *ret; + + /* + * We don't use MAP_FIXED here, because it can cause the *replacement* + * of existing mappings, and we only want to create new mappings. + */ + int flags = MAP_PRIVATE | MAP_ANON; +#ifdef MAP_NORESERVE + if (noreserve) + flags |= MAP_NORESERVE; +#endif + ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0); + assert(ret != NULL); + + if (ret == MAP_FAILED) + ret = NULL; + else if (addr != NULL && ret != addr) { + /* + * We succeeded in mapping memory, but not in the right place. + */ + if (munmap(ret, size) == -1) { + char buf[BUFERROR_BUF]; + + buferror(errno, buf, sizeof(buf)); + malloc_write("<jemalloc>: Error in munmap(): "); + malloc_write(buf); + malloc_write("\n"); + if (opt_abort) + abort(); + } + ret = NULL; + } + + assert(ret == NULL || (addr == NULL && ret != addr) + || (addr != NULL && ret == addr)); + return (ret); +} + +static void +pages_unmap(void *addr, size_t size) +{ + + if (munmap(addr, size) == -1) { + char buf[BUFERROR_BUF]; + + buferror(errno, buf, sizeof(buf)); + malloc_write("<jemalloc>: Error in munmap(): "); + malloc_write(buf); + malloc_write("\n"); + if (opt_abort) + abort(); + } +} + +static void * +chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve) +{ + void *ret; + size_t offset; + + /* Beware size_t wrap-around. */ + if (size + chunksize <= size) + return (NULL); + + ret = pages_map(NULL, size + chunksize, noreserve); + if (ret == NULL) + return (NULL); + + /* Clean up unneeded leading/trailing space. */ + offset = CHUNK_ADDR2OFFSET(ret); + if (offset != 0) { + /* Note that mmap() returned an unaligned mapping. */ + unaligned = true; + + /* Leading space. */ + pages_unmap(ret, chunksize - offset); + + ret = (void *)((uintptr_t)ret + + (chunksize - offset)); + + /* Trailing space. */ + pages_unmap((void *)((uintptr_t)ret + size), + offset); + } else { + /* Trailing space only. */ + pages_unmap((void *)((uintptr_t)ret + size), + chunksize); + } + + /* + * If mmap() returned an aligned mapping, reset mmap_unaligned so that + * the next chunk_alloc_mmap() execution tries the fast allocation + * method. + */ + if (unaligned == false) + MMAP_UNALIGNED_SET(false); + + return (ret); +} + +static void * +chunk_alloc_mmap_internal(size_t size, bool noreserve) +{ + void *ret; + + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. 
The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in at least one call to + * pages_unmap(). + * + * A more optimistic approach is to try mapping precisely the right + * amount, then try to append another mapping if alignment is off. In + * practice, this works out well as long as the application is not + * interleaving mappings via direct mmap() calls. If we do run into a + * situation where there is an interleaved mapping and we are unable to + * extend an unaligned mapping, our best option is to switch to the + * slow method until mmap() returns another aligned mapping. This will + * tend to leave a gap in the memory map that is too small to cause + * later problems for the optimistic method. + * + * Another possible confounding factor is address space layout + * randomization (ASLR), which causes mmap(2) to disregard the + * requested address. mmap_unaligned tracks whether the previous + * chunk_alloc_mmap() execution received any unaligned or relocated + * mappings, and if so, the current execution will immediately fall + * back to the slow method. However, we keep track of whether the fast + * method would have succeeded, and if so, we make a note to try the + * fast method next time. + */ + + if (MMAP_UNALIGNED_GET() == false) { + size_t offset; + + ret = pages_map(NULL, size, noreserve); + if (ret == NULL) + return (NULL); + + offset = CHUNK_ADDR2OFFSET(ret); + if (offset != 0) { + MMAP_UNALIGNED_SET(true); + /* Try to extend chunk boundary. */ + if (pages_map((void *)((uintptr_t)ret + size), + chunksize - offset, noreserve) == NULL) { + /* + * Extension failed. Clean up, then revert to + * the reliable-but-expensive method. + */ + pages_unmap(ret, size); + ret = chunk_alloc_mmap_slow(size, true, + noreserve); + } else { + /* Clean up unneeded leading space. */ + pages_unmap(ret, chunksize - offset); + ret = (void *)((uintptr_t)ret + (chunksize - + offset)); + } + } + } else + ret = chunk_alloc_mmap_slow(size, false, noreserve); + + return (ret); +} + +void * +chunk_alloc_mmap(size_t size) +{ + + return (chunk_alloc_mmap_internal(size, false)); +} + +void * +chunk_alloc_mmap_noreserve(size_t size) +{ + + return (chunk_alloc_mmap_internal(size, true)); +} + +void +chunk_dealloc_mmap(void *chunk, size_t size) +{ + + pages_unmap(chunk, size); +} + +bool +chunk_mmap_boot(void) +{ + +#ifdef NO_TLS + if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) { + malloc_write("<jemalloc>: Error in pthread_key_create()\n"); + return (true); + } +#endif + + return (false); +} diff --git a/src/chunk_swap.c b/src/chunk_swap.c new file mode 100644 index 0000000..cb25ae0 --- /dev/null +++ b/src/chunk_swap.c @@ -0,0 +1,402 @@ +#define JEMALLOC_CHUNK_SWAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" +#ifdef JEMALLOC_SWAP +/******************************************************************************/ +/* Data. */ + +malloc_mutex_t swap_mtx; +bool swap_enabled; +bool swap_prezeroed; +size_t swap_nfds; +int *swap_fds; +#ifdef JEMALLOC_STATS +size_t swap_avail; +#endif + +/* Base address of the mmap()ed file(s). */ +static void *swap_base; +/* Current end of the space in use (<= swap_max). */ +static void *swap_end; +/* Absolute upper limit on file-backed addresses. */ +static void *swap_max; + +/* + * Trees of chunks that were previously allocated (trees differ only in node + * ordering). These are used when allocating chunks, in an attempt to re-use + * address space. 
Depending on function, different tree orderings are needed, + * which is why there are two trees with the same contents. + */ +static extent_tree_t swap_chunks_szad; +static extent_tree_t swap_chunks_ad; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void *chunk_recycle_swap(size_t size, bool *zero); +static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size); + +/******************************************************************************/ + +static void * +chunk_recycle_swap(size_t size, bool *zero) +{ + extent_node_t *node, key; + + key.addr = NULL; + key.size = size; + malloc_mutex_lock(&swap_mtx); + node = extent_tree_szad_nsearch(&swap_chunks_szad, &key); + if (node != NULL) { + void *ret = node->addr; + + /* Remove node from the tree. */ + extent_tree_szad_remove(&swap_chunks_szad, node); + if (node->size == size) { + extent_tree_ad_remove(&swap_chunks_ad, node); + base_node_dealloc(node); + } else { + /* + * Insert the remainder of node's address range as a + * smaller chunk. Its position within swap_chunks_ad + * does not change. + */ + assert(node->size > size); + node->addr = (void *)((uintptr_t)node->addr + size); + node->size -= size; + extent_tree_szad_insert(&swap_chunks_szad, node); + } +#ifdef JEMALLOC_STATS + swap_avail -= size; +#endif + malloc_mutex_unlock(&swap_mtx); + + if (*zero) + memset(ret, 0, size); + return (ret); + } + malloc_mutex_unlock(&swap_mtx); + + return (NULL); +} + +void * +chunk_alloc_swap(size_t size, bool *zero) +{ + void *ret; + + assert(swap_enabled); + + ret = chunk_recycle_swap(size, zero); + if (ret != NULL) + return (ret); + + malloc_mutex_lock(&swap_mtx); + if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) { + ret = swap_end; + swap_end = (void *)((uintptr_t)swap_end + size); +#ifdef JEMALLOC_STATS + swap_avail -= size; +#endif + malloc_mutex_unlock(&swap_mtx); + + if (swap_prezeroed) + *zero = true; + else if (*zero) + memset(ret, 0, size); + } else { + malloc_mutex_unlock(&swap_mtx); + return (NULL); + } + + return (ret); +} + +static extent_node_t * +chunk_dealloc_swap_record(void *chunk, size_t size) +{ + extent_node_t *xnode, *node, *prev, key; + + xnode = NULL; + while (true) { + key.addr = (void *)((uintptr_t)chunk + size); + node = extent_tree_ad_nsearch(&swap_chunks_ad, &key); + /* Try to coalesce forward. */ + if (node != NULL && node->addr == key.addr) { + /* + * Coalesce chunk with the following address range. + * This does not change the position within + * swap_chunks_ad, so only remove/insert from/into + * swap_chunks_szad. + */ + extent_tree_szad_remove(&swap_chunks_szad, node); + node->addr = chunk; + node->size += size; + extent_tree_szad_insert(&swap_chunks_szad, node); + break; + } else if (xnode == NULL) { + /* + * It is possible that base_node_alloc() will cause a + * new base chunk to be allocated, so take care not to + * deadlock on swap_mtx, and recover if another thread + * deallocates an adjacent chunk while this one is busy + * allocating xnode. + */ + malloc_mutex_unlock(&swap_mtx); + xnode = base_node_alloc(); + malloc_mutex_lock(&swap_mtx); + if (xnode == NULL) + return (NULL); + } else { + /* Coalescing forward failed, so insert a new node. 
*/ + node = xnode; + xnode = NULL; + node->addr = chunk; + node->size = size; + extent_tree_ad_insert(&swap_chunks_ad, node); + extent_tree_szad_insert(&swap_chunks_szad, node); + break; + } + } + /* Discard xnode if it ended up unused do to a race. */ + if (xnode != NULL) + base_node_dealloc(xnode); + + /* Try to coalesce backward. */ + prev = extent_tree_ad_prev(&swap_chunks_ad, node); + if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == + chunk) { + /* + * Coalesce chunk with the previous address range. This does + * not change the position within swap_chunks_ad, so only + * remove/insert node from/into swap_chunks_szad. + */ + extent_tree_szad_remove(&swap_chunks_szad, prev); + extent_tree_ad_remove(&swap_chunks_ad, prev); + + extent_tree_szad_remove(&swap_chunks_szad, node); + node->addr = prev->addr; + node->size += prev->size; + extent_tree_szad_insert(&swap_chunks_szad, node); + + base_node_dealloc(prev); + } + + return (node); +} + +bool +chunk_in_swap(void *chunk) +{ + bool ret; + + assert(swap_enabled); + + malloc_mutex_lock(&swap_mtx); + if ((uintptr_t)chunk >= (uintptr_t)swap_base + && (uintptr_t)chunk < (uintptr_t)swap_max) + ret = true; + else + ret = false; + malloc_mutex_unlock(&swap_mtx); + + return (ret); +} + +bool +chunk_dealloc_swap(void *chunk, size_t size) +{ + bool ret; + + assert(swap_enabled); + + malloc_mutex_lock(&swap_mtx); + if ((uintptr_t)chunk >= (uintptr_t)swap_base + && (uintptr_t)chunk < (uintptr_t)swap_max) { + extent_node_t *node; + + /* Try to coalesce with other unused chunks. */ + node = chunk_dealloc_swap_record(chunk, size); + if (node != NULL) { + chunk = node->addr; + size = node->size; + } + + /* + * Try to shrink the in-use memory if this chunk is at the end + * of the in-use memory. + */ + if ((void *)((uintptr_t)chunk + size) == swap_end) { + swap_end = (void *)((uintptr_t)swap_end - size); + + if (node != NULL) { + extent_tree_szad_remove(&swap_chunks_szad, + node); + extent_tree_ad_remove(&swap_chunks_ad, node); + base_node_dealloc(node); + } + } else + madvise(chunk, size, MADV_DONTNEED); + +#ifdef JEMALLOC_STATS + swap_avail += size; +#endif + ret = false; + goto RETURN; + } + + ret = true; +RETURN: + malloc_mutex_unlock(&swap_mtx); + return (ret); +} + +bool +chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed) +{ + bool ret; + unsigned i; + off_t off; + void *vaddr; + size_t cumsize, voff; + size_t sizes[nfds]; + + malloc_mutex_lock(&swap_mtx); + + /* Get file sizes. */ + for (i = 0, cumsize = 0; i < nfds; i++) { + off = lseek(fds[i], 0, SEEK_END); + if (off == ((off_t)-1)) { + ret = true; + goto RETURN; + } + if (PAGE_CEILING(off) != off) { + /* Truncate to a multiple of the page size. */ + off &= ~PAGE_MASK; + if (ftruncate(fds[i], off) != 0) { + ret = true; + goto RETURN; + } + } + sizes[i] = off; + if (cumsize + off < cumsize) { + /* + * Cumulative file size is greater than the total + * address space. Bail out while it's still obvious + * what the problem is. + */ + ret = true; + goto RETURN; + } + cumsize += off; + } + + /* Round down to a multiple of the chunk size. */ + cumsize &= ~chunksize_mask; + if (cumsize == 0) { + ret = true; + goto RETURN; + } + + /* + * Allocate a chunk-aligned region of anonymous memory, which will + * be the final location for the memory-mapped files. + */ + vaddr = chunk_alloc_mmap_noreserve(cumsize); + if (vaddr == NULL) { + ret = true; + goto RETURN; + } + + /* Overlay the files onto the anonymous mapping. 
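+	 * For illustration (assuming 4MiB chunks): two swap files of 10MiB
+	 * and 6MiB give cumsize == 16MiB, already a chunk multiple, so a
+	 * 16MiB chunk-aligned anonymous region was reserved above and the
+	 * files are now mapped over it at voff 0 and voff 10MiB,
+	 * respectively.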
*/ + for (i = 0, voff = 0; i < nfds; i++) { + void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i], + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0); + if (addr == MAP_FAILED) { + char buf[BUFERROR_BUF]; + + + buferror(errno, buf, sizeof(buf)); + malloc_write( + "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): "); + malloc_write(buf); + malloc_write("\n"); + if (opt_abort) + abort(); + if (munmap(vaddr, voff) == -1) { + buferror(errno, buf, sizeof(buf)); + malloc_write("<jemalloc>: Error in munmap(): "); + malloc_write(buf); + malloc_write("\n"); + } + ret = true; + goto RETURN; + } + assert(addr == (void *)((uintptr_t)vaddr + voff)); + + /* + * Tell the kernel that the mapping will be accessed randomly, + * and that it should not gratuitously sync pages to the + * filesystem. + */ +#ifdef MADV_RANDOM + madvise(addr, sizes[i], MADV_RANDOM); +#endif +#ifdef MADV_NOSYNC + madvise(addr, sizes[i], MADV_NOSYNC); +#endif + + voff += sizes[i]; + } + + swap_prezeroed = prezeroed; + swap_base = vaddr; + swap_end = swap_base; + swap_max = (void *)((uintptr_t)vaddr + cumsize); + + /* Copy the fds array for mallctl purposes. */ + swap_fds = (int *)base_alloc(nfds * sizeof(int)); + if (swap_fds == NULL) { + ret = true; + goto RETURN; + } + memcpy(swap_fds, fds, nfds * sizeof(int)); + swap_nfds = nfds; + +#ifdef JEMALLOC_STATS + swap_avail = cumsize; +#endif + + swap_enabled = true; + + ret = false; +RETURN: + malloc_mutex_unlock(&swap_mtx); + return (ret); +} + +bool +chunk_swap_boot(void) +{ + + if (malloc_mutex_init(&swap_mtx)) + return (true); + + swap_enabled = false; + swap_prezeroed = false; /* swap.* mallctl's depend on this. */ + swap_nfds = 0; + swap_fds = NULL; +#ifdef JEMALLOC_STATS + swap_avail = 0; +#endif + swap_base = NULL; + swap_end = NULL; + swap_max = NULL; + + extent_tree_szad_new(&swap_chunks_szad); + extent_tree_ad_new(&swap_chunks_ad); + + return (false); +} + +/******************************************************************************/ +#endif /* JEMALLOC_SWAP */ diff --git a/src/ckh.c b/src/ckh.c new file mode 100644 index 0000000..143b5b5 --- /dev/null +++ b/src/ckh.c @@ -0,0 +1,619 @@ +/* + ******************************************************************************* + * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each + * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash + * functions are employed. The original cuckoo hashing algorithm was described + * in: + * + * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms + * 51(2):122-144. + * + * Generalization of cuckoo hashing was discussed in: + * + * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical + * alternative to traditional hash tables. In Proceedings of the 7th + * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, + * January 2006. + * + * This implementation uses precisely two hash functions because that is the + * fewest that can work, and supporting multiple hashes is an implementation + * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) + * that shows approximate expected maximum load factors for various + * configurations: + * + * | #cells/bucket | + * #hashes | 1 | 2 | 4 | 8 | + * --------+-------+-------+-------+-------+ + * 1 | 0.006 | 0.006 | 0.03 | 0.12 | + * 2 | 0.49 | 0.86 |>0.93< |>0.96< | + * 3 | 0.91 | 0.97 | 0.98 | 0.999 | + * 4 | 0.97 | 0.99 | 0.999 | | + * + * The number of cells per bucket is chosen such that a bucket fits in one cache + * line. 
So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, + * respectively. + * + ******************************************************************************/ +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static bool ckh_grow(ckh_t *ckh); +static void ckh_shrink(ckh_t *ckh); + +/******************************************************************************/ + +/* + * Search bucket for key and return the cell number if found; SIZE_T_MAX + * otherwise. + */ +JEMALLOC_INLINE size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) +{ + ckhc_t *cell; + unsigned i; + + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + if (cell->key != NULL && ckh->keycomp(key, cell->key)) + return ((bucket << LG_CKH_BUCKET_CELLS) + i); + } + + return (SIZE_T_MAX); +} + +/* + * Search table for key and return cell number if found; SIZE_T_MAX otherwise. + */ +JEMALLOC_INLINE size_t +ckh_isearch(ckh_t *ckh, const void *key) +{ + size_t hash1, hash2, bucket, cell; + + assert(ckh != NULL); + dassert(ckh->magic == CKH_MAGIC); + + ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2); + + /* Search primary bucket. */ + bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + if (cell != SIZE_T_MAX) + return (cell); + + /* Search secondary bucket. */ + bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + return (cell); +} + +JEMALLOC_INLINE bool +ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, + const void *data) +{ + ckhc_t *cell; + unsigned offset, i; + + /* + * Cycle through the cells in the bucket, starting at a random position. + * The randomness avoids worst-case search overhead as buckets fill up. + */ + prn32(offset, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C); + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; + if (cell->key == NULL) { + cell->key = key; + cell->data = data; + ckh->count++; + return (false); + } + } + + return (true); +} + +/* + * No space is available in bucket. Randomly evict an item, then try to find an + * alternate location for that item. Iteratively repeat this + * eviction/relocation procedure until either success or detection of an + * eviction/relocation bucket cycle. + */ +JEMALLOC_INLINE bool +ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, + void const **argdata) +{ + const void *key, *data, *tkey, *tdata; + ckhc_t *cell; + size_t hash1, hash2, bucket, tbucket; + unsigned i; + + bucket = argbucket; + key = *argkey; + data = *argdata; + while (true) { + /* + * Choose a random item within the bucket to evict. This is + * critical to correct function, because without (eventually) + * evicting all items within a bucket during iteration, it + * would be possible to get stuck in an infinite loop if there + * were an item for which both hashes indicated the same + * bucket. + */ + prn32(i, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C); + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + assert(cell->key != NULL); + + /* Swap cell->{key,data} and {key,data} (evict). 
*/ + tkey = cell->key; tdata = cell->data; + cell->key = key; cell->data = data; + key = tkey; data = tdata; + +#ifdef CKH_COUNT + ckh->nrelocs++; +#endif + + /* Find the alternate bucket for the evicted item. */ + ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2); + tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (tbucket == bucket) { + tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1); + /* + * It may be that (tbucket == bucket) still, if the + * item's hashes both indicate this bucket. However, + * we are guaranteed to eventually escape this bucket + * during iteration, assuming pseudo-random item + * selection (true randomness would make infinite + * looping a remote possibility). The reason we can + * never get trapped forever is that there are two + * cases: + * + * 1) This bucket == argbucket, so we will quickly + * detect an eviction cycle and terminate. + * 2) An item was evicted to this bucket from another, + * which means that at least one item in this bucket + * has hashes that indicate distinct buckets. + */ + } + /* Check for a cycle. */ + if (tbucket == argbucket) { + *argkey = key; + *argdata = data; + return (true); + } + + bucket = tbucket; + if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) + return (false); + } +} + +JEMALLOC_INLINE bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) +{ + size_t hash1, hash2, bucket; + const void *key = *argkey; + const void *data = *argdata; + + ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2); + + /* Try to insert in primary bucket. */ + bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) + return (false); + + /* Try to insert in secondary bucket. */ + bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) + return (false); + + /* + * Try to find a place for this item via iterative eviction/relocation. + */ + return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); +} + +/* + * Try to rebuild the hash table from scratch by inserting all items from the + * old table into the new. + */ +JEMALLOC_INLINE bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) +{ + size_t count, i, nins; + const void *key, *data; + + count = ckh->count; + ckh->count = 0; + for (i = nins = 0; nins < count; i++) { + if (aTab[i].key != NULL) { + key = aTab[i].key; + data = aTab[i].data; + if (ckh_try_insert(ckh, &key, &data)) { + ckh->count = count; + return (true); + } + nins++; + } + } + + return (false); +} + +static bool +ckh_grow(ckh_t *ckh) +{ + bool ret; + ckhc_t *tab, *ttab; + size_t lg_curcells; + unsigned lg_prevbuckets; + +#ifdef CKH_COUNT + ckh->ngrows++; +#endif + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table will have to be doubled more than once in order to create a + * usable table. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; + while (true) { + size_t usize; + + lg_curcells++; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL); + if (usize == 0) { + ret = true; + goto RETURN; + } + tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + if (tab == NULL) { + ret = true; + goto RETURN; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (ckh_rebuild(ckh, tab) == false) { + idalloc(tab); + break; + } + + /* Rebuilding failed, so back out partially rebuilt table. 
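+		 * Reaching this point means that ckh_rebuild() returned true,
+		 * so restore the old table and retry with a table that has
+		 * twice as many cells.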
*/ + idalloc(ckh->tab); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; + } + + ret = false; +RETURN: + return (ret); +} + +static void +ckh_shrink(ckh_t *ckh) +{ + ckhc_t *tab, *ttab; + size_t lg_curcells, usize; + unsigned lg_prevbuckets; + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table rebuild will fail. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL); + if (usize == 0) + return; + tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + if (tab == NULL) { + /* + * An OOM error isn't worth propagating, since it doesn't + * prevent this or future operations from proceeding. + */ + return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (ckh_rebuild(ckh, tab) == false) { + idalloc(tab); +#ifdef CKH_COUNT + ckh->nshrinks++; +#endif + return; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloc(ckh->tab); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; +#ifdef CKH_COUNT + ckh->nshrinkfails++; +#endif +} + +bool +ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) +{ + bool ret; + size_t mincells, usize; + unsigned lg_mincells; + + assert(minitems > 0); + assert(hash != NULL); + assert(keycomp != NULL); + +#ifdef CKH_COUNT + ckh->ngrows = 0; + ckh->nshrinks = 0; + ckh->nshrinkfails = 0; + ckh->ninserts = 0; + ckh->nrelocs = 0; +#endif + ckh->prn_state = 42; /* Value doesn't really matter. */ + ckh->count = 0; + + /* + * Find the minimum power of 2 that is large enough to fit aBaseCount + * entries. We are using (2+,2) cuckoo hashing, which has an expected + * maximum load factor of at least ~0.86, so 0.75 is a conservative load + * factor that will typically allow 2^aLgMinItems to fit without ever + * growing the table. + */ + assert(LG_CKH_BUCKET_CELLS > 0); + mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; + for (lg_mincells = LG_CKH_BUCKET_CELLS; + (ZU(1) << lg_mincells) < mincells; + lg_mincells++) + ; /* Do nothing. 
*/ + ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->hash = hash; + ckh->keycomp = keycomp; + + usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE, NULL); + if (usize == 0) { + ret = true; + goto RETURN; + } + ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + if (ckh->tab == NULL) { + ret = true; + goto RETURN; + } + +#ifdef JEMALLOC_DEBUG + ckh->magic = CKH_MAGIC; +#endif + + ret = false; +RETURN: + return (ret); +} + +void +ckh_delete(ckh_t *ckh) +{ + + assert(ckh != NULL); + dassert(ckh->magic == CKH_MAGIC); + +#ifdef CKH_VERBOSE + malloc_printf( + "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64"," + " nshrinkfails: %"PRIu64", ninserts: %"PRIu64"," + " nrelocs: %"PRIu64"\n", __func__, ckh, + (unsigned long long)ckh->ngrows, + (unsigned long long)ckh->nshrinks, + (unsigned long long)ckh->nshrinkfails, + (unsigned long long)ckh->ninserts, + (unsigned long long)ckh->nrelocs); +#endif + + idalloc(ckh->tab); +#ifdef JEMALLOC_DEBUG + memset(ckh, 0x5a, sizeof(ckh_t)); +#endif +} + +size_t +ckh_count(ckh_t *ckh) +{ + + assert(ckh != NULL); + dassert(ckh->magic == CKH_MAGIC); + + return (ckh->count); +} + +bool +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) +{ + size_t i, ncells; + + for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { + if (ckh->tab[i].key != NULL) { + if (key != NULL) + *key = (void *)ckh->tab[i].key; + if (data != NULL) + *data = (void *)ckh->tab[i].data; + *tabind = i + 1; + return (false); + } + } + + return (true); +} + +bool +ckh_insert(ckh_t *ckh, const void *key, const void *data) +{ + bool ret; + + assert(ckh != NULL); + dassert(ckh->magic == CKH_MAGIC); + assert(ckh_search(ckh, key, NULL, NULL)); + +#ifdef CKH_COUNT + ckh->ninserts++; +#endif + + while (ckh_try_insert(ckh, &key, &data)) { + if (ckh_grow(ckh)) { + ret = true; + goto RETURN; + } + } + + ret = false; +RETURN: + return (ret); +} + +bool +ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) +{ + size_t cell; + + assert(ckh != NULL); + dassert(ckh->magic == CKH_MAGIC); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + ckh->tab[cell].key = NULL; + ckh->tab[cell].data = NULL; /* Not necessary. */ + + ckh->count--; + /* Try to halve the table if it is less than 1/4 full. */ + if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets + > ckh->lg_minbuckets) { + /* Ignore error due to OOM. */ + ckh_shrink(ckh); + } + + return (false); + } + + return (true); +} + +bool +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) +{ + size_t cell; + + assert(ckh != NULL); + dassert(ckh->magic == CKH_MAGIC); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + return (false); + } + + return (true); +} + +void +ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2) +{ + size_t ret1, ret2; + uint64_t h; + + assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64)); + assert(hash1 != NULL); + assert(hash2 != NULL); + + h = hash(key, strlen((const char *)key), 0x94122f335b332aeaLLU); + if (minbits <= 32) { + /* + * Avoid doing multiple hashes, since a single hash provides + * enough bits. 
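+		 * The low 32 bits of the hash become hash1 and the high 32
+		 * bits become hash2.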
+ */ + ret1 = h & ZU(0xffffffffU); + ret2 = h >> 32; + } else { + ret1 = h; + ret2 = hash(key, strlen((const char *)key), + 0x8432a476666bbc13U); + } + + *hash1 = ret1; + *hash2 = ret2; +} + +bool +ckh_string_keycomp(const void *k1, const void *k2) +{ + + assert(k1 != NULL); + assert(k2 != NULL); + + return (strcmp((char *)k1, (char *)k2) ? false : true); +} + +void +ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1, + size_t *hash2) +{ + size_t ret1, ret2; + uint64_t h; + union { + const void *v; + uint64_t i; + } u; + + assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64)); + assert(hash1 != NULL); + assert(hash2 != NULL); + + assert(sizeof(u.v) == sizeof(u.i)); +#if (LG_SIZEOF_PTR != LG_SIZEOF_INT) + u.i = 0; +#endif + u.v = key; + h = hash(&u.i, sizeof(u.i), 0xd983396e68886082LLU); + if (minbits <= 32) { + /* + * Avoid doing multiple hashes, since a single hash provides + * enough bits. + */ + ret1 = h & ZU(0xffffffffU); + ret2 = h >> 32; + } else { + assert(SIZEOF_PTR == 8); + ret1 = h; + ret2 = hash(&u.i, sizeof(u.i), 0x5e2be9aff8709a5dLLU); + } + + *hash1 = ret1; + *hash2 = ret2; +} + +bool +ckh_pointer_keycomp(const void *k1, const void *k2) +{ + + return ((k1 == k2) ? true : false); +} diff --git a/src/ctl.c b/src/ctl.c new file mode 100644 index 0000000..e5336d3 --- /dev/null +++ b/src/ctl.c @@ -0,0 +1,1670 @@ +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* + * ctl_mtx protects the following: + * - ctl_stats.* + * - opt_prof_active + * - swap_enabled + * - swap_prezeroed + */ +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static uint64_t ctl_epoch; +static ctl_stats_t ctl_stats; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
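+ * CTL_PROTO() declares the n##_ctl() handler that backs a leaf mallctl node;
+ * INDEX_PROTO() declares the n##_index() function that resolves the numeric
+ * component of an indexed node.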
*/ + +#define CTL_PROTO(n) \ +static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \ + size_t i); + +#ifdef JEMALLOC_STATS +static bool ctl_arena_init(ctl_arena_stats_t *astats); +#endif +static void ctl_arena_clear(ctl_arena_stats_t *astats); +#ifdef JEMALLOC_STATS +static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, + arena_t *arena); +static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, + ctl_arena_stats_t *astats); +#endif +static void ctl_arena_refresh(arena_t *arena, unsigned i); +static void ctl_refresh(void); +static bool ctl_init(void); +static int ctl_lookup(const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp); + +CTL_PROTO(version) +CTL_PROTO(epoch) +#ifdef JEMALLOC_TCACHE +CTL_PROTO(tcache_flush) +#endif +CTL_PROTO(thread_arena) +#ifdef JEMALLOC_STATS +CTL_PROTO(thread_allocated) +CTL_PROTO(thread_allocatedp) +CTL_PROTO(thread_deallocated) +CTL_PROTO(thread_deallocatedp) +#endif +CTL_PROTO(config_debug) +CTL_PROTO(config_dss) +CTL_PROTO(config_dynamic_page_shift) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_prof) +CTL_PROTO(config_prof_libgcc) +CTL_PROTO(config_prof_libunwind) +CTL_PROTO(config_stats) +CTL_PROTO(config_swap) +CTL_PROTO(config_sysv) +CTL_PROTO(config_tcache) +CTL_PROTO(config_tiny) +CTL_PROTO(config_tls) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) +CTL_PROTO(opt_lg_qspace_max) +CTL_PROTO(opt_lg_cspace_max) +CTL_PROTO(opt_lg_chunk) +CTL_PROTO(opt_narenas) +CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_stats_print) +#ifdef JEMALLOC_FILL +CTL_PROTO(opt_junk) +CTL_PROTO(opt_zero) +#endif +#ifdef JEMALLOC_SYSV +CTL_PROTO(opt_sysv) +#endif +#ifdef JEMALLOC_XMALLOC +CTL_PROTO(opt_xmalloc) +#endif +#ifdef JEMALLOC_TCACHE +CTL_PROTO(opt_tcache) +CTL_PROTO(opt_lg_tcache_gc_sweep) +#endif +#ifdef JEMALLOC_PROF +CTL_PROTO(opt_prof) +CTL_PROTO(opt_prof_prefix) +CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_lg_prof_bt_max) +CTL_PROTO(opt_lg_prof_sample) +CTL_PROTO(opt_lg_prof_interval) +CTL_PROTO(opt_prof_gdump) +CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_accum) +CTL_PROTO(opt_lg_prof_tcmax) +#endif +#ifdef JEMALLOC_SWAP +CTL_PROTO(opt_overcommit) +#endif +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) +CTL_PROTO(arenas_bin_i_run_size) +INDEX_PROTO(arenas_bin_i) +CTL_PROTO(arenas_lrun_i_size) +INDEX_PROTO(arenas_lrun_i) +CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_initialized) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_cacheline) +CTL_PROTO(arenas_subpage) +CTL_PROTO(arenas_pagesize) +CTL_PROTO(arenas_chunksize) +#ifdef JEMALLOC_TINY +CTL_PROTO(arenas_tspace_min) +CTL_PROTO(arenas_tspace_max) +#endif +CTL_PROTO(arenas_qspace_min) +CTL_PROTO(arenas_qspace_max) +CTL_PROTO(arenas_cspace_min) +CTL_PROTO(arenas_cspace_max) +CTL_PROTO(arenas_sspace_min) +CTL_PROTO(arenas_sspace_max) +#ifdef JEMALLOC_TCACHE +CTL_PROTO(arenas_tcache_max) +#endif +CTL_PROTO(arenas_ntbins) +CTL_PROTO(arenas_nqbins) +CTL_PROTO(arenas_ncbins) +CTL_PROTO(arenas_nsbins) +CTL_PROTO(arenas_nbins) +#ifdef JEMALLOC_TCACHE +CTL_PROTO(arenas_nhbins) +#endif +CTL_PROTO(arenas_nlruns) +CTL_PROTO(arenas_purge) +#ifdef JEMALLOC_PROF +CTL_PROTO(prof_active) +CTL_PROTO(prof_dump) +CTL_PROTO(prof_interval) +#endif +#ifdef JEMALLOC_STATS +CTL_PROTO(stats_chunks_current) +CTL_PROTO(stats_chunks_total) +CTL_PROTO(stats_chunks_high) +CTL_PROTO(stats_huge_allocated) 
+CTL_PROTO(stats_huge_nmalloc) +CTL_PROTO(stats_huge_ndalloc) +CTL_PROTO(stats_arenas_i_small_allocated) +CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_allocated) +CTL_PROTO(stats_arenas_i_bins_j_nmalloc) +CTL_PROTO(stats_arenas_i_bins_j_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) +#ifdef JEMALLOC_TCACHE +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) +#endif +CTL_PROTO(stats_arenas_i_bins_j_nruns) +CTL_PROTO(stats_arenas_i_bins_j_nreruns) +CTL_PROTO(stats_arenas_i_bins_j_highruns) +CTL_PROTO(stats_arenas_i_bins_j_curruns) +INDEX_PROTO(stats_arenas_i_bins_j) +CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) +CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) +CTL_PROTO(stats_arenas_i_lruns_j_nrequests) +CTL_PROTO(stats_arenas_i_lruns_j_highruns) +CTL_PROTO(stats_arenas_i_lruns_j_curruns) +INDEX_PROTO(stats_arenas_i_lruns_j) +#endif +CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) +#ifdef JEMALLOC_STATS +CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_npurge) +CTL_PROTO(stats_arenas_i_nmadvise) +CTL_PROTO(stats_arenas_i_purged) +#endif +INDEX_PROTO(stats_arenas_i) +#ifdef JEMALLOC_STATS +CTL_PROTO(stats_cactive) +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) +CTL_PROTO(stats_mapped) +#endif +#ifdef JEMALLOC_SWAP +# ifdef JEMALLOC_STATS +CTL_PROTO(swap_avail) +# endif +CTL_PROTO(swap_prezeroed) +CTL_PROTO(swap_nfds) +CTL_PROTO(swap_fds) +#endif + +/******************************************************************************/ +/* mallctl tree. */ + +/* Maximum tree depth. */ +#define CTL_MAX_DEPTH 6 + +#define NAME(n) true, {.named = {n +#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL +#define CTL(c) 0, NULL}}, c##_ctl + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. 
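+ * An indexed node is selected by a numeric name component, e.g. the "0" in
+ * "stats.arenas.0.pdirty".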
+ */ +#define INDEX(i) false, {.indexed = {i##_index}}, NULL + +#ifdef JEMALLOC_TCACHE +static const ctl_node_t tcache_node[] = { + {NAME("flush"), CTL(tcache_flush)} +}; +#endif + +static const ctl_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)} +#ifdef JEMALLOC_STATS + , + {NAME("allocated"), CTL(thread_allocated)}, + {NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)} +#endif +}; + +static const ctl_node_t config_node[] = { + {NAME("debug"), CTL(config_debug)}, + {NAME("dss"), CTL(config_dss)}, + {NAME("dynamic_page_shift"), CTL(config_dynamic_page_shift)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("swap"), CTL(config_swap)}, + {NAME("sysv"), CTL(config_sysv)}, + {NAME("tcache"), CTL(config_tcache)}, + {NAME("tiny"), CTL(config_tiny)}, + {NAME("tls"), CTL(config_tls)}, + {NAME("xmalloc"), CTL(config_xmalloc)} +}; + +static const ctl_node_t opt_node[] = { + {NAME("abort"), CTL(opt_abort)}, + {NAME("lg_qspace_max"), CTL(opt_lg_qspace_max)}, + {NAME("lg_cspace_max"), CTL(opt_lg_cspace_max)}, + {NAME("lg_chunk"), CTL(opt_lg_chunk)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, + {NAME("stats_print"), CTL(opt_stats_print)} +#ifdef JEMALLOC_FILL + , + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)} +#endif +#ifdef JEMALLOC_SYSV + , + {NAME("sysv"), CTL(opt_sysv)} +#endif +#ifdef JEMALLOC_XMALLOC + , + {NAME("xmalloc"), CTL(opt_xmalloc)} +#endif +#ifdef JEMALLOC_TCACHE + , + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)} +#endif +#ifdef JEMALLOC_PROF + , + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("lg_prof_bt_max"), CTL(opt_lg_prof_bt_max)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)}, + {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)} +#endif +#ifdef JEMALLOC_SWAP + , + {NAME("overcommit"), CTL(opt_overcommit)} +#endif +}; + +static const ctl_node_t arenas_bin_i_node[] = { + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("run_size"), CTL(arenas_bin_i_run_size)} +}; +static const ctl_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(arenas_bin_i)} +}; + +static const ctl_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + +static const ctl_node_t arenas_lrun_i_node[] = { + {NAME("size"), CTL(arenas_lrun_i_size)} +}; +static const ctl_node_t super_arenas_lrun_i_node[] = { + {NAME(""), CHILD(arenas_lrun_i)} +}; + +static const ctl_node_t arenas_lrun_node[] = { + {INDEX(arenas_lrun_i)} +}; + +static const ctl_node_t arenas_node[] = { + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("initialized"), CTL(arenas_initialized)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("cacheline"), CTL(arenas_cacheline)}, + {NAME("subpage"), CTL(arenas_subpage)}, + {NAME("pagesize"), CTL(arenas_pagesize)}, + {NAME("chunksize"), CTL(arenas_chunksize)}, +#ifdef JEMALLOC_TINY + {NAME("tspace_min"), CTL(arenas_tspace_min)}, + 
{NAME("tspace_max"), CTL(arenas_tspace_max)}, +#endif + {NAME("qspace_min"), CTL(arenas_qspace_min)}, + {NAME("qspace_max"), CTL(arenas_qspace_max)}, + {NAME("cspace_min"), CTL(arenas_cspace_min)}, + {NAME("cspace_max"), CTL(arenas_cspace_max)}, + {NAME("sspace_min"), CTL(arenas_sspace_min)}, + {NAME("sspace_max"), CTL(arenas_sspace_max)}, +#ifdef JEMALLOC_TCACHE + {NAME("tcache_max"), CTL(arenas_tcache_max)}, +#endif + {NAME("ntbins"), CTL(arenas_ntbins)}, + {NAME("nqbins"), CTL(arenas_nqbins)}, + {NAME("ncbins"), CTL(arenas_ncbins)}, + {NAME("nsbins"), CTL(arenas_nsbins)}, + {NAME("nbins"), CTL(arenas_nbins)}, +#ifdef JEMALLOC_TCACHE + {NAME("nhbins"), CTL(arenas_nhbins)}, +#endif + {NAME("bin"), CHILD(arenas_bin)}, + {NAME("nlruns"), CTL(arenas_nlruns)}, + {NAME("lrun"), CHILD(arenas_lrun)}, + {NAME("purge"), CTL(arenas_purge)} +}; + +#ifdef JEMALLOC_PROF +static const ctl_node_t prof_node[] = { + {NAME("active"), CTL(prof_active)}, + {NAME("dump"), CTL(prof_dump)}, + {NAME("interval"), CTL(prof_interval)} +}; +#endif + +#ifdef JEMALLOC_STATS +static const ctl_node_t stats_chunks_node[] = { + {NAME("current"), CTL(stats_chunks_current)}, + {NAME("total"), CTL(stats_chunks_total)}, + {NAME("high"), CTL(stats_chunks_high)} +}; + +static const ctl_node_t stats_huge_node[] = { + {NAME("allocated"), CTL(stats_huge_allocated)}, + {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, + {NAME("ndalloc"), CTL(stats_huge_ndalloc)} +}; + +static const ctl_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} +}; + +static const ctl_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} +}; + +static const ctl_node_t stats_arenas_i_bins_j_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, +#ifdef JEMALLOC_TCACHE + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, +#endif + {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, + {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, + {NAME("highruns"), CTL(stats_arenas_i_bins_j_highruns)}, + {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} +}; +static const ctl_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(stats_arenas_i_bins_j)} +}; + +static const ctl_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + +static const ctl_node_t stats_arenas_i_lruns_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, + {NAME("highruns"), CTL(stats_arenas_i_lruns_j_highruns)}, + {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +}; +static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = { + {NAME(""), CHILD(stats_arenas_i_lruns_j)} +}; + +static const ctl_node_t stats_arenas_i_lruns_node[] = { + {INDEX(stats_arenas_i_lruns_j)} +}; +#endif + +static const ctl_node_t stats_arenas_i_node[] = { + 
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)} +#ifdef JEMALLOC_STATS + , + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("npurge"), CTL(stats_arenas_i_npurge)}, + {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, + {NAME("purged"), CTL(stats_arenas_i_purged)}, + {NAME("small"), CHILD(stats_arenas_i_small)}, + {NAME("large"), CHILD(stats_arenas_i_large)}, + {NAME("bins"), CHILD(stats_arenas_i_bins)}, + {NAME("lruns"), CHILD(stats_arenas_i_lruns)} +#endif +}; +static const ctl_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(stats_arenas_i)} +}; + +static const ctl_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + +static const ctl_node_t stats_node[] = { +#ifdef JEMALLOC_STATS + {NAME("cactive"), CTL(stats_cactive)}, + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("chunks"), CHILD(stats_chunks)}, + {NAME("huge"), CHILD(stats_huge)}, +#endif + {NAME("arenas"), CHILD(stats_arenas)} +}; + +#ifdef JEMALLOC_SWAP +static const ctl_node_t swap_node[] = { +# ifdef JEMALLOC_STATS + {NAME("avail"), CTL(swap_avail)}, +# endif + {NAME("prezeroed"), CTL(swap_prezeroed)}, + {NAME("nfds"), CTL(swap_nfds)}, + {NAME("fds"), CTL(swap_fds)} +}; +#endif + +static const ctl_node_t root_node[] = { + {NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, +#ifdef JEMALLOC_TCACHE + {NAME("tcache"), CHILD(tcache)}, +#endif + {NAME("thread"), CHILD(thread)}, + {NAME("config"), CHILD(config)}, + {NAME("opt"), CHILD(opt)}, + {NAME("arenas"), CHILD(arenas)}, +#ifdef JEMALLOC_PROF + {NAME("prof"), CHILD(prof)}, +#endif + {NAME("stats"), CHILD(stats)} +#ifdef JEMALLOC_SWAP + , + {NAME("swap"), CHILD(swap)} +#endif +}; +static const ctl_node_t super_root_node[] = { + {NAME(""), CHILD(root)} +}; + +#undef NAME +#undef CHILD +#undef CTL +#undef INDEX + +/******************************************************************************/ + +#ifdef JEMALLOC_STATS +static bool +ctl_arena_init(ctl_arena_stats_t *astats) +{ + + if (astats->bstats == NULL) { + astats->bstats = (malloc_bin_stats_t *)base_alloc(nbins * + sizeof(malloc_bin_stats_t)); + if (astats->bstats == NULL) + return (true); + } + if (astats->lstats == NULL) { + astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * + sizeof(malloc_large_stats_t)); + if (astats->lstats == NULL) + return (true); + } + + return (false); +} +#endif + +static void +ctl_arena_clear(ctl_arena_stats_t *astats) +{ + + astats->pactive = 0; + astats->pdirty = 0; +#ifdef JEMALLOC_STATS + memset(&astats->astats, 0, sizeof(arena_stats_t)); + astats->allocated_small = 0; + astats->nmalloc_small = 0; + astats->ndalloc_small = 0; + astats->nrequests_small = 0; + memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t)); + memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); +#endif +} + +#ifdef JEMALLOC_STATS +static void +ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) +{ + unsigned i; + + arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty, + &cstats->astats, cstats->bstats, cstats->lstats); + + for (i = 0; i < nbins; i++) { + cstats->allocated_small += cstats->bstats[i].allocated; + cstats->nmalloc_small += cstats->bstats[i].nmalloc; + cstats->ndalloc_small += cstats->bstats[i].ndalloc; + cstats->nrequests_small += cstats->bstats[i].nrequests; + } +} + +static void +ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, 
ctl_arena_stats_t *astats) +{ + unsigned i; + + sstats->pactive += astats->pactive; + sstats->pdirty += astats->pdirty; + + sstats->astats.mapped += astats->astats.mapped; + sstats->astats.npurge += astats->astats.npurge; + sstats->astats.nmadvise += astats->astats.nmadvise; + sstats->astats.purged += astats->astats.purged; + + sstats->allocated_small += astats->allocated_small; + sstats->nmalloc_small += astats->nmalloc_small; + sstats->ndalloc_small += astats->ndalloc_small; + sstats->nrequests_small += astats->nrequests_small; + + sstats->astats.allocated_large += astats->astats.allocated_large; + sstats->astats.nmalloc_large += astats->astats.nmalloc_large; + sstats->astats.ndalloc_large += astats->astats.ndalloc_large; + sstats->astats.nrequests_large += astats->astats.nrequests_large; + + for (i = 0; i < nlclasses; i++) { + sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; + sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; + sstats->lstats[i].nrequests += astats->lstats[i].nrequests; + sstats->lstats[i].highruns += astats->lstats[i].highruns; + sstats->lstats[i].curruns += astats->lstats[i].curruns; + } + + for (i = 0; i < nbins; i++) { + sstats->bstats[i].allocated += astats->bstats[i].allocated; + sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sstats->bstats[i].nrequests += astats->bstats[i].nrequests; +#ifdef JEMALLOC_TCACHE + sstats->bstats[i].nfills += astats->bstats[i].nfills; + sstats->bstats[i].nflushes += astats->bstats[i].nflushes; +#endif + sstats->bstats[i].nruns += astats->bstats[i].nruns; + sstats->bstats[i].reruns += astats->bstats[i].reruns; + sstats->bstats[i].highruns += astats->bstats[i].highruns; + sstats->bstats[i].curruns += astats->bstats[i].curruns; + } +} +#endif + +static void +ctl_arena_refresh(arena_t *arena, unsigned i) +{ + ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; + ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas]; + + ctl_arena_clear(astats); + + sstats->nthreads += astats->nthreads; +#ifdef JEMALLOC_STATS + ctl_arena_stats_amerge(astats, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_smerge(sstats, astats); +#else + astats->pactive += arena->nactive; + astats->pdirty += arena->ndirty; + /* Merge into sum stats as well. */ + sstats->pactive += arena->nactive; + sstats->pdirty += arena->ndirty; +#endif +} + +static void +ctl_refresh(void) +{ + unsigned i; + arena_t *tarenas[narenas]; + +#ifdef JEMALLOC_STATS + malloc_mutex_lock(&chunks_mtx); + ctl_stats.chunks.current = stats_chunks.curchunks; + ctl_stats.chunks.total = stats_chunks.nchunks; + ctl_stats.chunks.high = stats_chunks.highchunks; + malloc_mutex_unlock(&chunks_mtx); + + malloc_mutex_lock(&huge_mtx); + ctl_stats.huge.allocated = huge_allocated; + ctl_stats.huge.nmalloc = huge_nmalloc; + ctl_stats.huge.ndalloc = huge_ndalloc; + malloc_mutex_unlock(&huge_mtx); +#endif + + /* + * Clear sum stats, since they will be merged into by + * ctl_arena_refresh(). 
+ */ + ctl_stats.arenas[narenas].nthreads = 0; + ctl_arena_clear(&ctl_stats.arenas[narenas]); + + malloc_mutex_lock(&arenas_lock); + memcpy(tarenas, arenas, sizeof(arena_t *) * narenas); + for (i = 0; i < narenas; i++) { + if (arenas[i] != NULL) + ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; + else + ctl_stats.arenas[i].nthreads = 0; + } + malloc_mutex_unlock(&arenas_lock); + for (i = 0; i < narenas; i++) { + bool initialized = (tarenas[i] != NULL); + + ctl_stats.arenas[i].initialized = initialized; + if (initialized) + ctl_arena_refresh(tarenas[i], i); + } + +#ifdef JEMALLOC_STATS + ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small + + ctl_stats.arenas[narenas].astats.allocated_large + + ctl_stats.huge.allocated; + ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT) + + ctl_stats.huge.allocated; + ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); + +# ifdef JEMALLOC_SWAP + malloc_mutex_lock(&swap_mtx); + ctl_stats.swap_avail = swap_avail; + malloc_mutex_unlock(&swap_mtx); +# endif +#endif + + ctl_epoch++; +} + +static bool +ctl_init(void) +{ + bool ret; + + malloc_mutex_lock(&ctl_mtx); + if (ctl_initialized == false) { +#ifdef JEMALLOC_STATS + unsigned i; +#endif + + /* + * Allocate space for one extra arena stats element, which + * contains summed stats across all arenas. + */ + ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( + (narenas + 1) * sizeof(ctl_arena_stats_t)); + if (ctl_stats.arenas == NULL) { + ret = true; + goto RETURN; + } + memset(ctl_stats.arenas, 0, (narenas + 1) * + sizeof(ctl_arena_stats_t)); + + /* + * Initialize all stats structures, regardless of whether they + * ever get used. Lazy initialization would allow errors to + * cause inconsistent state to be viewable by the application. + */ +#ifdef JEMALLOC_STATS + for (i = 0; i <= narenas; i++) { + if (ctl_arena_init(&ctl_stats.arenas[i])) { + ret = true; + goto RETURN; + } + } +#endif + ctl_stats.arenas[narenas].initialized = true; + + ctl_epoch = 0; + ctl_refresh(); + ctl_initialized = true; + } + + ret = false; +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, + size_t *depthp) +{ + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; + const ctl_node_t *node; + + elm = name; + /* Equivalent to strchrnul(). */ + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + if (elen == 0) { + ret = ENOENT; + goto RETURN; + } + node = super_root_node; + for (i = 0; i < *depthp; i++) { + assert(node->named); + assert(node->u.named.nchildren > 0); + if (node->u.named.children[0].named) { + const ctl_node_t *pnode = node; + + /* Children are named. */ + for (j = 0; j < node->u.named.nchildren; j++) { + const ctl_node_t *child = + &node->u.named.children[j]; + if (strlen(child->u.named.name) == elen + && strncmp(elm, child->u.named.name, + elen) == 0) { + node = child; + if (nodesp != NULL) + nodesp[i] = node; + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto RETURN; + } + } else { + unsigned long index; + const ctl_node_t *inode; + + /* Children are indexed. 
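+			 * The name component must be a decimal index, e.g.
+			 * the "0" in "arenas.bin.0.size".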
*/ + index = strtoul(elm, NULL, 10); + if (index == ULONG_MAX) { + ret = ENOENT; + goto RETURN; + } + + inode = &node->u.named.children[0]; + node = inode->u.indexed.index(mibp, *depthp, + index); + if (node == NULL) { + ret = ENOENT; + goto RETURN; + } + + if (nodesp != NULL) + nodesp[i] = node; + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto RETURN; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto RETURN; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +RETURN: + return (ret); +} + +int +ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + + if (ctl_initialized == false && ctl_init()) { + ret = EAGAIN; + goto RETURN; + } + + depth = CTL_MAX_DEPTH; + ret = ctl_lookup(name, nodes, mib, &depth); + if (ret != 0) + goto RETURN; + + if (nodes[depth-1]->ctl == NULL) { + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + goto RETURN; + } + + ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen); +RETURN: + return(ret); +} + +int +ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + int ret; + + if (ctl_initialized == false && ctl_init()) { + ret = EAGAIN; + goto RETURN; + } + + ret = ctl_lookup(name, NULL, mibp, miblenp); +RETURN: + return(ret); +} + +int +ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const ctl_node_t *node; + size_t i; + + if (ctl_initialized == false && ctl_init()) { + ret = EAGAIN; + goto RETURN; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + if (node->u.named.children[0].named) { + /* Children are named. */ + if (node->u.named.nchildren <= mib[i]) { + ret = ENOENT; + goto RETURN; + } + node = &node->u.named.children[mib[i]]; + } else { + const ctl_node_t *inode; + + /* Indexed element. */ + inode = &node->u.named.children[0]; + node = inode->u.indexed.index(mib, miblen, mib[i]); + if (node == NULL) { + ret = ENOENT; + goto RETURN; + } + } + } + + /* Call the ctl function. */ + if (node->ctl == NULL) { + /* Partial MIB. */ + ret = ENOENT; + goto RETURN; + } + ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); + +RETURN: + return(ret); +} + +bool +ctl_boot(void) +{ + + if (malloc_mutex_init(&ctl_mtx)) + return (true); + + ctl_initialized = false; + + return (false); +} + +/******************************************************************************/ +/* *_ctl() functions. */ + +#define READONLY() do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto RETURN; \ + } \ +} while (0) + +#define WRITEONLY() do { \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto RETURN; \ + } \ +} while (0) + +#define VOID() do { \ + READONLY(); \ + WRITEONLY(); \ +} while (0) + +#define READ(v, t) do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? 
sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&v, copylen); \ + ret = EINVAL; \ + goto RETURN; \ + } else \ + *(t *)oldp = v; \ + } \ +} while (0) + +#define WRITE(v, t) do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto RETURN; \ + } \ + v = *(t *)newp; \ + } \ +} while (0) + +#define CTL_RO_GEN(n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + malloc_mutex_lock(&ctl_mtx); \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +RETURN: \ + malloc_mutex_unlock(&ctl_mtx); \ + return (ret); \ +} + +/* + * ctl_mtx is not acquired, under the assumption that no pertinent data will + * mutate during the call. + */ +#define CTL_RO_NL_GEN(n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +RETURN: \ + return (ret); \ +} + +#define CTL_RO_TRUE_GEN(n) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + bool oldval; \ + \ + READONLY(); \ + oldval = true; \ + READ(oldval, bool); \ + \ + ret = 0; \ +RETURN: \ + return (ret); \ +} + +#define CTL_RO_FALSE_GEN(n) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + bool oldval; \ + \ + READONLY(); \ + oldval = false; \ + READ(oldval, bool); \ + \ + ret = 0; \ +RETURN: \ + return (ret); \ +} + +CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) + +static int +epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + uint64_t newval; + + malloc_mutex_lock(&ctl_mtx); + newval = 0; + WRITE(newval, uint64_t); + if (newval != 0) + ctl_refresh(); + READ(ctl_epoch, uint64_t); + + ret = 0; +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +#ifdef JEMALLOC_TCACHE +static int +tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + tcache_t *tcache; + + VOID(); + + tcache = TCACHE_GET(); + if (tcache == NULL) { + ret = 0; + goto RETURN; + } + tcache_destroy(tcache); + TCACHE_SET(NULL); + + ret = 0; +RETURN: + return (ret); +} +#endif + +static int +thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + unsigned newind, oldind; + + newind = oldind = choose_arena()->ind; + WRITE(newind, unsigned); + READ(oldind, unsigned); + if (newind != oldind) { + arena_t *arena; + + if (newind >= narenas) { + /* New arena index is out of range. */ + ret = EFAULT; + goto RETURN; + } + + /* Initialize arena if necessary. */ + malloc_mutex_lock(&arenas_lock); + if ((arena = arenas[newind]) == NULL) + arena = arenas_extend(newind); + arenas[oldind]->nthreads--; + arenas[newind]->nthreads++; + malloc_mutex_unlock(&arenas_lock); + if (arena == NULL) { + ret = EAGAIN; + goto RETURN; + } + + /* Set new arena association. 
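+		 * If the thread has a tcache, repoint it at the new arena as
+		 * well so that subsequent cache fills come from that arena.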
*/ + ARENA_SET(arena); +#ifdef JEMALLOC_TCACHE + { + tcache_t *tcache = TCACHE_GET(); + if (tcache != NULL) + tcache->arena = arena; + } +#endif + } + + ret = 0; +RETURN: + return (ret); +} + +#ifdef JEMALLOC_STATS +CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t); +CTL_RO_NL_GEN(thread_allocatedp, ALLOCATEDP_GET(), uint64_t *); +CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t); +CTL_RO_NL_GEN(thread_deallocatedp, DEALLOCATEDP_GET(), uint64_t *); +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_DEBUG +CTL_RO_TRUE_GEN(config_debug) +#else +CTL_RO_FALSE_GEN(config_debug) +#endif + +#ifdef JEMALLOC_DSS +CTL_RO_TRUE_GEN(config_dss) +#else +CTL_RO_FALSE_GEN(config_dss) +#endif + +#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT +CTL_RO_TRUE_GEN(config_dynamic_page_shift) +#else +CTL_RO_FALSE_GEN(config_dynamic_page_shift) +#endif + +#ifdef JEMALLOC_FILL +CTL_RO_TRUE_GEN(config_fill) +#else +CTL_RO_FALSE_GEN(config_fill) +#endif + +#ifdef JEMALLOC_LAZY_LOCK +CTL_RO_TRUE_GEN(config_lazy_lock) +#else +CTL_RO_FALSE_GEN(config_lazy_lock) +#endif + +#ifdef JEMALLOC_PROF +CTL_RO_TRUE_GEN(config_prof) +#else +CTL_RO_FALSE_GEN(config_prof) +#endif + +#ifdef JEMALLOC_PROF_LIBGCC +CTL_RO_TRUE_GEN(config_prof_libgcc) +#else +CTL_RO_FALSE_GEN(config_prof_libgcc) +#endif + +#ifdef JEMALLOC_PROF_LIBUNWIND +CTL_RO_TRUE_GEN(config_prof_libunwind) +#else +CTL_RO_FALSE_GEN(config_prof_libunwind) +#endif + +#ifdef JEMALLOC_STATS +CTL_RO_TRUE_GEN(config_stats) +#else +CTL_RO_FALSE_GEN(config_stats) +#endif + +#ifdef JEMALLOC_SWAP +CTL_RO_TRUE_GEN(config_swap) +#else +CTL_RO_FALSE_GEN(config_swap) +#endif + +#ifdef JEMALLOC_SYSV +CTL_RO_TRUE_GEN(config_sysv) +#else +CTL_RO_FALSE_GEN(config_sysv) +#endif + +#ifdef JEMALLOC_TCACHE +CTL_RO_TRUE_GEN(config_tcache) +#else +CTL_RO_FALSE_GEN(config_tcache) +#endif + +#ifdef JEMALLOC_TINY +CTL_RO_TRUE_GEN(config_tiny) +#else +CTL_RO_FALSE_GEN(config_tiny) +#endif + +#ifdef JEMALLOC_TLS +CTL_RO_TRUE_GEN(config_tls) +#else +CTL_RO_FALSE_GEN(config_tls) +#endif + +#ifdef JEMALLOC_XMALLOC +CTL_RO_TRUE_GEN(config_xmalloc) +#else +CTL_RO_FALSE_GEN(config_xmalloc) +#endif + +/******************************************************************************/ + +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t) +CTL_RO_NL_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t) +CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) +CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +#ifdef JEMALLOC_FILL +CTL_RO_NL_GEN(opt_junk, opt_junk, bool) +CTL_RO_NL_GEN(opt_zero, opt_zero, bool) +#endif +#ifdef JEMALLOC_SYSV +CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool) +#endif +#ifdef JEMALLOC_XMALLOC +CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool) +#endif +#ifdef JEMALLOC_TCACHE +CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) +CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t) +#endif +#ifdef JEMALLOC_PROF +CTL_RO_NL_GEN(opt_prof, opt_prof, bool) +CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *) +CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. 
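+ * opt_prof_active is written by the prof.active mallctl, so it is read under
+ * ctl_mtx via CTL_RO_GEN() rather than CTL_RO_NL_GEN().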
*/ +CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t) +CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool) +CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool) +CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool) +CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t) +#endif +#ifdef JEMALLOC_SWAP +CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool) +#endif + +/******************************************************************************/ + +CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +const ctl_node_t * +arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nbins) + return (NULL); + return (super_arenas_bin_i_node); +} + +CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t) +const ctl_node_t * +arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nlclasses) + return (NULL); + return (super_arenas_lrun_i_node); +} + +CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned) + +static int +arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned nread, i; + + malloc_mutex_lock(&ctl_mtx); + READONLY(); + if (*oldlenp != narenas * sizeof(bool)) { + ret = EINVAL; + nread = (*oldlenp < narenas * sizeof(bool)) + ? (*oldlenp / sizeof(bool)) : narenas; + } else { + ret = 0; + nread = narenas; + } + + for (i = 0; i < nread; i++) + ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; + +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t) +CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t) +CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t) +CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t) +#ifdef JEMALLOC_TINY +CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t) +CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t) +#endif +CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t) +CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t) +CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t) +CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t) +CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t) +CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t) +#ifdef JEMALLOC_TCACHE +CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) +#endif +CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned) +CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned) +CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned) +CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned) +CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned) +#ifdef JEMALLOC_TCACHE +CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) +#endif +CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) + +static int +arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + unsigned arena; + + WRITEONLY(); + arena = UINT_MAX; + WRITE(arena, unsigned); + if (newp != NULL && arena >= narenas) { + ret = EFAULT; + goto RETURN; + } else { + arena_t *tarenas[narenas]; + + malloc_mutex_lock(&arenas_lock); + memcpy(tarenas, arenas, sizeof(arena_t *) * narenas); + malloc_mutex_unlock(&arenas_lock); + + if (arena == UINT_MAX) { + unsigned i; + for (i = 0; i < narenas; 
i++) { + if (tarenas[i] != NULL) + arena_purge_all(tarenas[i]); + } + } else { + assert(arena < narenas); + if (tarenas[arena] != NULL) + arena_purge_all(tarenas[arena]); + } + } + + ret = 0; +RETURN: + return (ret); +} + +/******************************************************************************/ + +#ifdef JEMALLOC_PROF +static int +prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + bool oldval; + + malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */ + oldval = opt_prof_active; + if (newp != NULL) { + /* + * The memory barriers will tend to make opt_prof_active + * propagate faster on systems with weak memory ordering. + */ + mb_write(); + WRITE(opt_prof_active, bool); + mb_write(); + } + READ(oldval, bool); + + ret = 0; +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const char *filename = NULL; + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_mdump(filename)) { + ret = EFAULT; + goto RETURN; + } + + ret = 0; +RETURN: + return (ret); +} + +CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t) +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_STATS +CTL_RO_GEN(stats_chunks_current, ctl_stats.chunks.current, size_t) +CTL_RO_GEN(stats_chunks_total, ctl_stats.chunks.total, uint64_t) +CTL_RO_GEN(stats_chunks_high, ctl_stats.chunks.high, size_t) +CTL_RO_GEN(stats_huge_allocated, huge_allocated, size_t) +CTL_RO_GEN(stats_huge_nmalloc, huge_nmalloc, uint64_t) +CTL_RO_GEN(stats_huge_ndalloc, huge_ndalloc, uint64_t) +CTL_RO_GEN(stats_arenas_i_small_allocated, + ctl_stats.arenas[mib[2]].allocated_small, size_t) +CTL_RO_GEN(stats_arenas_i_small_nmalloc, + ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) +CTL_RO_GEN(stats_arenas_i_small_ndalloc, + ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) +CTL_RO_GEN(stats_arenas_i_small_nrequests, + ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) +CTL_RO_GEN(stats_arenas_i_large_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) +CTL_RO_GEN(stats_arenas_i_large_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) +CTL_RO_GEN(stats_arenas_i_large_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) +CTL_RO_GEN(stats_arenas_i_large_nrequests, + ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) + +CTL_RO_GEN(stats_arenas_i_bins_j_allocated, + ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t) +CTL_RO_GEN(stats_arenas_i_bins_j_nmalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_ndalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_nrequests, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) +#ifdef JEMALLOC_TCACHE +CTL_RO_GEN(stats_arenas_i_bins_j_nfills, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_nflushes, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) +#endif +CTL_RO_GEN(stats_arenas_i_bins_j_nruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_nreruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_highruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].highruns, size_t) +CTL_RO_GEN(stats_arenas_i_bins_j_curruns, + 
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + +const ctl_node_t * +stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) +{ + + if (j > nbins) + return (NULL); + return (super_stats_arenas_i_bins_j_node); +} + +CTL_RO_GEN(stats_arenas_i_lruns_j_nmalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) +CTL_RO_GEN(stats_arenas_i_lruns_j_ndalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) +CTL_RO_GEN(stats_arenas_i_lruns_j_nrequests, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) +CTL_RO_GEN(stats_arenas_i_lruns_j_curruns, + ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) +CTL_RO_GEN(stats_arenas_i_lruns_j_highruns, + ctl_stats.arenas[mib[2]].lstats[mib[4]].highruns, size_t) + +const ctl_node_t * +stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) +{ + + if (j > nlclasses) + return (NULL); + return (super_stats_arenas_i_lruns_j_node); +} + +#endif +CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) +#ifdef JEMALLOC_STATS +CTL_RO_GEN(stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped, + size_t) +CTL_RO_GEN(stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge, + uint64_t) +CTL_RO_GEN(stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise, + uint64_t) +CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged, + uint64_t) +#endif + +const ctl_node_t * +stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) +{ + const ctl_node_t * ret; + + malloc_mutex_lock(&ctl_mtx); + if (ctl_stats.arenas[i].initialized == false) { + ret = NULL; + goto RETURN; + } + + ret = super_stats_arenas_i_node; +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +#ifdef JEMALLOC_STATS +CTL_RO_GEN(stats_cactive, &stats_cactive, size_t *) +CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t) +CTL_RO_GEN(stats_active, ctl_stats.active, size_t) +CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t) +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_SWAP +# ifdef JEMALLOC_STATS +CTL_RO_GEN(swap_avail, ctl_stats.swap_avail, size_t) +# endif + +static int +swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + malloc_mutex_lock(&ctl_mtx); + if (swap_enabled) { + READONLY(); + } else { + /* + * swap_prezeroed isn't actually used by the swap code until it + * is set during a successful chunk_swap_enabled() call. We + * use it here to store the value that we'll pass to + * chunk_swap_enable() in a swap.fds mallctl(). This is not + * very clean, but the obvious alternatives are even worse. 
+ */ + WRITE(swap_prezeroed, bool); + } + + READ(swap_prezeroed, bool); + + ret = 0; +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +CTL_RO_GEN(swap_nfds, swap_nfds, size_t) + +static int +swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + + malloc_mutex_lock(&ctl_mtx); + if (swap_enabled) { + READONLY(); + } else if (newp != NULL) { + size_t nfds = newlen / sizeof(int); + + { + int fds[nfds]; + + memcpy(fds, newp, nfds * sizeof(int)); + if (chunk_swap_enable(fds, nfds, swap_prezeroed)) { + ret = EFAULT; + goto RETURN; + } + } + } + + if (oldp != NULL && oldlenp != NULL) { + if (*oldlenp != swap_nfds * sizeof(int)) { + size_t copylen = (swap_nfds * sizeof(int) <= *oldlenp) + ? swap_nfds * sizeof(int) : *oldlenp; + + memcpy(oldp, swap_fds, copylen); + ret = EINVAL; + goto RETURN; + } else + memcpy(oldp, swap_fds, *oldlenp); + } + + ret = 0; +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} +#endif diff --git a/src/extent.c b/src/extent.c new file mode 100644 index 0000000..3c04d3a --- /dev/null +++ b/src/extent.c @@ -0,0 +1,41 @@ +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS)) +static inline int +extent_szad_comp(extent_node_t *a, extent_node_t *b) +{ + int ret; + size_t a_size = a->size; + size_t b_size = b->size; + + ret = (a_size > b_size) - (a_size < b_size); + if (ret == 0) { + uintptr_t a_addr = (uintptr_t)a->addr; + uintptr_t b_addr = (uintptr_t)b->addr; + + ret = (a_addr > b_addr) - (a_addr < b_addr); + } + + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad, + extent_szad_comp) +#endif + +static inline int +extent_ad_comp(extent_node_t *a, extent_node_t *b) +{ + uintptr_t a_addr = (uintptr_t)a->addr; + uintptr_t b_addr = (uintptr_t)b->addr; + + return ((a_addr > b_addr) - (a_addr < b_addr)); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, + extent_ad_comp) diff --git a/src/hash.c b/src/hash.c new file mode 100644 index 0000000..cfa4da0 --- /dev/null +++ b/src/hash.c @@ -0,0 +1,2 @@ +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/src/huge.c b/src/huge.c new file mode 100644 index 0000000..ac3f3a0 --- /dev/null +++ b/src/huge.c @@ -0,0 +1,379 @@ +#define JEMALLOC_HUGE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +#ifdef JEMALLOC_STATS +uint64_t huge_nmalloc; +uint64_t huge_ndalloc; +size_t huge_allocated; +#endif + +malloc_mutex_t huge_mtx; + +/******************************************************************************/ + +/* Tree of chunks that are stand-alone huge allocations. */ +static extent_tree_t huge; + +void * +huge_malloc(size_t size, bool zero) +{ + void *ret; + size_t csize; + extent_node_t *node; + + /* Allocate one or more contiguous chunks for this request. */ + + csize = CHUNK_CEILING(size); + if (csize == 0) { + /* size is large enough to cause size_t wrap-around. */ + return (NULL); + } + + /* Allocate an extent node with which to track the chunk. 
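+	 * The node comes from the internal base allocator, so tracking a huge
+	 * chunk does not itself require an arena allocation.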
*/ + node = base_node_alloc(); + if (node == NULL) + return (NULL); + + ret = chunk_alloc(csize, false, &zero); + if (ret == NULL) { + base_node_dealloc(node); + return (NULL); + } + + /* Insert node into huge. */ + node->addr = ret; + node->size = csize; + + malloc_mutex_lock(&huge_mtx); + extent_tree_ad_insert(&huge, node); +#ifdef JEMALLOC_STATS + stats_cactive_add(csize); + huge_nmalloc++; + huge_allocated += csize; +#endif + malloc_mutex_unlock(&huge_mtx); + +#ifdef JEMALLOC_FILL + if (zero == false) { + if (opt_junk) + memset(ret, 0xa5, csize); + else if (opt_zero) + memset(ret, 0, csize); + } +#endif + + return (ret); +} + +/* Only handles large allocations that require more than chunk alignment. */ +void * +huge_palloc(size_t size, size_t alignment, bool zero) +{ + void *ret; + size_t alloc_size, chunk_size, offset; + extent_node_t *node; + + /* + * This allocation requires alignment that is even larger than chunk + * alignment. This means that huge_malloc() isn't good enough. + * + * Allocate almost twice as many chunks as are demanded by the size or + * alignment, in order to assure the alignment can be achieved, then + * unmap leading and trailing chunks. + */ + assert(alignment > chunksize); + + chunk_size = CHUNK_CEILING(size); + + if (size >= alignment) + alloc_size = chunk_size + alignment - chunksize; + else + alloc_size = (alignment << 1) - chunksize; + + /* Allocate an extent node with which to track the chunk. */ + node = base_node_alloc(); + if (node == NULL) + return (NULL); + + ret = chunk_alloc(alloc_size, false, &zero); + if (ret == NULL) { + base_node_dealloc(node); + return (NULL); + } + + offset = (uintptr_t)ret & (alignment - 1); + assert((offset & chunksize_mask) == 0); + assert(offset < alloc_size); + if (offset == 0) { + /* Trim trailing space. */ + chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size + - chunk_size); + } else { + size_t trailsize; + + /* Trim leading space. */ + chunk_dealloc(ret, alignment - offset); + + ret = (void *)((uintptr_t)ret + (alignment - offset)); + + trailsize = alloc_size - (alignment - offset) - chunk_size; + if (trailsize != 0) { + /* Trim trailing space. */ + assert(trailsize < alloc_size); + chunk_dealloc((void *)((uintptr_t)ret + chunk_size), + trailsize); + } + } + + /* Insert node into huge. */ + node->addr = ret; + node->size = chunk_size; + + malloc_mutex_lock(&huge_mtx); + extent_tree_ad_insert(&huge, node); +#ifdef JEMALLOC_STATS + stats_cactive_add(chunk_size); + huge_nmalloc++; + huge_allocated += chunk_size; +#endif + malloc_mutex_unlock(&huge_mtx); + +#ifdef JEMALLOC_FILL + if (zero == false) { + if (opt_junk) + memset(ret, 0xa5, chunk_size); + else if (opt_zero) + memset(ret, 0, chunk_size); + } +#endif + + return (ret); +} + +void * +huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) +{ + + /* + * Avoid moving the allocation if the size class can be left the same. + */ + if (oldsize > arena_maxclass + && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) + && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { + assert(CHUNK_CEILING(oldsize) == oldsize); +#ifdef JEMALLOC_FILL + if (opt_junk && size < oldsize) { + memset((void *)((uintptr_t)ptr + size), 0x5a, + oldsize - size); + } +#endif + return (ptr); + } + + /* Reallocation would require a move. */ + return (NULL); +} + +void * +huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero) +{ + void *ret; + size_t copysize; + + /* Try to avoid moving the allocation. 
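+	 * huge_ralloc_no_move() succeeds only when the chunk-rounded old size
+	 * already covers size without exceeding what size+extra rounds to.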
*/ + ret = huge_ralloc_no_move(ptr, oldsize, size, extra); + if (ret != NULL) + return (ret); + + /* + * size and oldsize are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + if (alignment > chunksize) + ret = huge_palloc(size + extra, alignment, zero); + else + ret = huge_malloc(size + extra, zero); + + if (ret == NULL) { + if (extra == 0) + return (NULL); + /* Try again, this time without extra. */ + if (alignment > chunksize) + ret = huge_palloc(size, alignment, zero); + else + ret = huge_malloc(size, zero); + + if (ret == NULL) + return (NULL); + } + + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + + /* + * Use mremap(2) if this is a huge-->huge reallocation, and neither the + * source nor the destination are in swap or dss. + */ +#ifdef JEMALLOC_MREMAP_FIXED + if (oldsize >= chunksize +# ifdef JEMALLOC_SWAP + && (swap_enabled == false || (chunk_in_swap(ptr) == false && + chunk_in_swap(ret) == false)) +# endif +# ifdef JEMALLOC_DSS + && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false +# endif + ) { + size_t newsize = huge_salloc(ret); + + if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED, + ret) == MAP_FAILED) { + /* + * Assuming no chunk management bugs in the allocator, + * the only documented way an error can occur here is + * if the application changed the map type for a + * portion of the old allocation. This is firmly in + * undefined behavior territory, so write a diagnostic + * message, and optionally abort. + */ + char buf[BUFERROR_BUF]; + + buferror(errno, buf, sizeof(buf)); + malloc_write("<jemalloc>: Error in mremap(): "); + malloc_write(buf); + malloc_write("\n"); + if (opt_abort) + abort(); + memcpy(ret, ptr, copysize); + idalloc(ptr); + } else + huge_dalloc(ptr, false); + } else +#endif + { + memcpy(ret, ptr, copysize); + idalloc(ptr); + } + return (ret); +} + +void +huge_dalloc(void *ptr, bool unmap) +{ + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. */ + key.addr = ptr; + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + assert(node->addr == ptr); + extent_tree_ad_remove(&huge, node); + +#ifdef JEMALLOC_STATS + stats_cactive_sub(node->size); + huge_ndalloc++; + huge_allocated -= node->size; +#endif + + malloc_mutex_unlock(&huge_mtx); + + if (unmap) { + /* Unmap chunk. */ +#ifdef JEMALLOC_FILL +#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS)) + if (opt_junk) + memset(node->addr, 0x5a, node->size); +#endif +#endif + chunk_dealloc(node->addr, node->size); + } + + base_node_dealloc(node); +} + +size_t +huge_salloc(const void *ptr) +{ + size_t ret; + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. */ + key.addr = __DECONST(void *, ptr); + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + + ret = node->size; + + malloc_mutex_unlock(&huge_mtx); + + return (ret); +} + +#ifdef JEMALLOC_PROF +prof_ctx_t * +huge_prof_ctx_get(const void *ptr) +{ + prof_ctx_t *ret; + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. 
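+	 * This is a lookup only; the node remains in the tree.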
*/ + key.addr = __DECONST(void *, ptr); + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + + ret = node->prof_ctx; + + malloc_mutex_unlock(&huge_mtx); + + return (ret); +} + +void +huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) +{ + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. */ + key.addr = __DECONST(void *, ptr); + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + + node->prof_ctx = ctx; + + malloc_mutex_unlock(&huge_mtx); +} +#endif + +bool +huge_boot(void) +{ + + /* Initialize chunks data. */ + if (malloc_mutex_init(&huge_mtx)) + return (true); + extent_tree_ad_new(&huge); + +#ifdef JEMALLOC_STATS + huge_nmalloc = 0; + huge_ndalloc = 0; + huge_allocated = 0; +#endif + + return (false); +} diff --git a/src/jemalloc.c b/src/jemalloc.c new file mode 100644 index 0000000..e287516 --- /dev/null +++ b/src/jemalloc.c @@ -0,0 +1,1847 @@ +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +malloc_mutex_t arenas_lock; +arena_t **arenas; +unsigned narenas; + +pthread_key_t arenas_tsd; +#ifndef NO_TLS +__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec")); +#endif + +#ifdef JEMALLOC_STATS +# ifndef NO_TLS +__thread thread_allocated_t thread_allocated_tls; +# else +pthread_key_t thread_allocated_tsd; +# endif +#endif + +/* Set to true once the allocator has been initialized. */ +static bool malloc_initialized = false; + +/* Used to let the initializing thread recursively allocate. */ +static pthread_t malloc_initializer = (unsigned long)0; + +/* Used to avoid initialization races. */ +static malloc_mutex_t init_lock = +#ifdef JEMALLOC_OSSPIN + 0 +#else + MALLOC_MUTEX_INITIALIZER +#endif + ; + +#ifdef DYNAMIC_PAGE_SHIFT +size_t pagesize; +size_t pagesize_mask; +size_t lg_pagesize; +#endif + +unsigned ncpus; + +/* Runtime configuration options. */ +const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default")); +#ifdef JEMALLOC_DEBUG +bool opt_abort = true; +# ifdef JEMALLOC_FILL +bool opt_junk = true; +# endif +#else +bool opt_abort = false; +# ifdef JEMALLOC_FILL +bool opt_junk = false; +# endif +#endif +#ifdef JEMALLOC_SYSV +bool opt_sysv = false; +#endif +#ifdef JEMALLOC_XMALLOC +bool opt_xmalloc = false; +#endif +#ifdef JEMALLOC_FILL +bool opt_zero = false; +#endif +size_t opt_narenas = 0; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void wrtmessage(void *cbopaque, const char *s); +static void stats_print_atexit(void); +static unsigned malloc_ncpus(void); +static void arenas_cleanup(void *arg); +#if (defined(JEMALLOC_STATS) && defined(NO_TLS)) +static void thread_allocated_cleanup(void *arg); +#endif +static bool malloc_conf_next(char const **opts_p, char const **k_p, + size_t *klen_p, char const **v_p, size_t *vlen_p); +static void malloc_conf_error(const char *msg, const char *k, size_t klen, + const char *v, size_t vlen); +static void malloc_conf_init(void); +static bool malloc_init_hard(void); + +/******************************************************************************/ +/* malloc_message() setup. 
*/ + +#ifdef JEMALLOC_HAVE_ATTR +JEMALLOC_ATTR(visibility("hidden")) +#else +static +#endif +void +wrtmessage(void *cbopaque, const char *s) +{ +#ifdef JEMALLOC_CC_SILENCE + int result = +#endif + write(STDERR_FILENO, s, strlen(s)); +#ifdef JEMALLOC_CC_SILENCE + if (result < 0) + result = errno; +#endif +} + +void (*JEMALLOC_P(malloc_message))(void *, const char *s) + JEMALLOC_ATTR(visibility("default")) = wrtmessage; + +/******************************************************************************/ +/* + * Begin miscellaneous support functions. + */ + +/* Create a new arena and insert it into the arenas array at index ind. */ +arena_t * +arenas_extend(unsigned ind) +{ + arena_t *ret; + + /* Allocate enough space for trailing bins. */ + ret = (arena_t *)base_alloc(offsetof(arena_t, bins) + + (sizeof(arena_bin_t) * nbins)); + if (ret != NULL && arena_new(ret, ind) == false) { + arenas[ind] = ret; + return (ret); + } + /* Only reached if there is an OOM error. */ + + /* + * OOM here is quite inconvenient to propagate, since dealing with it + * would require a check for failure in the fast path. Instead, punt + * by using arenas[0]. In practice, this is an extremely unlikely + * failure. + */ + malloc_write("<jemalloc>: Error initializing arena\n"); + if (opt_abort) + abort(); + + return (arenas[0]); +} + +/* + * Choose an arena based on a per-thread value (slow-path code only, called + * only by choose_arena()). + */ +arena_t * +choose_arena_hard(void) +{ + arena_t *ret; + + if (narenas > 1) { + unsigned i, choose, first_null; + + choose = 0; + first_null = narenas; + malloc_mutex_lock(&arenas_lock); + assert(arenas[0] != NULL); + for (i = 1; i < narenas; i++) { + if (arenas[i] != NULL) { + /* + * Choose the first arena that has the lowest + * number of threads assigned to it. + */ + if (arenas[i]->nthreads < + arenas[choose]->nthreads) + choose = i; + } else if (first_null == narenas) { + /* + * Record the index of the first uninitialized + * arena, in case all extant arenas are in use. + * + * NB: It is possible for there to be + * discontinuities in terms of initialized + * versus uninitialized arenas, due to the + * "thread.arena" mallctl. + */ + first_null = i; + } + } + + if (arenas[choose] == 0 || first_null == narenas) { + /* + * Use an unloaded arena, or the least loaded arena if + * all arenas are already initialized. + */ + ret = arenas[choose]; + } else { + /* Initialize a new arena. */ + ret = arenas_extend(first_null); + } + ret->nthreads++; + malloc_mutex_unlock(&arenas_lock); + } else { + ret = arenas[0]; + malloc_mutex_lock(&arenas_lock); + ret->nthreads++; + malloc_mutex_unlock(&arenas_lock); + } + + ARENA_SET(ret); + + return (ret); +} + +/* + * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so + * provide a wrapper. + */ +int +buferror(int errnum, char *buf, size_t buflen) +{ +#ifdef _GNU_SOURCE + char *b = strerror_r(errno, buf, buflen); + if (b != buf) { + strncpy(buf, b, buflen); + buf[buflen-1] = '\0'; + } + return (0); +#else + return (strerror_r(errno, buf, buflen)); +#endif +} + +static void +stats_print_atexit(void) +{ + +#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS)) + unsigned i; + + /* + * Merge stats from extant threads. This is racy, since individual + * threads do not lock when recording tcache stats events. As a + * consequence, the final stats may be slightly out of date by the time + * they are reported, if other threads continue to allocate. 
+ */ + for (i = 0; i < narenas; i++) { + arena_t *arena = arenas[i]; + if (arena != NULL) { + tcache_t *tcache; + + /* + * tcache_stats_merge() locks bins, so if any code is + * introduced that acquires both arena and bin locks in + * the opposite order, deadlocks may result. + */ + malloc_mutex_lock(&arena->lock); + ql_foreach(tcache, &arena->tcache_ql, link) { + tcache_stats_merge(tcache, arena); + } + malloc_mutex_unlock(&arena->lock); + } + } +#endif + JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL); +} + +#if (defined(JEMALLOC_STATS) && defined(NO_TLS)) +thread_allocated_t * +thread_allocated_get_hard(void) +{ + thread_allocated_t *thread_allocated = (thread_allocated_t *) + imalloc(sizeof(thread_allocated_t)); + if (thread_allocated == NULL) { + static thread_allocated_t static_thread_allocated = {0, 0}; + malloc_write("<jemalloc>: Error allocating TSD;" + " mallctl(\"thread.{de,}allocated[p]\", ...)" + " will be inaccurate\n"); + if (opt_abort) + abort(); + return (&static_thread_allocated); + } + pthread_setspecific(thread_allocated_tsd, thread_allocated); + thread_allocated->allocated = 0; + thread_allocated->deallocated = 0; + return (thread_allocated); +} +#endif + +/* + * End miscellaneous support functions. + */ +/******************************************************************************/ +/* + * Begin initialization functions. + */ + +static unsigned +malloc_ncpus(void) +{ + unsigned ret; + long result; + + result = sysconf(_SC_NPROCESSORS_ONLN); + if (result == -1) { + /* Error. */ + ret = 1; + } + ret = (unsigned)result; + + return (ret); +} + +static void +arenas_cleanup(void *arg) +{ + arena_t *arena = (arena_t *)arg; + + malloc_mutex_lock(&arenas_lock); + arena->nthreads--; + malloc_mutex_unlock(&arenas_lock); +} + +#if (defined(JEMALLOC_STATS) && defined(NO_TLS)) +static void +thread_allocated_cleanup(void *arg) +{ + uint64_t *allocated = (uint64_t *)arg; + + if (allocated != NULL) + idalloc(allocated); +} +#endif + +/* + * FreeBSD's pthreads implementation calls malloc(3), so the malloc + * implementation has to take pains to avoid infinite recursion during + * initialization. 
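+ * malloc_init_hard() records the bootstrapping thread in malloc_initializer,
+ * so a recursive call from that thread returns immediately rather than
+ * waiting on itself.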
+ */ +static inline bool +malloc_init(void) +{ + + if (malloc_initialized == false) + return (malloc_init_hard()); + + return (false); +} + +static bool +malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, + char const **v_p, size_t *vlen_p) +{ + bool accept; + const char *opts = *opts_p; + + *k_p = opts; + + for (accept = false; accept == false;) { + switch (*opts) { + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': case 'G': case 'H': case 'I': case 'J': + case 'K': case 'L': case 'M': case 'N': case 'O': + case 'P': case 'Q': case 'R': case 'S': case 'T': + case 'U': case 'V': case 'W': case 'X': case 'Y': + case 'Z': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': case 'g': case 'h': case 'i': case 'j': + case 'k': case 'l': case 'm': case 'n': case 'o': + case 'p': case 'q': case 'r': case 's': case 't': + case 'u': case 'v': case 'w': case 'x': case 'y': + case 'z': + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case '_': + opts++; + break; + case ':': + opts++; + *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; + *v_p = opts; + accept = true; + break; + case '\0': + if (opts != *opts_p) { + malloc_write("<jemalloc>: Conf string " + "ends with key\n"); + } + return (true); + default: + malloc_write("<jemalloc>: Malformed conf " + "string\n"); + return (true); + } + } + + for (accept = false; accept == false;) { + switch (*opts) { + case ',': + opts++; + /* + * Look ahead one character here, because the + * next time this function is called, it will + * assume that end of input has been cleanly + * reached if no input remains, but we have + * optimistically already consumed the comma if + * one exists. + */ + if (*opts == '\0') { + malloc_write("<jemalloc>: Conf string " + "ends with comma\n"); + } + *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; + accept = true; + break; + case '\0': + *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; + accept = true; + break; + default: + opts++; + break; + } + } + + *opts_p = opts; + return (false); +} + +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) +{ + char buf[PATH_MAX + 1]; + + malloc_write("<jemalloc>: "); + malloc_write(msg); + malloc_write(": "); + memcpy(buf, k, klen); + memcpy(&buf[klen], ":", 1); + memcpy(&buf[klen+1], v, vlen); + buf[klen+1+vlen] = '\0'; + malloc_write(buf); + malloc_write("\n"); +} + +static void +malloc_conf_init(void) +{ + unsigned i; + char buf[PATH_MAX + 1]; + const char *opts, *k, *v; + size_t klen, vlen; + + for (i = 0; i < 3; i++) { + /* Get runtime configuration. */ + switch (i) { + case 0: + if (JEMALLOC_P(malloc_conf) != NULL) { + /* + * Use options that were compiled into the + * program. + */ + opts = JEMALLOC_P(malloc_conf); + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + case 1: { + int linklen; + const char *linkname = +#ifdef JEMALLOC_PREFIX + "/etc/"JEMALLOC_PREFIX"malloc.conf" +#else + "/etc/malloc.conf" +#endif + ; + + if ((linklen = readlink(linkname, buf, + sizeof(buf) - 1)) != -1) { + /* + * Use the contents of the "/etc/malloc.conf" + * symbolic link's name. + */ + buf[linklen] = '\0'; + opts = buf; + } else { + /* No configuration specified. 
*/ + buf[0] = '\0'; + opts = buf; + } + break; + } + case 2: { + const char *envname = +#ifdef JEMALLOC_PREFIX + JEMALLOC_CPREFIX"MALLOC_CONF" +#else + "MALLOC_CONF" +#endif + ; + + if ((opts = getenv(envname)) != NULL) { + /* + * Do nothing; opts is already initialized to + * the value of the MALLOC_CONF environment + * variable. + */ + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + } + default: + /* NOTREACHED */ + assert(false); + buf[0] = '\0'; + opts = buf; + } + + while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, + &vlen) == false) { +#define CONF_HANDLE_BOOL(n) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + if (strncmp("true", v, vlen) == 0 && \ + vlen == sizeof("true")-1) \ + opt_##n = true; \ + else if (strncmp("false", v, vlen) == \ + 0 && vlen == sizeof("false")-1) \ + opt_##n = false; \ + else { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } \ + continue; \ + } +#define CONF_HANDLE_SIZE_T(n, min, max) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + unsigned long ul; \ + char *end; \ + \ + errno = 0; \ + ul = strtoul(v, &end, 0); \ + if (errno != 0 || (uintptr_t)end - \ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (ul < min || ul > max) { \ + malloc_conf_error( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else \ + opt_##n = ul; \ + continue; \ + } +#define CONF_HANDLE_SSIZE_T(n, min, max) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + long l; \ + char *end; \ + \ + errno = 0; \ + l = strtol(v, &end, 0); \ + if (errno != 0 || (uintptr_t)end - \ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (l < (ssize_t)min || l > \ + (ssize_t)max) { \ + malloc_conf_error( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else \ + opt_##n = l; \ + continue; \ + } +#define CONF_HANDLE_CHAR_P(n, d) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + size_t cpylen = (vlen <= \ + sizeof(opt_##n)-1) ? vlen : \ + sizeof(opt_##n)-1; \ + strncpy(opt_##n, v, cpylen); \ + opt_##n[cpylen] = '\0'; \ + continue; \ + } + + CONF_HANDLE_BOOL(abort) + CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM, + PAGE_SHIFT-1) + CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM, + PAGE_SHIFT-1) + /* + * Chunks always require at least one * header page, + * plus one data page. 
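+			 * This is why the lg_chunk handler below enforces a
+			 * lower bound of PAGE_SHIFT+1.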
+ */ + CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1, + (sizeof(size_t) << 3) - 1) + CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX) + CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1, + (sizeof(size_t) << 3) - 1) + CONF_HANDLE_BOOL(stats_print) +#ifdef JEMALLOC_FILL + CONF_HANDLE_BOOL(junk) + CONF_HANDLE_BOOL(zero) +#endif +#ifdef JEMALLOC_SYSV + CONF_HANDLE_BOOL(sysv) +#endif +#ifdef JEMALLOC_XMALLOC + CONF_HANDLE_BOOL(xmalloc) +#endif +#ifdef JEMALLOC_TCACHE + CONF_HANDLE_BOOL(tcache) + CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1, + (sizeof(size_t) << 3) - 1) + CONF_HANDLE_SSIZE_T(lg_tcache_max, -1, + (sizeof(size_t) << 3) - 1) +#endif +#ifdef JEMALLOC_PROF + CONF_HANDLE_BOOL(prof) + CONF_HANDLE_CHAR_P(prof_prefix, "jeprof") + CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX) + CONF_HANDLE_BOOL(prof_active) + CONF_HANDLE_SSIZE_T(lg_prof_sample, 0, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(prof_accum) + CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1, + (sizeof(size_t) << 3) - 1) + CONF_HANDLE_SSIZE_T(lg_prof_interval, -1, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(prof_gdump) + CONF_HANDLE_BOOL(prof_leak) +#endif +#ifdef JEMALLOC_SWAP + CONF_HANDLE_BOOL(overcommit) +#endif + malloc_conf_error("Invalid conf pair", k, klen, v, + vlen); +#undef CONF_HANDLE_BOOL +#undef CONF_HANDLE_SIZE_T +#undef CONF_HANDLE_SSIZE_T +#undef CONF_HANDLE_CHAR_P + } + + /* Validate configuration of options that are inter-related. */ + if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) { + malloc_write("<jemalloc>: Invalid lg_[qc]space_max " + "relationship; restoring defaults\n"); + opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT; + opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT; + } + } +} + +static bool +malloc_init_hard(void) +{ + arena_t *init_arenas[1]; + + malloc_mutex_lock(&init_lock); + if (malloc_initialized || malloc_initializer == pthread_self()) { + /* + * Another thread initialized the allocator before this one + * acquired init_lock, or this thread is the initializing + * thread, and it is recursively allocating. + */ + malloc_mutex_unlock(&init_lock); + return (false); + } + if (malloc_initializer != (unsigned long)0) { + /* Busy-wait until the initializing thread completes. */ + do { + malloc_mutex_unlock(&init_lock); + CPU_SPINWAIT; + malloc_mutex_lock(&init_lock); + } while (malloc_initialized == false); + malloc_mutex_unlock(&init_lock); + return (false); + } + +#ifdef DYNAMIC_PAGE_SHIFT + /* Get page size. */ + { + long result; + + result = sysconf(_SC_PAGESIZE); + assert(result != -1); + pagesize = (unsigned)result; + + /* + * We assume that pagesize is a power of 2 when calculating + * pagesize_mask and lg_pagesize. + */ + assert(((result - 1) & result) == 0); + pagesize_mask = result - 1; + lg_pagesize = ffs((int)result) - 1; + } +#endif + +#ifdef JEMALLOC_PROF + prof_boot0(); +#endif + + malloc_conf_init(); + + /* Register fork handlers. */ + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork, + jemalloc_postfork) != 0) { + malloc_write("<jemalloc>: Error in pthread_atfork()\n"); + if (opt_abort) + abort(); + } + + if (ctl_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (opt_stats_print) { + /* Print statistics at exit. 
*/ + if (atexit(stats_print_atexit) != 0) { + malloc_write("<jemalloc>: Error in atexit()\n"); + if (opt_abort) + abort(); + } + } + + if (chunk_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (base_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + +#ifdef JEMALLOC_PROF + prof_boot1(); +#endif + + if (arena_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + +#ifdef JEMALLOC_TCACHE + if (tcache_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } +#endif + + if (huge_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + +#if (defined(JEMALLOC_STATS) && defined(NO_TLS)) + /* Initialize allocation counters before any allocations can occur. */ + if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup) + != 0) { + malloc_mutex_unlock(&init_lock); + return (true); + } +#endif + + /* + * Create enough scaffolding to allow recursive allocation in + * malloc_ncpus(). + */ + narenas = 1; + arenas = init_arenas; + memset(arenas, 0, sizeof(arena_t *) * narenas); + + /* + * Initialize one arena here. The rest are lazily created in + * choose_arena_hard(). + */ + arenas_extend(0); + if (arenas[0] == NULL) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + /* + * Assign the initial arena to the initial thread, in order to avoid + * spurious creation of an extra arena if the application switches to + * threaded mode. + */ + ARENA_SET(arenas[0]); + arenas[0]->nthreads++; + + if (malloc_mutex_init(&arenas_lock)) + return (true); + + if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) { + malloc_mutex_unlock(&init_lock); + return (true); + } + +#ifdef JEMALLOC_PROF + if (prof_boot2()) { + malloc_mutex_unlock(&init_lock); + return (true); + } +#endif + + /* Get number of CPUs. */ + malloc_initializer = pthread_self(); + malloc_mutex_unlock(&init_lock); + ncpus = malloc_ncpus(); + malloc_mutex_lock(&init_lock); + + if (opt_narenas == 0) { + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) + opt_narenas = ncpus << 2; + else + opt_narenas = 1; + } + narenas = opt_narenas; + /* + * Make sure that the arenas array can be allocated. In practice, this + * limit is enough to allow the allocator to function, but the ctl + * machinery will fail to allocate memory at far lower limits. + */ + if (narenas > chunksize / sizeof(arena_t *)) { + char buf[UMAX2S_BUFSIZE]; + + narenas = chunksize / sizeof(arena_t *); + malloc_write("<jemalloc>: Reducing narenas to limit ("); + malloc_write(u2s(narenas, 10, buf)); + malloc_write(")\n"); + } + + /* Allocate and initialize arenas. */ + arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); + if (arenas == NULL) { + malloc_mutex_unlock(&init_lock); + return (true); + } + /* + * Zero the array. In practice, this should always be pre-zeroed, + * since it was just mmap()ed, but let's be sure. + */ + memset(arenas, 0, sizeof(arena_t *) * narenas); + /* Copy the pointer to the one arena that was already initialized. */ + arenas[0] = init_arenas[0]; + +#ifdef JEMALLOC_ZONE + /* Register the custom zone. */ + malloc_zone_register(create_zone()); + + /* + * Convert the default szone to an "overlay zone" that is capable of + * deallocating szone-allocated objects, but allocating new objects + * from jemalloc. 
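+	 * This way, objects handed out by the original szone before this
+	 * constructor ran can still be freed safely afterward.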
+ */ + szone2ozone(malloc_default_zone()); +#endif + + malloc_initialized = true; + malloc_mutex_unlock(&init_lock); + return (false); +} + +#ifdef JEMALLOC_ZONE +JEMALLOC_ATTR(constructor) +void +jemalloc_darwin_init(void) +{ + + if (malloc_init_hard()) + abort(); +} +#endif + +/* + * End initialization functions. + */ +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. + */ + +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +JEMALLOC_P(malloc)(size_t size) +{ + void *ret; +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + size_t usize +# ifdef JEMALLOC_CC_SILENCE + = 0 +# endif + ; +#endif +#ifdef JEMALLOC_PROF + prof_thr_cnt_t *cnt +# ifdef JEMALLOC_CC_SILENCE + = NULL +# endif + ; +#endif + + if (malloc_init()) { + ret = NULL; + goto OOM; + } + + if (size == 0) { +#ifdef JEMALLOC_SYSV + if (opt_sysv == false) +#endif + size = 1; +#ifdef JEMALLOC_SYSV + else { +# ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in malloc(): " + "invalid size 0\n"); + abort(); + } +# endif + ret = NULL; + goto RETURN; + } +#endif + } + +#ifdef JEMALLOC_PROF + if (opt_prof) { + usize = s2u(size); + if ((cnt = prof_alloc_prep(usize)) == NULL) { + ret = NULL; + goto OOM; + } + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= + small_maxclass) { + ret = imalloc(small_maxclass+1); + if (ret != NULL) + arena_prof_promoted(ret, usize); + } else + ret = imalloc(size); + } else +#endif + { +#ifdef JEMALLOC_STATS + usize = s2u(size); +#endif + ret = imalloc(size); + } + +OOM: + if (ret == NULL) { +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in malloc(): " + "out of memory\n"); + abort(); + } +#endif + errno = ENOMEM; + } + +#ifdef JEMALLOC_SYSV +RETURN: +#endif +#ifdef JEMALLOC_PROF + if (opt_prof && ret != NULL) + prof_malloc(ret, usize, cnt); +#endif +#ifdef JEMALLOC_STATS + if (ret != NULL) { + assert(usize == isalloc(ret)); + ALLOCATED_ADD(usize, 0); + } +#endif + return (ret); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size) +{ + int ret; + size_t usize +#ifdef JEMALLOC_CC_SILENCE + = 0 +#endif + ; + void *result; +#ifdef JEMALLOC_PROF + prof_thr_cnt_t *cnt +# ifdef JEMALLOC_CC_SILENCE + = NULL +# endif + ; +#endif + + if (malloc_init()) + result = NULL; + else { + if (size == 0) { +#ifdef JEMALLOC_SYSV + if (opt_sysv == false) +#endif + size = 1; +#ifdef JEMALLOC_SYSV + else { +# ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in " + "posix_memalign(): invalid size " + "0\n"); + abort(); + } +# endif + result = NULL; + *memptr = NULL; + ret = 0; + goto RETURN; + } +#endif + } + + /* Make sure that alignment is a large enough power of 2. 
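+		 * That is, a power of two no smaller than sizeof(void *), as
+		 * posix_memalign(3) requires; e.g. 64 is accepted, while 48
+		 * (not a power of two) or 2 (too small on supported platforms)
+		 * fails with EINVAL below.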
*/ + if (((alignment - 1) & alignment) != 0 + || alignment < sizeof(void *)) { +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in " + "posix_memalign(): invalid alignment\n"); + abort(); + } +#endif + result = NULL; + ret = EINVAL; + goto RETURN; + } + + usize = sa2u(size, alignment, NULL); + if (usize == 0) { + result = NULL; + ret = ENOMEM; + goto RETURN; + } + +#ifdef JEMALLOC_PROF + if (opt_prof) { + if ((cnt = prof_alloc_prep(usize)) == NULL) { + result = NULL; + ret = EINVAL; + } else { + if (prof_promote && (uintptr_t)cnt != + (uintptr_t)1U && usize <= small_maxclass) { + assert(sa2u(small_maxclass+1, + alignment, NULL) != 0); + result = ipalloc(sa2u(small_maxclass+1, + alignment, NULL), alignment, false); + if (result != NULL) { + arena_prof_promoted(result, + usize); + } + } else { + result = ipalloc(usize, alignment, + false); + } + } + } else +#endif + result = ipalloc(usize, alignment, false); + } + + if (result == NULL) { +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in posix_memalign(): " + "out of memory\n"); + abort(); + } +#endif + ret = ENOMEM; + goto RETURN; + } + + *memptr = result; + ret = 0; + +RETURN: +#ifdef JEMALLOC_STATS + if (result != NULL) { + assert(usize == isalloc(result)); + ALLOCATED_ADD(usize, 0); + } +#endif +#ifdef JEMALLOC_PROF + if (opt_prof && result != NULL) + prof_malloc(result, usize, cnt); +#endif + return (ret); +} + +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +JEMALLOC_P(calloc)(size_t num, size_t size) +{ + void *ret; + size_t num_size; +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + size_t usize +# ifdef JEMALLOC_CC_SILENCE + = 0 +# endif + ; +#endif +#ifdef JEMALLOC_PROF + prof_thr_cnt_t *cnt +# ifdef JEMALLOC_CC_SILENCE + = NULL +# endif + ; +#endif + + if (malloc_init()) { + num_size = 0; + ret = NULL; + goto RETURN; + } + + num_size = num * size; + if (num_size == 0) { +#ifdef JEMALLOC_SYSV + if ((opt_sysv == false) && ((num == 0) || (size == 0))) +#endif + num_size = 1; +#ifdef JEMALLOC_SYSV + else { + ret = NULL; + goto RETURN; + } +#endif + /* + * Try to avoid division here. We know that it isn't possible to + * overflow during multiplication if neither operand uses any of the + * most significant half of the bits in a size_t. + */ + } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) + && (num_size / size != num)) { + /* size_t overflow. 
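+		 * If neither operand uses any bit in the upper half of size_t,
+		 * the product cannot wrap, so the division is only performed
+		 * when the mask test fires.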
*/ + ret = NULL; + goto RETURN; + } + +#ifdef JEMALLOC_PROF + if (opt_prof) { + usize = s2u(num_size); + if ((cnt = prof_alloc_prep(usize)) == NULL) { + ret = NULL; + goto RETURN; + } + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize + <= small_maxclass) { + ret = icalloc(small_maxclass+1); + if (ret != NULL) + arena_prof_promoted(ret, usize); + } else + ret = icalloc(num_size); + } else +#endif + { +#ifdef JEMALLOC_STATS + usize = s2u(num_size); +#endif + ret = icalloc(num_size); + } + +RETURN: + if (ret == NULL) { +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in calloc(): out of " + "memory\n"); + abort(); + } +#endif + errno = ENOMEM; + } + +#ifdef JEMALLOC_PROF + if (opt_prof && ret != NULL) + prof_malloc(ret, usize, cnt); +#endif +#ifdef JEMALLOC_STATS + if (ret != NULL) { + assert(usize == isalloc(ret)); + ALLOCATED_ADD(usize, 0); + } +#endif + return (ret); +} + +JEMALLOC_ATTR(visibility("default")) +void * +JEMALLOC_P(realloc)(void *ptr, size_t size) +{ + void *ret; +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + size_t usize +# ifdef JEMALLOC_CC_SILENCE + = 0 +# endif + ; + size_t old_size = 0; +#endif +#ifdef JEMALLOC_PROF + prof_thr_cnt_t *cnt +# ifdef JEMALLOC_CC_SILENCE + = NULL +# endif + ; + prof_ctx_t *old_ctx +# ifdef JEMALLOC_CC_SILENCE + = NULL +# endif + ; +#endif + + if (size == 0) { +#ifdef JEMALLOC_SYSV + if (opt_sysv == false) +#endif + size = 1; +#ifdef JEMALLOC_SYSV + else { + if (ptr != NULL) { +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + old_size = isalloc(ptr); +#endif +#ifdef JEMALLOC_PROF + if (opt_prof) { + old_ctx = prof_ctx_get(ptr); + cnt = NULL; + } +#endif + idalloc(ptr); + } +#ifdef JEMALLOC_PROF + else if (opt_prof) { + old_ctx = NULL; + cnt = NULL; + } +#endif + ret = NULL; + goto RETURN; + } +#endif + } + + if (ptr != NULL) { + assert(malloc_initialized || malloc_initializer == + pthread_self()); + +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + old_size = isalloc(ptr); +#endif +#ifdef JEMALLOC_PROF + if (opt_prof) { + usize = s2u(size); + old_ctx = prof_ctx_get(ptr); + if ((cnt = prof_alloc_prep(usize)) == NULL) { + ret = NULL; + goto OOM; + } + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && + usize <= small_maxclass) { + ret = iralloc(ptr, small_maxclass+1, 0, 0, + false, false); + if (ret != NULL) + arena_prof_promoted(ret, usize); + } else + ret = iralloc(ptr, size, 0, 0, false, false); + } else +#endif + { +#ifdef JEMALLOC_STATS + usize = s2u(size); +#endif + ret = iralloc(ptr, size, 0, 0, false, false); + } + +#ifdef JEMALLOC_PROF +OOM: +#endif + if (ret == NULL) { +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in realloc(): " + "out of memory\n"); + abort(); + } +#endif + errno = ENOMEM; + } + } else { +#ifdef JEMALLOC_PROF + if (opt_prof) + old_ctx = NULL; +#endif + if (malloc_init()) { +#ifdef JEMALLOC_PROF + if (opt_prof) + cnt = NULL; +#endif + ret = NULL; + } else { +#ifdef JEMALLOC_PROF + if (opt_prof) { + usize = s2u(size); + if ((cnt = prof_alloc_prep(usize)) == NULL) + ret = NULL; + else { + if (prof_promote && (uintptr_t)cnt != + (uintptr_t)1U && usize <= + small_maxclass) { + ret = imalloc(small_maxclass+1); + if (ret != NULL) { + arena_prof_promoted(ret, + usize); + } + } else + ret = imalloc(size); + } + } else +#endif + { +#ifdef JEMALLOC_STATS + usize = s2u(size); +#endif + ret = imalloc(size); + } + } + + if (ret == NULL) { +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in 
realloc(): " + "out of memory\n"); + abort(); + } +#endif + errno = ENOMEM; + } + } + +#ifdef JEMALLOC_SYSV +RETURN: +#endif +#ifdef JEMALLOC_PROF + if (opt_prof) + prof_realloc(ret, usize, cnt, old_size, old_ctx); +#endif +#ifdef JEMALLOC_STATS + if (ret != NULL) { + assert(usize == isalloc(ret)); + ALLOCATED_ADD(usize, old_size); + } +#endif + return (ret); +} + +JEMALLOC_ATTR(visibility("default")) +void +JEMALLOC_P(free)(void *ptr) +{ + + if (ptr != NULL) { +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + size_t usize; +#endif + + assert(malloc_initialized || malloc_initializer == + pthread_self()); + +#ifdef JEMALLOC_STATS + usize = isalloc(ptr); +#endif +#ifdef JEMALLOC_PROF + if (opt_prof) { +# ifndef JEMALLOC_STATS + usize = isalloc(ptr); +# endif + prof_free(ptr, usize); + } +#endif +#ifdef JEMALLOC_STATS + ALLOCATED_ADD(0, usize); +#endif + idalloc(ptr); + } +} + +/* + * End malloc(3)-compatible functions. + */ +/******************************************************************************/ +/* + * Begin non-standard override functions. + * + * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the + * entire point is to avoid accidental mixed allocator usage. + */ +#ifndef JEMALLOC_PREFIX + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +JEMALLOC_P(memalign)(size_t alignment, size_t size) +{ + void *ret; +#ifdef JEMALLOC_CC_SILENCE + int result = +#endif + JEMALLOC_P(posix_memalign)(&ret, alignment, size); +#ifdef JEMALLOC_CC_SILENCE + if (result != 0) + return (NULL); +#endif + return (ret); +} +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +JEMALLOC_P(valloc)(size_t size) +{ + void *ret; +#ifdef JEMALLOC_CC_SILENCE + int result = +#endif + JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size); +#ifdef JEMALLOC_CC_SILENCE + if (result != 0) + return (NULL); +#endif + return (ret); +} +#endif + +#endif /* JEMALLOC_PREFIX */ +/* + * End non-standard override functions. + */ +/******************************************************************************/ +/* + * Begin non-standard functions. 
+ */ + +JEMALLOC_ATTR(visibility("default")) +size_t +JEMALLOC_P(malloc_usable_size)(const void *ptr) +{ + size_t ret; + + assert(malloc_initialized || malloc_initializer == pthread_self()); + +#ifdef JEMALLOC_IVSALLOC + ret = ivsalloc(ptr); +#else + assert(ptr != NULL); + ret = isalloc(ptr); +#endif + + return (ret); +} + +JEMALLOC_ATTR(visibility("default")) +void +JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *), + void *cbopaque, const char *opts) +{ + + stats_print(write_cb, cbopaque, opts); +} + +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_byname(name, oldp, oldlenp, newp, newlen)); +} + +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_nametomib(name, mibp, miblenp)); +} + +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); +} + +JEMALLOC_INLINE void * +iallocm(size_t usize, size_t alignment, bool zero) +{ + + assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment, + NULL))); + + if (alignment != 0) + return (ipalloc(usize, alignment, zero)); + else if (zero) + return (icalloc(usize)); + else + return (imalloc(usize)); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags) +{ + void *p; + size_t usize; + size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & ALLOCM_ZERO; +#ifdef JEMALLOC_PROF + prof_thr_cnt_t *cnt; +#endif + + assert(ptr != NULL); + assert(size != 0); + + if (malloc_init()) + goto OOM; + + usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, + NULL); + if (usize == 0) + goto OOM; + +#ifdef JEMALLOC_PROF + if (opt_prof) { + if ((cnt = prof_alloc_prep(usize)) == NULL) + goto OOM; + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= + small_maxclass) { + size_t usize_promoted = (alignment == 0) ? 
+ s2u(small_maxclass+1) : sa2u(small_maxclass+1, + alignment, NULL); + assert(usize_promoted != 0); + p = iallocm(usize_promoted, alignment, zero); + if (p == NULL) + goto OOM; + arena_prof_promoted(p, usize); + } else { + p = iallocm(usize, alignment, zero); + if (p == NULL) + goto OOM; + } + + if (rsize != NULL) + *rsize = usize; + } else +#endif + { + p = iallocm(usize, alignment, zero); + if (p == NULL) + goto OOM; +#ifndef JEMALLOC_STATS + if (rsize != NULL) +#endif + { +#ifdef JEMALLOC_STATS + if (rsize != NULL) +#endif + *rsize = usize; + } + } + + *ptr = p; +#ifdef JEMALLOC_STATS + assert(usize == isalloc(p)); + ALLOCATED_ADD(usize, 0); +#endif + return (ALLOCM_SUCCESS); +OOM: +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in allocm(): " + "out of memory\n"); + abort(); + } +#endif + *ptr = NULL; + return (ALLOCM_ERR_OOM); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra, + int flags) +{ + void *p, *q; + size_t usize; +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + size_t old_size; +#endif + size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & ALLOCM_ZERO; + bool no_move = flags & ALLOCM_NO_MOVE; +#ifdef JEMALLOC_PROF + prof_thr_cnt_t *cnt; + prof_ctx_t *old_ctx; +#endif + + assert(ptr != NULL); + assert(*ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + assert(malloc_initialized || malloc_initializer == pthread_self()); + + p = *ptr; +#ifdef JEMALLOC_PROF + if (opt_prof) { + /* + * usize isn't knowable before iralloc() returns when extra is + * non-zero. Therefore, compute its maximum possible value and + * use that in prof_alloc_prep() to decide whether to capture a + * backtrace. prof_realloc() will use the actual usize to + * decide whether to sample. + */ + size_t max_usize = (alignment == 0) ? s2u(size+extra) : + sa2u(size+extra, alignment, NULL); + old_size = isalloc(p); + old_ctx = prof_ctx_get(p); + if ((cnt = prof_alloc_prep(max_usize)) == NULL) + goto OOM; + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && max_usize + <= small_maxclass) { + q = iralloc(p, small_maxclass+1, (small_maxclass+1 >= + size+extra) ? 
0 : size+extra - (small_maxclass+1), + alignment, zero, no_move); + if (q == NULL) + goto ERR; + usize = isalloc(q); + arena_prof_promoted(q, usize); + } else { + q = iralloc(p, size, extra, alignment, zero, no_move); + if (q == NULL) + goto ERR; + usize = isalloc(q); + } + prof_realloc(q, usize, cnt, old_size, old_ctx); + if (rsize != NULL) + *rsize = usize; + } else +#endif + { +#ifdef JEMALLOC_STATS + old_size = isalloc(p); +#endif + q = iralloc(p, size, extra, alignment, zero, no_move); + if (q == NULL) + goto ERR; +#ifndef JEMALLOC_STATS + if (rsize != NULL) +#endif + { + usize = isalloc(q); +#ifdef JEMALLOC_STATS + if (rsize != NULL) +#endif + *rsize = usize; + } + } + + *ptr = q; +#ifdef JEMALLOC_STATS + ALLOCATED_ADD(usize, old_size); +#endif + return (ALLOCM_SUCCESS); +ERR: + if (no_move) + return (ALLOCM_ERR_NOT_MOVED); +#ifdef JEMALLOC_PROF +OOM: +#endif +#ifdef JEMALLOC_XMALLOC + if (opt_xmalloc) { + malloc_write("<jemalloc>: Error in rallocm(): " + "out of memory\n"); + abort(); + } +#endif + return (ALLOCM_ERR_OOM); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags) +{ + size_t sz; + + assert(malloc_initialized || malloc_initializer == pthread_self()); + +#ifdef JEMALLOC_IVSALLOC + sz = ivsalloc(ptr); +#else + assert(ptr != NULL); + sz = isalloc(ptr); +#endif + assert(rsize != NULL); + *rsize = sz; + + return (ALLOCM_SUCCESS); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(dallocm)(void *ptr, int flags) +{ +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + size_t usize; +#endif + + assert(ptr != NULL); + assert(malloc_initialized || malloc_initializer == pthread_self()); + +#ifdef JEMALLOC_STATS + usize = isalloc(ptr); +#endif +#ifdef JEMALLOC_PROF + if (opt_prof) { +# ifndef JEMALLOC_STATS + usize = isalloc(ptr); +# endif + prof_free(ptr, usize); + } +#endif +#ifdef JEMALLOC_STATS + ALLOCATED_ADD(0, usize); +#endif + idalloc(ptr); + + return (ALLOCM_SUCCESS); +} + +/* + * End non-standard functions. + */ +/******************************************************************************/ + +/* + * The following functions are used by threading libraries for protection of + * malloc during fork(). + */ + +void +jemalloc_prefork(void) +{ + unsigned i; + + /* Acquire all mutexes in a safe order. */ + + malloc_mutex_lock(&arenas_lock); + for (i = 0; i < narenas; i++) { + if (arenas[i] != NULL) + malloc_mutex_lock(&arenas[i]->lock); + } + + malloc_mutex_lock(&base_mtx); + + malloc_mutex_lock(&huge_mtx); + +#ifdef JEMALLOC_DSS + malloc_mutex_lock(&dss_mtx); +#endif + +#ifdef JEMALLOC_SWAP + malloc_mutex_lock(&swap_mtx); +#endif +} + +void +jemalloc_postfork(void) +{ + unsigned i; + + /* Release all mutexes, now that fork() has completed. 
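+	 * The unlock order below is the reverse of the acquisition order in
+	 * jemalloc_prefork().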
*/ + +#ifdef JEMALLOC_SWAP + malloc_mutex_unlock(&swap_mtx); +#endif + +#ifdef JEMALLOC_DSS + malloc_mutex_unlock(&dss_mtx); +#endif + + malloc_mutex_unlock(&huge_mtx); + + malloc_mutex_unlock(&base_mtx); + + for (i = 0; i < narenas; i++) { + if (arenas[i] != NULL) + malloc_mutex_unlock(&arenas[i]->lock); + } + malloc_mutex_unlock(&arenas_lock); +} + +/******************************************************************************/ diff --git a/src/mb.c b/src/mb.c new file mode 100644 index 0000000..dc2c0a2 --- /dev/null +++ b/src/mb.c @@ -0,0 +1,2 @@ +#define JEMALLOC_MB_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/src/mutex.c b/src/mutex.c new file mode 100644 index 0000000..ca89ef1 --- /dev/null +++ b/src/mutex.c @@ -0,0 +1,90 @@ +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +#ifdef JEMALLOC_LAZY_LOCK +bool isthreaded = false; +#endif + +#ifdef JEMALLOC_LAZY_LOCK +static void pthread_create_once(void); +#endif + +/******************************************************************************/ +/* + * We intercept pthread_create() calls in order to toggle isthreaded if the + * process goes multi-threaded. + */ + +#ifdef JEMALLOC_LAZY_LOCK +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); + +static void +pthread_create_once(void) +{ + + pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); + if (pthread_create_fptr == NULL) { + malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " + "\"pthread_create\")\n"); + abort(); + } + + isthreaded = true; +} + +JEMALLOC_ATTR(visibility("default")) +int +pthread_create(pthread_t *__restrict thread, + const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), + void *__restrict arg) +{ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + pthread_once(&once_control, pthread_create_once); + + return (pthread_create_fptr(thread, attr, start_routine, arg)); +} +#endif + +/******************************************************************************/ + +bool +malloc_mutex_init(malloc_mutex_t *mutex) +{ +#ifdef JEMALLOC_OSSPIN + *mutex = 0; +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) + return (true); +#ifdef PTHREAD_MUTEX_ADAPTIVE_NP + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); +#else + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); +#endif + if (pthread_mutex_init(mutex, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return (true); + } + pthread_mutexattr_destroy(&attr); + +#endif + return (false); +} + +void +malloc_mutex_destroy(malloc_mutex_t *mutex) +{ + +#ifndef JEMALLOC_OSSPIN + if (pthread_mutex_destroy(mutex) != 0) { + malloc_write("<jemalloc>: Error in pthread_mutex_destroy()\n"); + abort(); + } +#endif +} diff --git a/src/prof.c b/src/prof.c new file mode 100644 index 0000000..8370042 --- /dev/null +++ b/src/prof.c @@ -0,0 +1,1243 @@ +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_internal.h" +#ifdef JEMALLOC_PROF +/******************************************************************************/ + +#ifdef JEMALLOC_PROF_LIBUNWIND +#define UNW_LOCAL_ONLY +#include <libunwind.h> +#endif + +#ifdef JEMALLOC_PROF_LIBGCC +#include <unwind.h> +#endif + +/******************************************************************************/ +/* Data. 
*/ + +bool opt_prof = false; +bool opt_prof_active = true; +size_t opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; +bool opt_prof_leak = false; +bool opt_prof_accum = true; +ssize_t opt_lg_prof_tcmax = LG_PROF_TCMAX_DEFAULT; +char opt_prof_prefix[PATH_MAX + 1]; + +uint64_t prof_interval; +bool prof_promote; + +unsigned prof_bt_max; + +#ifndef NO_TLS +__thread prof_tdata_t *prof_tdata_tls + JEMALLOC_ATTR(tls_model("initial-exec")); +#endif +pthread_key_t prof_tdata_tsd; + +/* + * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2ctx; +static malloc_mutex_t bt2ctx_mtx; + +static malloc_mutex_t prof_dump_seq_mtx; +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since + * it must be locked anyway during dumping. + */ +static char prof_dump_buf[PROF_DUMP_BUF_SIZE]; +static unsigned prof_dump_buf_end; +static int prof_dump_fd; + +/* Do not dump any profiles until bootstrapping is complete. */ +static bool prof_booted = false; + +static malloc_mutex_t enq_mtx; +static bool enq; +static bool enq_idump; +static bool enq_gdump; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static prof_bt_t *bt_dup(prof_bt_t *bt); +static void bt_destroy(prof_bt_t *bt); +#ifdef JEMALLOC_PROF_LIBGCC +static _Unwind_Reason_Code prof_unwind_init_callback( + struct _Unwind_Context *context, void *arg); +static _Unwind_Reason_Code prof_unwind_callback( + struct _Unwind_Context *context, void *arg); +#endif +static bool prof_flush(bool propagate_err); +static bool prof_write(const char *s, bool propagate_err); +static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, + size_t *leak_nctx); +static void prof_ctx_destroy(prof_ctx_t *ctx); +static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt); +static bool prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, + bool propagate_err); +static bool prof_dump_maps(bool propagate_err); +static bool prof_dump(const char *filename, bool leakcheck, + bool propagate_err); +static void prof_dump_filename(char *filename, char v, int64_t vseq); +static void prof_fdump(void); +static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, + size_t *hash2); +static bool prof_bt_keycomp(const void *k1, const void *k2); +static void prof_tdata_cleanup(void *arg); + +/******************************************************************************/ + +void +bt_init(prof_bt_t *bt, void **vec) +{ + + bt->vec = vec; + bt->len = 0; +} + +static void +bt_destroy(prof_bt_t *bt) +{ + + idalloc(bt); +} + +static prof_bt_t * +bt_dup(prof_bt_t *bt) +{ + prof_bt_t *ret; + + /* + * Create a single allocation that has space for vec immediately + * following the prof_bt_t structure. The backtraces that get + * stored in the backtrace caches are copied from stack-allocated + * temporary variables, so size is known at creation time. Making this + * a contiguous object improves cache locality. 
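+	 * It also means a single idalloc() in bt_destroy() releases both the
+	 * header and the vector.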
+ */ + ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) + + (bt->len * sizeof(void *))); + if (ret == NULL) + return (NULL); + ret->vec = (void **)((uintptr_t)ret + + QUANTUM_CEILING(sizeof(prof_bt_t))); + memcpy(ret->vec, bt->vec, bt->len * sizeof(void *)); + ret->len = bt->len; + + return (ret); +} + +static inline void +prof_enter(void) +{ + + malloc_mutex_lock(&enq_mtx); + enq = true; + malloc_mutex_unlock(&enq_mtx); + + malloc_mutex_lock(&bt2ctx_mtx); +} + +static inline void +prof_leave(void) +{ + bool idump, gdump; + + malloc_mutex_unlock(&bt2ctx_mtx); + + malloc_mutex_lock(&enq_mtx); + enq = false; + idump = enq_idump; + enq_idump = false; + gdump = enq_gdump; + enq_gdump = false; + malloc_mutex_unlock(&enq_mtx); + + if (idump) + prof_idump(); + if (gdump) + prof_gdump(); +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +void +prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max) +{ + unw_context_t uc; + unw_cursor_t cursor; + unsigned i; + int err; + + assert(bt->len == 0); + assert(bt->vec != NULL); + assert(max <= (1U << opt_lg_prof_bt_max)); + + unw_getcontext(&uc); + unw_init_local(&cursor, &uc); + + /* Throw away (nignore+1) stack frames, if that many exist. */ + for (i = 0; i < nignore + 1; i++) { + err = unw_step(&cursor); + if (err <= 0) + return; + } + + /* + * Iterate over stack frames until there are no more, or until no space + * remains in bt. + */ + for (i = 0; i < max; i++) { + unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); + bt->len++; + err = unw_step(&cursor); + if (err <= 0) + break; + } +} +#endif +#ifdef JEMALLOC_PROF_LIBGCC +static _Unwind_Reason_Code +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) +{ + + return (_URC_NO_REASON); +} + +static _Unwind_Reason_Code +prof_unwind_callback(struct _Unwind_Context *context, void *arg) +{ + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + + if (data->nignore > 0) + data->nignore--; + else { + data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context); + data->bt->len++; + if (data->bt->len == data->max) + return (_URC_END_OF_STACK); + } + + return (_URC_NO_REASON); +} + +void +prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max) +{ + prof_unwind_data_t data = {bt, nignore, max}; + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#endif +#ifdef JEMALLOC_PROF_GCC +void +prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max) +{ +#define BT_FRAME(i) \ + if ((i) < nignore + max) { \ + void *p; \ + if (__builtin_frame_address(i) == 0) \ + return; \ + p = __builtin_return_address(i); \ + if (p == NULL) \ + return; \ + if (i >= nignore) { \ + bt->vec[(i) - nignore] = p; \ + bt->len = (i) - nignore + 1; \ + } \ + } else \ + return; + + assert(nignore <= 3); + assert(max <= (1U << opt_lg_prof_bt_max)); + + BT_FRAME(0) + BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) + BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + 
BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) + + /* Extras to compensate for nignore. */ + BT_FRAME(128) + BT_FRAME(129) + BT_FRAME(130) +#undef BT_FRAME +} +#endif + +prof_thr_cnt_t * +prof_lookup(prof_bt_t *bt) +{ + union { + prof_thr_cnt_t *p; + void *v; + } ret; + prof_tdata_t *prof_tdata; + + prof_tdata = PROF_TCACHE_GET(); + if (prof_tdata == NULL) { + prof_tdata = prof_tdata_init(); + if (prof_tdata == NULL) + return (NULL); + } + + if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) { + union { + prof_bt_t *p; + void *v; + } btkey; + union { + prof_ctx_t *p; + void *v; + } ctx; + bool new_ctx; + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ + prof_enter(); + if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) { + /* bt has never been seen before. Insert it. */ + ctx.v = imalloc(sizeof(prof_ctx_t)); + if (ctx.v == NULL) { + prof_leave(); + return (NULL); + } + btkey.p = bt_dup(bt); + if (btkey.v == NULL) { + prof_leave(); + idalloc(ctx.v); + return (NULL); + } + ctx.p->bt = btkey.p; + if (malloc_mutex_init(&ctx.p->lock)) { + prof_leave(); + idalloc(btkey.v); + idalloc(ctx.v); + return (NULL); + } + memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t)); + ql_new(&ctx.p->cnts_ql); + if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) { + /* OOM. */ + prof_leave(); + malloc_mutex_destroy(&ctx.p->lock); + idalloc(btkey.v); + idalloc(ctx.v); + return (NULL); + } + /* + * Artificially raise curobjs, in order to avoid a race + * condition with prof_ctx_merge()/prof_ctx_destroy(). + */ + ctx.p->cnt_merged.curobjs++; + new_ctx = true; + } else + new_ctx = false; + prof_leave(); + + /* Link a prof_thd_cnt_t into ctx for this thread. */ + if (opt_lg_prof_tcmax >= 0 && ckh_count(&prof_tdata->bt2cnt) + == (ZU(1) << opt_lg_prof_tcmax)) { + assert(ckh_count(&prof_tdata->bt2cnt) > 0); + /* + * Flush the least recently used cnt in order to keep + * bt2cnt from becoming too large. + */ + ret.p = ql_last(&prof_tdata->lru_ql, lru_link); + assert(ret.v != NULL); + ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, NULL, + NULL); + ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); + prof_ctx_merge(ret.p->ctx, ret.p); + /* ret can now be re-used. */ + } else { + assert(opt_lg_prof_tcmax < 0 || + ckh_count(&prof_tdata->bt2cnt) < (ZU(1) << + opt_lg_prof_tcmax)); + /* Allocate and partially initialize a new cnt. 
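(Editorial note, not part of this commit: as a concrete reading of the eviction branch above, with opt_lg_prof_tcmax set to 10 a thread caches at most 1024 distinct backtraces in bt2cnt; when a new unique backtrace is sampled at that limit, the counter at the tail of lru_ql is merged into its ctx via prof_ctx_merge() and that prof_thr_cnt_t is reused for the new backtrace instead of allocating a fresh one, as the comment that follows describes.)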
*/ + ret.v = imalloc(sizeof(prof_thr_cnt_t)); + if (ret.p == NULL) { + if (new_ctx) { + malloc_mutex_lock(&ctx.p->lock); + ctx.p->cnt_merged.curobjs--; + malloc_mutex_unlock(&ctx.p->lock); + } + return (NULL); + } + ql_elm_new(ret.p, cnts_link); + ql_elm_new(ret.p, lru_link); + } + /* Finish initializing ret. */ + ret.p->ctx = ctx.p; + ret.p->epoch = 0; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) { + if (new_ctx) { + malloc_mutex_lock(&ctx.p->lock); + ctx.p->cnt_merged.curobjs--; + malloc_mutex_unlock(&ctx.p->lock); + } + idalloc(ret.v); + return (NULL); + } + ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); + malloc_mutex_lock(&ctx.p->lock); + ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link); + if (new_ctx) + ctx.p->cnt_merged.curobjs--; + malloc_mutex_unlock(&ctx.p->lock); + } else { + /* Move ret to the front of the LRU. */ + ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); + ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); + } + + return (ret.p); +} + +static bool +prof_flush(bool propagate_err) +{ + bool ret = false; + ssize_t err; + + err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + if (err == -1) { + if (propagate_err == false) { + malloc_write("<jemalloc>: write() failed during heap " + "profile flush\n"); + if (opt_abort) + abort(); + } + ret = true; + } + prof_dump_buf_end = 0; + + return (ret); +} + +static bool +prof_write(const char *s, bool propagate_err) +{ + unsigned i, slen, n; + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ + if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE) + if (prof_flush(propagate_err) && propagate_err) + return (true); + + if (prof_dump_buf_end + slen <= PROF_DUMP_BUF_SIZE) { + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUF_SIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } + + return (false); +} + +static void +prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx) +{ + prof_thr_cnt_t *thr_cnt; + prof_cnt_t tcnt; + + malloc_mutex_lock(&ctx->lock); + + memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t)); + ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) { + volatile unsigned *epoch = &thr_cnt->epoch; + + while (true) { + unsigned epoch0 = *epoch; + + /* Make sure epoch is even. */ + if (epoch0 & 1U) + continue; + + memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t)); + + /* Terminate if epoch didn't change while reading. */ + if (*epoch == epoch0) + break; + } + + ctx->cnt_summed.curobjs += tcnt.curobjs; + ctx->cnt_summed.curbytes += tcnt.curbytes; + if (opt_prof_accum) { + ctx->cnt_summed.accumobjs += tcnt.accumobjs; + ctx->cnt_summed.accumbytes += tcnt.accumbytes; + } + } + + if (ctx->cnt_summed.curobjs != 0) + (*leak_nctx)++; + + /* Add to cnt_all. */ + cnt_all->curobjs += ctx->cnt_summed.curobjs; + cnt_all->curbytes += ctx->cnt_summed.curbytes; + if (opt_prof_accum) { + cnt_all->accumobjs += ctx->cnt_summed.accumobjs; + cnt_all->accumbytes += ctx->cnt_summed.accumbytes; + } + + malloc_mutex_unlock(&ctx->lock); +} + +static void +prof_ctx_destroy(prof_ctx_t *ctx) +{ + + /* + * Check that ctx is still unused by any thread cache before destroying + * it. 
prof_lookup() interlocks bt2ctx_mtx and ctx->lock in order to + * avoid a race condition with this function, and prof_ctx_merge() + * artificially raises ctx->cnt_merged.curobjs in order to avoid a race + * between the main body of prof_ctx_merge() and entry into this + * function. + */ + prof_enter(); + malloc_mutex_lock(&ctx->lock); + if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) { + assert(ctx->cnt_merged.curbytes == 0); + assert(ctx->cnt_merged.accumobjs == 0); + assert(ctx->cnt_merged.accumbytes == 0); + /* Remove ctx from bt2ctx. */ + ckh_remove(&bt2ctx, ctx->bt, NULL, NULL); + prof_leave(); + /* Destroy ctx. */ + malloc_mutex_unlock(&ctx->lock); + bt_destroy(ctx->bt); + malloc_mutex_destroy(&ctx->lock); + idalloc(ctx); + } else { + /* Compensate for increment in prof_ctx_merge(). */ + ctx->cnt_merged.curobjs--; + malloc_mutex_unlock(&ctx->lock); + prof_leave(); + } +} + +static void +prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt) +{ + bool destroy; + + /* Merge cnt stats and detach from ctx. */ + malloc_mutex_lock(&ctx->lock); + ctx->cnt_merged.curobjs += cnt->cnts.curobjs; + ctx->cnt_merged.curbytes += cnt->cnts.curbytes; + ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs; + ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes; + ql_remove(&ctx->cnts_ql, cnt, cnts_link); + if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL && + ctx->cnt_merged.curobjs == 0) { + /* + * Artificially raise ctx->cnt_merged.curobjs in order to keep + * another thread from winning the race to destroy ctx while + * this one has ctx->lock dropped. Without this, it would be + * possible for another thread to: + * + * 1) Sample an allocation associated with ctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_ctx_destroy(ctx). + * + * The result would be that ctx no longer exists by the time + * this thread accesses it in prof_ctx_destroy(). 
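(Editorial sketch, not jemalloc code: the "bump a count under the lock before dropping it" idiom described above, shown in isolation with invented names so the race is easier to see.)

#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t	lock;
	unsigned	curobjs;	/* Stands in for cnt_merged.curobjs. */
} obj_t;

/*
 * Returns true if the caller should go on to destroy obj.  The increment
 * keeps a concurrent destroyer from freeing obj while this thread has
 * dropped the lock but still intends to touch the object.
 */
static bool
obj_quiesce(obj_t *obj)
{
	bool destroy;

	pthread_mutex_lock(&obj->lock);
	if (obj->curobjs == 0) {
		obj->curobjs++;		/* Artificial reference. */
		destroy = true;
	} else
		destroy = false;
	pthread_mutex_unlock(&obj->lock);
	return (destroy);
}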
+ */ + ctx->cnt_merged.curobjs++; + destroy = true; + } else + destroy = false; + malloc_mutex_unlock(&ctx->lock); + if (destroy) + prof_ctx_destroy(ctx); +} + +static bool +prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err) +{ + char buf[UMAX2S_BUFSIZE]; + unsigned i; + + if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) { + assert(ctx->cnt_summed.curbytes == 0); + assert(ctx->cnt_summed.accumobjs == 0); + assert(ctx->cnt_summed.accumbytes == 0); + return (false); + } + + if (prof_write(u2s(ctx->cnt_summed.curobjs, 10, buf), propagate_err) + || prof_write(": ", propagate_err) + || prof_write(u2s(ctx->cnt_summed.curbytes, 10, buf), + propagate_err) + || prof_write(" [", propagate_err) + || prof_write(u2s(ctx->cnt_summed.accumobjs, 10, buf), + propagate_err) + || prof_write(": ", propagate_err) + || prof_write(u2s(ctx->cnt_summed.accumbytes, 10, buf), + propagate_err) + || prof_write("] @", propagate_err)) + return (true); + + for (i = 0; i < bt->len; i++) { + if (prof_write(" 0x", propagate_err) + || prof_write(u2s((uintptr_t)bt->vec[i], 16, buf), + propagate_err)) + return (true); + } + + if (prof_write("\n", propagate_err)) + return (true); + + return (false); +} + +static bool +prof_dump_maps(bool propagate_err) +{ + int mfd; + char buf[UMAX2S_BUFSIZE]; + char *s; + unsigned i, slen; + /* /proc/<pid>/maps\0 */ + char mpath[6 + UMAX2S_BUFSIZE + + 5 + 1]; + + i = 0; + + s = "/proc/"; + slen = strlen(s); + memcpy(&mpath[i], s, slen); + i += slen; + + s = u2s(getpid(), 10, buf); + slen = strlen(s); + memcpy(&mpath[i], s, slen); + i += slen; + + s = "/maps"; + slen = strlen(s); + memcpy(&mpath[i], s, slen); + i += slen; + + mpath[i] = '\0'; + + mfd = open(mpath, O_RDONLY); + if (mfd != -1) { + ssize_t nread; + + if (prof_write("\nMAPPED_LIBRARIES:\n", propagate_err) && + propagate_err) + return (true); + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_flush(propagate_err) && propagate_err) + return (true); + } + nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], + PROF_DUMP_BUF_SIZE - prof_dump_buf_end); + } while (nread > 0); + close(mfd); + } else + return (true); + + return (false); +} + +static bool +prof_dump(const char *filename, bool leakcheck, bool propagate_err) +{ + prof_cnt_t cnt_all; + size_t tabind; + union { + prof_bt_t *p; + void *v; + } bt; + union { + prof_ctx_t *p; + void *v; + } ctx; + char buf[UMAX2S_BUFSIZE]; + size_t leak_nctx; + + prof_enter(); + prof_dump_fd = creat(filename, 0644); + if (prof_dump_fd == -1) { + if (propagate_err == false) { + malloc_write("<jemalloc>: creat(\""); + malloc_write(filename); + malloc_write("\", 0644) failed\n"); + if (opt_abort) + abort(); + } + goto ERROR; + } + + /* Merge per thread profile stats, and sum them in cnt_all. */ + memset(&cnt_all, 0, sizeof(prof_cnt_t)); + leak_nctx = 0; + for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;) + prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx); + + /* Dump profile header. 
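(Editorial aside, not part of this commit: with made-up numbers, the header written just below and the per-context lines written by prof_dump_ctx() above combine into output of the following shape when sampling is enabled, which is the text pprof consumes.)

heap profile: 3: 1536 [7: 8192] @ heap_v2/524288
1: 512 [2: 1024] @ 0x400b2a 0x400c91 0x7f3a12c8e76d
2: 1024 [5: 7168] @ 0x400b2a 0x401337
MAPPED_LIBRARIES:
<raw contents of /proc/<pid>/maps follow>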
*/ + if (prof_write("heap profile: ", propagate_err) + || prof_write(u2s(cnt_all.curobjs, 10, buf), propagate_err) + || prof_write(": ", propagate_err) + || prof_write(u2s(cnt_all.curbytes, 10, buf), propagate_err) + || prof_write(" [", propagate_err) + || prof_write(u2s(cnt_all.accumobjs, 10, buf), propagate_err) + || prof_write(": ", propagate_err) + || prof_write(u2s(cnt_all.accumbytes, 10, buf), propagate_err)) + goto ERROR; + + if (opt_lg_prof_sample == 0) { + if (prof_write("] @ heapprofile\n", propagate_err)) + goto ERROR; + } else { + if (prof_write("] @ heap_v2/", propagate_err) + || prof_write(u2s((uint64_t)1U << opt_lg_prof_sample, 10, + buf), propagate_err) + || prof_write("\n", propagate_err)) + goto ERROR; + } + + /* Dump per ctx profile stats. */ + for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v) + == false;) { + if (prof_dump_ctx(ctx.p, bt.p, propagate_err)) + goto ERROR; + } + + /* Dump /proc/<pid>/maps if possible. */ + if (prof_dump_maps(propagate_err)) + goto ERROR; + + if (prof_flush(propagate_err)) + goto ERROR; + close(prof_dump_fd); + prof_leave(); + + if (leakcheck && cnt_all.curbytes != 0) { + malloc_write("<jemalloc>: Leak summary: "); + malloc_write(u2s(cnt_all.curbytes, 10, buf)); + malloc_write((cnt_all.curbytes != 1) ? " bytes, " : " byte, "); + malloc_write(u2s(cnt_all.curobjs, 10, buf)); + malloc_write((cnt_all.curobjs != 1) ? " objects, " : + " object, "); + malloc_write(u2s(leak_nctx, 10, buf)); + malloc_write((leak_nctx != 1) ? " contexts\n" : " context\n"); + malloc_write("<jemalloc>: Run pprof on \""); + malloc_write(filename); + malloc_write("\" for leak detail\n"); + } + + return (false); +ERROR: + prof_leave(); + return (true); +} + +#define DUMP_FILENAME_BUFSIZE (PATH_MAX+ UMAX2S_BUFSIZE \ + + 1 \ + + UMAX2S_BUFSIZE \ + + 2 \ + + UMAX2S_BUFSIZE \ + + 5 + 1) +static void +prof_dump_filename(char *filename, char v, int64_t vseq) +{ + char buf[UMAX2S_BUFSIZE]; + char *s; + unsigned i, slen; + + /* + * Construct a filename of the form: + * + * <prefix>.<pid>.<seq>.v<vseq>.heap\0 + */ + + i = 0; + + s = opt_prof_prefix; + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + + s = "."; + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + + s = u2s(getpid(), 10, buf); + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + + s = "."; + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + + s = u2s(prof_dump_seq, 10, buf); + prof_dump_seq++; + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + + s = "."; + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + + filename[i] = v; + i++; + + if (vseq != 0xffffffffffffffffLLU) { + s = u2s(vseq, 10, buf); + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + } + + s = ".heap"; + slen = strlen(s); + memcpy(&filename[i], s, slen); + i += slen; + + filename[i] = '\0'; +} + +static void +prof_fdump(void) +{ + char filename[DUMP_FILENAME_BUFSIZE]; + + if (prof_booted == false) + return; + + if (opt_prof_prefix[0] != '\0') { + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', 0xffffffffffffffffLLU); + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(filename, opt_prof_leak, false); + } +} + +void +prof_idump(void) +{ + char filename[DUMP_FILENAME_BUFSIZE]; + + if (prof_booted == false) + return; + malloc_mutex_lock(&enq_mtx); + if (enq) { + enq_idump = true; + malloc_mutex_unlock(&enq_mtx); + return; + } + malloc_mutex_unlock(&enq_mtx); + + if (opt_prof_prefix[0] != '\0') { + 
malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'i', prof_dump_iseq); + prof_dump_iseq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(filename, false, false); + } +} + +bool +prof_mdump(const char *filename) +{ + char filename_buf[DUMP_FILENAME_BUFSIZE]; + + if (opt_prof == false || prof_booted == false) + return (true); + + if (filename == NULL) { + /* No filename specified, so automatically generate one. */ + if (opt_prof_prefix[0] == '\0') + return (true); + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + filename = filename_buf; + } + return (prof_dump(filename, false, true)); +} + +void +prof_gdump(void) +{ + char filename[DUMP_FILENAME_BUFSIZE]; + + if (prof_booted == false) + return; + malloc_mutex_lock(&enq_mtx); + if (enq) { + enq_gdump = true; + malloc_mutex_unlock(&enq_mtx); + return; + } + malloc_mutex_unlock(&enq_mtx); + + if (opt_prof_prefix[0] != '\0') { + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'u', prof_dump_useq); + prof_dump_useq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(filename, false, false); + } +} + +static void +prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2) +{ + size_t ret1, ret2; + uint64_t h; + prof_bt_t *bt = (prof_bt_t *)key; + + assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64)); + assert(hash1 != NULL); + assert(hash2 != NULL); + + h = hash(bt->vec, bt->len * sizeof(void *), 0x94122f335b332aeaLLU); + if (minbits <= 32) { + /* + * Avoid doing multiple hashes, since a single hash provides + * enough bits. + */ + ret1 = h & ZU(0xffffffffU); + ret2 = h >> 32; + } else { + ret1 = h; + ret2 = hash(bt->vec, bt->len * sizeof(void *), + 0x8432a476666bbc13U); + } + + *hash1 = ret1; + *hash2 = ret2; +} + +static bool +prof_bt_keycomp(const void *k1, const void *k2) +{ + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + if (bt1->len != bt2->len) + return (false); + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + +prof_tdata_t * +prof_tdata_init(void) +{ + prof_tdata_t *prof_tdata; + + /* Initialize an empty cache for this thread. */ + prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t)); + if (prof_tdata == NULL) + return (NULL); + + if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS, + prof_bt_hash, prof_bt_keycomp)) { + idalloc(prof_tdata); + return (NULL); + } + ql_new(&prof_tdata->lru_ql); + + prof_tdata->vec = imalloc(sizeof(void *) * prof_bt_max); + if (prof_tdata->vec == NULL) { + + ckh_delete(&prof_tdata->bt2cnt); + idalloc(prof_tdata); + return (NULL); + } + + prof_tdata->prn_state = 0; + prof_tdata->threshold = 0; + prof_tdata->accum = 0; + + PROF_TCACHE_SET(prof_tdata); + + return (prof_tdata); +} + +static void +prof_tdata_cleanup(void *arg) +{ + prof_tdata_t *prof_tdata; + + prof_tdata = PROF_TCACHE_GET(); + if (prof_tdata != NULL) { + prof_thr_cnt_t *cnt; + + /* + * Delete the hash table. All of its contents can still be + * iterated over via the LRU. + */ + ckh_delete(&prof_tdata->bt2cnt); + + /* + * Iteratively merge cnt's into the global stats and delete + * them. 
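(Editorial sketch, not jemalloc code: PROF_TCACHE_GET/SET plus prof_tdata_cleanup() follow the standard pthread thread-specific-data pattern. Stripped to its bones, with invented names, it looks like the fragment below; the key itself is created once during bootstrap, as prof_boot2() does with prof_tdata_tsd further down.)

#include <pthread.h>
#include <stdlib.h>

/* Created once at startup: pthread_key_create(&tdata_key, tdata_cleanup). */
static pthread_key_t tdata_key;

static void
tdata_cleanup(void *arg)
{
	/* Runs automatically when a thread that set the key exits. */
	free(arg);
}

static void *
tdata_get(void)
{
	void *tdata = pthread_getspecific(tdata_key);

	if (tdata == NULL) {
		/* Lazily allocate this thread's state on first use. */
		tdata = calloc(1, 64);
		if (tdata != NULL)
			pthread_setspecific(tdata_key, tdata);
	}
	return (tdata);
}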
+ */ + while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) { + prof_ctx_merge(cnt->ctx, cnt); + ql_remove(&prof_tdata->lru_ql, cnt, lru_link); + idalloc(cnt); + } + + idalloc(prof_tdata->vec); + + idalloc(prof_tdata); + PROF_TCACHE_SET(NULL); + } +} + +void +prof_boot0(void) +{ + + memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, + sizeof(PROF_PREFIX_DEFAULT)); +} + +void +prof_boot1(void) +{ + + /* + * opt_prof and prof_promote must be in their final state before any + * arenas are initialized, so this function must be executed early. + */ + + if (opt_prof_leak && opt_prof == false) { + /* + * Enable opt_prof, but in such a way that profiles are never + * automatically dumped. + */ + opt_prof = true; + opt_prof_gdump = false; + prof_interval = 0; + } else if (opt_prof) { + if (opt_lg_prof_interval >= 0) { + prof_interval = (((uint64_t)1U) << + opt_lg_prof_interval); + } else + prof_interval = 0; + } + + prof_promote = (opt_prof && opt_lg_prof_sample > PAGE_SHIFT); +} + +bool +prof_boot2(void) +{ + + if (opt_prof) { + if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) + return (true); + if (malloc_mutex_init(&bt2ctx_mtx)) + return (true); + if (pthread_key_create(&prof_tdata_tsd, prof_tdata_cleanup) + != 0) { + malloc_write( + "<jemalloc>: Error in pthread_key_create()\n"); + abort(); + } + + prof_bt_max = (1U << opt_lg_prof_bt_max); + if (malloc_mutex_init(&prof_dump_seq_mtx)) + return (true); + + if (malloc_mutex_init(&enq_mtx)) + return (true); + enq = false; + enq_idump = false; + enq_gdump = false; + + if (atexit(prof_fdump) != 0) { + malloc_write("<jemalloc>: Error in atexit()\n"); + if (opt_abort) + abort(); + } + } + +#ifdef JEMALLOC_PROF_LIBGCC + /* + * Cause the backtracing machinery to allocate its internal state + * before enabling profiling. + */ + _Unwind_Backtrace(prof_unwind_init_callback, NULL); +#endif + + prof_booted = true; + + return (false); +} + +/******************************************************************************/ +#endif /* JEMALLOC_PROF */ diff --git a/src/rtree.c b/src/rtree.c new file mode 100644 index 0000000..eb0ff1e --- /dev/null +++ b/src/rtree.c @@ -0,0 +1,46 @@ +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +rtree_t * +rtree_new(unsigned bits) +{ + rtree_t *ret; + unsigned bits_per_level, height, i; + + bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; + height = bits / bits_per_level; + if (height * bits_per_level != bits) + height++; + assert(height * bits_per_level >= bits); + + ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) + + (sizeof(unsigned) * height)); + if (ret == NULL) + return (NULL); + memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * + height)); + + if (malloc_mutex_init(&ret->mutex)) { + /* Leak the rtree. */ + return (NULL); + } + ret->height = height; + if (bits_per_level * height > bits) + ret->level2bits[0] = bits % bits_per_level; + else + ret->level2bits[0] = bits_per_level; + for (i = 1; i < height; i++) + ret->level2bits[i] = bits_per_level; + + ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]); + if (ret->root == NULL) { + /* + * We leak the rtree here, since there's no generic base + * deallocation. 
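(Editorial worked example for rtree_new(), not part of this commit, using assumed constants: if RTREE_NODESIZE were 4096 bytes and pointers 8 bytes, then pow2_ceil(4096/8) is 512 and bits_per_level = ffs(512) - 1 = 9. Mapping a 20-bit key gives height = 3, and since 3 * 9 > 20 the first level absorbs the remainder: level2bits = {2, 9, 9}, which sums back to 20, and the root array holds 2^2 = 4 pointers.)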
+ */ + return (NULL); + } + memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); + + return (ret); +} diff --git a/src/stats.c b/src/stats.c new file mode 100644 index 0000000..cbbbb5b --- /dev/null +++ b/src/stats.c @@ -0,0 +1,790 @@ +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +#define CTL_GET(n, v, t) do { \ + size_t sz = sizeof(t); \ + xmallctl(n, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_I_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = i; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_J_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = j; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_IJ_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = i; \ + mib[4] = j; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +/******************************************************************************/ +/* Data. */ + +bool opt_stats_print = false; + +#ifdef JEMALLOC_STATS +size_t stats_cactive = 0; +#endif + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +#ifdef JEMALLOC_STATS +static void malloc_vcprintf(void (*write_cb)(void *, const char *), + void *cbopaque, const char *format, va_list ap); +static void stats_arena_bins_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i); +static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i); +static void stats_arena_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i); +#endif + +/******************************************************************************/ + +/* + * We don't want to depend on vsnprintf() for production builds, since that can + * cause unnecessary bloat for static binaries. u2s() provides minimal integer + * printing functionality, so that malloc_printf() use can be limited to + * JEMALLOC_STATS code. + */ +char * +u2s(uint64_t x, unsigned base, char *s) +{ + unsigned i; + + i = UMAX2S_BUFSIZE - 1; + s[i] = '\0'; + switch (base) { + case 10: + do { + i--; + s[i] = "0123456789"[x % (uint64_t)10]; + x /= (uint64_t)10; + } while (x > 0); + break; + case 16: + do { + i--; + s[i] = "0123456789abcdef"[x & 0xf]; + x >>= 4; + } while (x > 0); + break; + default: + do { + i--; + s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x % + (uint64_t)base]; + x /= (uint64_t)base; + } while (x > 0); + } + + return (&s[i]); +} + +#ifdef JEMALLOC_STATS +static void +malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap) +{ + char buf[4096]; + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. + */ + write_cb = JEMALLOC_P(malloc_message); + cbopaque = NULL; + } + + vsnprintf(buf, sizeof(buf), format, ap); + write_cb(cbopaque, buf); +} + +/* + * Print to a callback function in such a way as to (hopefully) avoid memory + * allocation. 
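(Editorial usage note, not part of this commit: u2s() above writes digits backward from the end of the caller-supplied buffer and returns a pointer into that buffer, so the buffer must stay live for as long as the returned string is used.)

/* Illustrative only; u2s() and UMAX2S_BUFSIZE are the ones defined above. */
static void
u2s_example(void)
{
	char buf[UMAX2S_BUFSIZE];
	const char *s;

	s = u2s(1234, 10, buf);	/* s is "1234"; it points into buf, not at buf[0]. */
	s = u2s(255, 16, buf);	/* s is "ff". */
	(void)s;
}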
+ */ +JEMALLOC_ATTR(format(printf, 3, 4)) +void +malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(write_cb, cbopaque, format, ap); + va_end(ap); +} + +/* + * Print to stderr in such a way as to (hopefully) avoid memory allocation. + */ +JEMALLOC_ATTR(format(printf, 1, 2)) +void +malloc_printf(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); +} +#endif + +#ifdef JEMALLOC_STATS +static void +stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i) +{ + size_t pagesize; + bool config_tcache; + unsigned nbins, j, gap_start; + + CTL_GET("arenas.pagesize", &pagesize, size_t); + + CTL_GET("config.tcache", &config_tcache, bool); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "bins: bin size regs pgs allocated nmalloc" + " ndalloc nrequests nfills nflushes" + " newruns reruns maxruns curruns\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "bins: bin size regs pgs allocated nmalloc" + " ndalloc newruns reruns maxruns" + " curruns\n"); + } + CTL_GET("arenas.nbins", &nbins, unsigned); + for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { + uint64_t nruns; + + CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); + if (nruns == 0) { + if (gap_start == UINT_MAX) + gap_start = j; + } else { + unsigned ntbins_, nqbins, ncbins, nsbins; + size_t reg_size, run_size, allocated; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t reruns; + size_t highruns, curruns; + + if (gap_start != UINT_MAX) { + if (j > gap_start + 1) { + /* Gap of more than one size class. */ + malloc_cprintf(write_cb, cbopaque, + "[%u..%u]\n", gap_start, + j - 1); + } else { + /* Gap of one size class. */ + malloc_cprintf(write_cb, cbopaque, + "[%u]\n", gap_start); + } + gap_start = UINT_MAX; + } + CTL_GET("arenas.ntbins", &ntbins_, unsigned); + CTL_GET("arenas.nqbins", &nqbins, unsigned); + CTL_GET("arenas.ncbins", &ncbins, unsigned); + CTL_GET("arenas.nsbins", &nsbins, unsigned); + CTL_J_GET("arenas.bin.0.size", ®_size, size_t); + CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); + CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); + CTL_IJ_GET("stats.arenas.0.bins.0.allocated", + &allocated, size_t); + CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", + &nmalloc, uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", + &ndalloc, uint64_t); + if (config_tcache) { + CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", + &nrequests, uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.nfills", + &nfills, uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", + &nflushes, uint64_t); + } + CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns, + uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns, + size_t); + CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns, + size_t); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64 + " %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12zu %12zu\n", + j, + j < ntbins_ ? "T" : j < ntbins_ + nqbins ? + "Q" : j < ntbins_ + nqbins + ncbins ? 
"C" : + "S", + reg_size, nregs, run_size / pagesize, + allocated, nmalloc, ndalloc, nrequests, + nfills, nflushes, nruns, reruns, highruns, + curruns); + } else { + malloc_cprintf(write_cb, cbopaque, + "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64 + " %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12zu %12zu\n", + j, + j < ntbins_ ? "T" : j < ntbins_ + nqbins ? + "Q" : j < ntbins_ + nqbins + ncbins ? "C" : + "S", + reg_size, nregs, run_size / pagesize, + allocated, nmalloc, ndalloc, nruns, reruns, + highruns, curruns); + } + } + } + if (gap_start != UINT_MAX) { + if (j > gap_start + 1) { + /* Gap of more than one size class. */ + malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", + gap_start, j - 1); + } else { + /* Gap of one size class. */ + malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); + } + } +} + +static void +stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i) +{ + size_t pagesize, nlruns, j; + ssize_t gap_start; + + CTL_GET("arenas.pagesize", &pagesize, size_t); + + malloc_cprintf(write_cb, cbopaque, + "large: size pages nmalloc ndalloc nrequests" + " maxruns curruns\n"); + CTL_GET("arenas.nlruns", &nlruns, size_t); + for (j = 0, gap_start = -1; j < nlruns; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t run_size, highruns, curruns; + + CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc, + uint64_t); + CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc, + uint64_t); + CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, + uint64_t); + if (nrequests == 0) { + if (gap_start == -1) + gap_start = j; + } else { + CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); + CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns, + size_t); + CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, + size_t); + if (gap_start != -1) { + malloc_cprintf(write_cb, cbopaque, "[%zu]\n", + j - gap_start); + gap_start = -1; + } + malloc_cprintf(write_cb, cbopaque, + "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12zu %12zu\n", + run_size, run_size / pagesize, nmalloc, ndalloc, + nrequests, highruns, curruns); + } + } + if (gap_start != -1) + malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); +} + +static void +stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i) +{ + unsigned nthreads; + size_t pagesize, pactive, pdirty, mapped; + uint64_t npurge, nmadvise, purged; + size_t small_allocated; + uint64_t small_nmalloc, small_ndalloc, small_nrequests; + size_t large_allocated; + uint64_t large_nmalloc, large_ndalloc, large_nrequests; + + CTL_GET("arenas.pagesize", &pagesize, size_t); + + CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned); + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t); + CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t); + CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t); + CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t); + CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," + " %"PRIu64" madvise%s, %"PRIu64" purged\n", + pactive, pdirty, npurge, npurge == 1 ? "" : "s", + nmadvise, nmadvise == 1 ? 
"" : "s", purged); + + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc ndalloc nrequests\n"); + CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); + CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); + CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); + CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", + small_allocated, small_nmalloc, small_ndalloc, small_nrequests); + CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); + CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); + CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); + CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", + large_allocated, large_nmalloc, large_ndalloc, large_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", + small_allocated + large_allocated, + small_nmalloc + large_nmalloc, + small_ndalloc + large_ndalloc, + small_nrequests + large_nrequests); + malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", + pactive * pagesize ); + CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); + malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); + + stats_arena_bins_print(write_cb, cbopaque, i); + stats_arena_lruns_print(write_cb, cbopaque, i); +} +#endif + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + int err; + uint64_t epoch; + size_t u64sz; + char s[UMAX2S_BUFSIZE]; + bool general = true; + bool merged = true; + bool unmerged = true; + bool bins = true; + bool large = true; + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. + * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch, + sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write("<jemalloc>: Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. 
+ */ + write_cb = JEMALLOC_P(malloc_message); + cbopaque = NULL; + } + + if (opts != NULL) { + unsigned i; + + for (i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { + case 'g': + general = false; + break; + case 'm': + merged = false; + break; + case 'a': + unmerged = false; + break; + case 'b': + bins = false; + break; + case 'l': + large = false; + break; + default:; + } + } + } + + write_cb(cbopaque, "___ Begin jemalloc statistics ___\n"); + if (general) { + int err; + const char *cpv; + bool bv; + unsigned uv; + ssize_t ssv; + size_t sv, bsz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + write_cb(cbopaque, "Version: "); + write_cb(cbopaque, cpv); + write_cb(cbopaque, "\n"); + CTL_GET("config.debug", &bv, bool); + write_cb(cbopaque, "Assertions "); + write_cb(cbopaque, bv ? "enabled" : "disabled"); + write_cb(cbopaque, "\n"); + +#define OPT_WRITE_BOOL(n) \ + if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \ + NULL, 0)) == 0) { \ + write_cb(cbopaque, " opt."#n": "); \ + write_cb(cbopaque, bv ? "true" : "false"); \ + write_cb(cbopaque, "\n"); \ + } +#define OPT_WRITE_SIZE_T(n) \ + if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \ + NULL, 0)) == 0) { \ + write_cb(cbopaque, " opt."#n": "); \ + write_cb(cbopaque, u2s(sv, 10, s)); \ + write_cb(cbopaque, "\n"); \ + } +#define OPT_WRITE_SSIZE_T(n) \ + if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \ + NULL, 0)) == 0) { \ + if (ssv >= 0) { \ + write_cb(cbopaque, " opt."#n": "); \ + write_cb(cbopaque, u2s(ssv, 10, s)); \ + } else { \ + write_cb(cbopaque, " opt."#n": -"); \ + write_cb(cbopaque, u2s(-ssv, 10, s)); \ + } \ + write_cb(cbopaque, "\n"); \ + } +#define OPT_WRITE_CHAR_P(n) \ + if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \ + NULL, 0)) == 0) { \ + write_cb(cbopaque, " opt."#n": \""); \ + write_cb(cbopaque, cpv); \ + write_cb(cbopaque, "\"\n"); \ + } + + write_cb(cbopaque, "Run-time option settings:\n"); + OPT_WRITE_BOOL(abort) + OPT_WRITE_SIZE_T(lg_qspace_max) + OPT_WRITE_SIZE_T(lg_cspace_max) + OPT_WRITE_SIZE_T(lg_chunk) + OPT_WRITE_SIZE_T(narenas) + OPT_WRITE_SSIZE_T(lg_dirty_mult) + OPT_WRITE_BOOL(stats_print) + OPT_WRITE_BOOL(junk) + OPT_WRITE_BOOL(zero) + OPT_WRITE_BOOL(sysv) + OPT_WRITE_BOOL(xmalloc) + OPT_WRITE_BOOL(tcache) + OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep) + OPT_WRITE_SSIZE_T(lg_tcache_max) + OPT_WRITE_BOOL(prof) + OPT_WRITE_CHAR_P(prof_prefix) + OPT_WRITE_SIZE_T(lg_prof_bt_max) + OPT_WRITE_BOOL(prof_active) + OPT_WRITE_SSIZE_T(lg_prof_sample) + OPT_WRITE_BOOL(prof_accum) + OPT_WRITE_SSIZE_T(lg_prof_tcmax) + OPT_WRITE_SSIZE_T(lg_prof_interval) + OPT_WRITE_BOOL(prof_gdump) + OPT_WRITE_BOOL(prof_leak) + OPT_WRITE_BOOL(overcommit) + +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_SIZE_T +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_CHAR_P + + write_cb(cbopaque, "CPUs: "); + write_cb(cbopaque, u2s(ncpus, 10, s)); + write_cb(cbopaque, "\n"); + + CTL_GET("arenas.narenas", &uv, unsigned); + write_cb(cbopaque, "Max arenas: "); + write_cb(cbopaque, u2s(uv, 10, s)); + write_cb(cbopaque, "\n"); + + write_cb(cbopaque, "Pointer size: "); + write_cb(cbopaque, u2s(sizeof(void *), 10, s)); + write_cb(cbopaque, "\n"); + + CTL_GET("arenas.quantum", &sv, size_t); + write_cb(cbopaque, "Quantum size: "); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, "\n"); + + CTL_GET("arenas.cacheline", &sv, size_t); + write_cb(cbopaque, "Cacheline size (assumed): "); + write_cb(cbopaque, u2s(sv, 10, s)); + 
write_cb(cbopaque, "\n"); + + CTL_GET("arenas.subpage", &sv, size_t); + write_cb(cbopaque, "Subpage spacing: "); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, "\n"); + + if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz, + NULL, 0)) == 0) { + write_cb(cbopaque, "Tiny 2^n-spaced sizes: ["); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, ".."); + + CTL_GET("arenas.tspace_max", &sv, size_t); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, "]\n"); + } + + CTL_GET("arenas.qspace_min", &sv, size_t); + write_cb(cbopaque, "Quantum-spaced sizes: ["); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, ".."); + CTL_GET("arenas.qspace_max", &sv, size_t); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, "]\n"); + + CTL_GET("arenas.cspace_min", &sv, size_t); + write_cb(cbopaque, "Cacheline-spaced sizes: ["); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, ".."); + CTL_GET("arenas.cspace_max", &sv, size_t); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, "]\n"); + + CTL_GET("arenas.sspace_min", &sv, size_t); + write_cb(cbopaque, "Subpage-spaced sizes: ["); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, ".."); + CTL_GET("arenas.sspace_max", &sv, size_t); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, "]\n"); + + CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); + if (ssv >= 0) { + write_cb(cbopaque, + "Min active:dirty page ratio per arena: "); + write_cb(cbopaque, u2s((1U << ssv), 10, s)); + write_cb(cbopaque, ":1\n"); + } else { + write_cb(cbopaque, + "Min active:dirty page ratio per arena: N/A\n"); + } + if ((err = JEMALLOC_P(mallctl)("arenas.tcache_max", &sv, + &ssz, NULL, 0)) == 0) { + write_cb(cbopaque, + "Maximum thread-cached size class: "); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, "\n"); + } + if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv, + &ssz, NULL, 0)) == 0) { + size_t tcache_gc_sweep = (1U << ssv); + bool tcache_enabled; + CTL_GET("opt.tcache", &tcache_enabled, bool); + write_cb(cbopaque, "Thread cache GC sweep interval: "); + write_cb(cbopaque, tcache_enabled && ssv >= 0 ? 
+ u2s(tcache_gc_sweep, 10, s) : "N/A"); + write_cb(cbopaque, "\n"); + } + if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0)) + == 0 && bv) { + CTL_GET("opt.lg_prof_bt_max", &sv, size_t); + write_cb(cbopaque, "Maximum profile backtrace depth: "); + write_cb(cbopaque, u2s((1U << sv), 10, s)); + write_cb(cbopaque, "\n"); + + CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t); + write_cb(cbopaque, + "Maximum per thread backtrace cache: "); + if (ssv >= 0) { + write_cb(cbopaque, u2s((1U << ssv), 10, s)); + write_cb(cbopaque, " (2^"); + write_cb(cbopaque, u2s(ssv, 10, s)); + write_cb(cbopaque, ")\n"); + } else + write_cb(cbopaque, "N/A\n"); + + CTL_GET("opt.lg_prof_sample", &sv, size_t); + write_cb(cbopaque, "Average profile sample interval: "); + write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s)); + write_cb(cbopaque, " (2^"); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, ")\n"); + + CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); + write_cb(cbopaque, "Average profile dump interval: "); + if (ssv >= 0) { + write_cb(cbopaque, u2s((((uint64_t)1U) << ssv), + 10, s)); + write_cb(cbopaque, " (2^"); + write_cb(cbopaque, u2s(ssv, 10, s)); + write_cb(cbopaque, ")\n"); + } else + write_cb(cbopaque, "N/A\n"); + } + CTL_GET("arenas.chunksize", &sv, size_t); + write_cb(cbopaque, "Chunk size: "); + write_cb(cbopaque, u2s(sv, 10, s)); + CTL_GET("opt.lg_chunk", &sv, size_t); + write_cb(cbopaque, " (2^"); + write_cb(cbopaque, u2s(sv, 10, s)); + write_cb(cbopaque, ")\n"); + } + +#ifdef JEMALLOC_STATS + { + int err; + size_t sszp, ssz; + size_t *cactive; + size_t allocated, active, mapped; + size_t chunks_current, chunks_high, swap_avail; + uint64_t chunks_total; + size_t huge_allocated; + uint64_t huge_nmalloc, huge_ndalloc; + + sszp = sizeof(size_t *); + ssz = sizeof(size_t); + + CTL_GET("stats.cactive", &cactive, size_t *); + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + malloc_cprintf(write_cb, cbopaque, + "Allocated: %zu, active: %zu, mapped: %zu\n", + allocated, active, mapped); + malloc_cprintf(write_cb, cbopaque, + "Current active ceiling: %zu\n", atomic_read_z(cactive)); + + /* Print chunk stats. */ + CTL_GET("stats.chunks.total", &chunks_total, uint64_t); + CTL_GET("stats.chunks.high", &chunks_high, size_t); + CTL_GET("stats.chunks.current", &chunks_current, size_t); + if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz, + NULL, 0)) == 0) { + size_t lg_chunk; + + malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " + "highchunks curchunks swap_avail\n"); + CTL_GET("opt.lg_chunk", &lg_chunk, size_t); + malloc_cprintf(write_cb, cbopaque, + " %13"PRIu64"%13zu%13zu%13zu\n", + chunks_total, chunks_high, chunks_current, + swap_avail << lg_chunk); + } else { + malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " + "highchunks curchunks\n"); + malloc_cprintf(write_cb, cbopaque, + " %13"PRIu64"%13zu%13zu\n", + chunks_total, chunks_high, chunks_current); + } + + /* Print huge stats. 
*/ + CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t); + CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t); + CTL_GET("stats.huge.allocated", &huge_allocated, size_t); + malloc_cprintf(write_cb, cbopaque, + "huge: nmalloc ndalloc allocated\n"); + malloc_cprintf(write_cb, cbopaque, + " %12"PRIu64" %12"PRIu64" %12zu\n", + huge_nmalloc, huge_ndalloc, huge_allocated); + + if (merged) { + unsigned narenas; + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + bool initialized[narenas]; + size_t isz; + unsigned i, ninitialized; + + isz = sizeof(initialized); + xmallctl("arenas.initialized", initialized, + &isz, NULL, 0); + for (i = ninitialized = 0; i < narenas; i++) { + if (initialized[i]) + ninitialized++; + } + + if (ninitialized > 1) { + /* Print merged arena stats. */ + malloc_cprintf(write_cb, cbopaque, + "\nMerged arenas stats:\n"); + stats_arena_print(write_cb, cbopaque, + narenas); + } + } + } + + if (unmerged) { + unsigned narenas; + + /* Print stats for each arena. */ + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + bool initialized[narenas]; + size_t isz; + unsigned i; + + isz = sizeof(initialized); + xmallctl("arenas.initialized", initialized, + &isz, NULL, 0); + + for (i = 0; i < narenas; i++) { + if (initialized[i]) { + malloc_cprintf(write_cb, + cbopaque, + "\narenas[%u]:\n", i); + stats_arena_print(write_cb, + cbopaque, i); + } + } + } + } + } +#endif /* #ifdef JEMALLOC_STATS */ + write_cb(cbopaque, "--- End jemalloc statistics ---\n"); +} diff --git a/src/tcache.c b/src/tcache.c new file mode 100644 index 0000000..31c329e --- /dev/null +++ b/src/tcache.c @@ -0,0 +1,480 @@ +#define JEMALLOC_TCACHE_C_ +#include "jemalloc/internal/jemalloc_internal.h" +#ifdef JEMALLOC_TCACHE +/******************************************************************************/ +/* Data. */ + +bool opt_tcache = true; +ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; +ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT; + +tcache_bin_info_t *tcache_bin_info; +static unsigned stack_nelms; /* Total stack elms per tcache. */ + +/* Map of thread-specific caches. */ +#ifndef NO_TLS +__thread tcache_t *tcache_tls JEMALLOC_ATTR(tls_model("initial-exec")); +#endif + +/* + * Same contents as tcache, but initialized such that the TSD destructor is + * called when a thread exits, so that the cache can be cleaned up. + */ +pthread_key_t tcache_tsd; + +size_t nhbins; +size_t tcache_maxclass; +unsigned tcache_gc_incr; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void tcache_thread_cleanup(void *arg); + +/******************************************************************************/ + +void * +tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) +{ + void *ret; + + arena_tcache_fill_small(tcache->arena, tbin, binind +#ifdef JEMALLOC_PROF + , tcache->prof_accumbytes +#endif + ); +#ifdef JEMALLOC_PROF + tcache->prof_accumbytes = 0; +#endif + ret = tcache_alloc_easy(tbin); + + return (ret); +} + +void +tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) + , tcache_t *tcache +#endif + ) +{ + void *ptr; + unsigned i, nflush, ndeferred; +#ifdef JEMALLOC_STATS + bool merged_stats = false; +#endif + + assert(binind < nbins); + assert(rem <= tbin->ncached); + + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena bin associated with the first object. 
*/ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + tbin->avail[0]); + arena_t *arena = chunk->arena; + arena_bin_t *bin = &arena->bins[binind]; + +#ifdef JEMALLOC_PROF + if (arena == tcache->arena) { + malloc_mutex_lock(&arena->lock); + arena_prof_accum(arena, tcache->prof_accumbytes); + malloc_mutex_unlock(&arena->lock); + tcache->prof_accumbytes = 0; + } +#endif + + malloc_mutex_lock(&bin->lock); +#ifdef JEMALLOC_STATS + if (arena == tcache->arena) { + assert(merged_stats == false); + merged_stats = true; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } +#endif + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = tbin->avail[i]; + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk->arena == arena) { + size_t pageind = ((uintptr_t)ptr - + (uintptr_t)chunk) >> PAGE_SHIFT; + arena_chunk_map_t *mapelm = + &chunk->map[pageind-map_bias]; + arena_dalloc_bin(arena, chunk, ptr, mapelm); + } else { + /* + * This object was allocated via a different + * arena bin than the one that is currently + * locked. Stash the object, so that it can be + * handled in a future pass. + */ + tbin->avail[ndeferred] = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(&bin->lock); + } +#ifdef JEMALLOC_STATS + if (merged_stats == false) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_bin_t *bin = &tcache->arena->bins[binind]; + malloc_mutex_lock(&bin->lock); + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(&bin->lock); + } +#endif + + memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], + rem * sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +void +tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) + , tcache_t *tcache +#endif + ) +{ + void *ptr; + unsigned i, nflush, ndeferred; +#ifdef JEMALLOC_STATS + bool merged_stats = false; +#endif + + assert(binind < nhbins); + assert(rem <= tbin->ncached); + + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena associated with the first object. */ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + tbin->avail[0]); + arena_t *arena = chunk->arena; + + malloc_mutex_lock(&arena->lock); +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + if (arena == tcache->arena) { +#endif +#ifdef JEMALLOC_PROF + arena_prof_accum(arena, tcache->prof_accumbytes); + tcache->prof_accumbytes = 0; +#endif +#ifdef JEMALLOC_STATS + merged_stats = true; + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[binind - nbins].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; +#endif +#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS)) + } +#endif + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = tbin->avail[i]; + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk->arena == arena) + arena_dalloc_large(arena, chunk, ptr); + else { + /* + * This object was allocated via a different + * arena than the one that is currently locked. + * Stash the object, so that it can be handled + * in a future pass. 
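(Editorial note, not part of this commit: concretely, if a bin holds 100 cached objects and rem is 20, the first pass sets nflush to 80, locks the arena that owns avail[0], and frees every flushed object belonging to that arena; objects owned by other arenas are compacted to the front of avail, ndeferred becomes the next pass's nflush, and so each pass acquires exactly one arena lock. The 20 retained objects are then slid to the front of avail by the memmove() below.)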
+ */ + tbin->avail[ndeferred] = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(&arena->lock); + } +#ifdef JEMALLOC_STATS + if (merged_stats == false) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_t *arena = tcache->arena; + malloc_mutex_lock(&arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[binind - nbins].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(&arena->lock); + } +#endif + + memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], + rem * sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +tcache_t * +tcache_create(arena_t *arena) +{ + tcache_t *tcache; + size_t size, stack_offset; + unsigned i; + + size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); + /* Naturally align the pointer stacks. */ + size = PTR_CEILING(size); + stack_offset = size; + size += stack_nelms * sizeof(void *); + /* + * Round up to the nearest multiple of the cacheline size, in order to + * avoid the possibility of false cacheline sharing. + * + * That this works relies on the same logic as in ipalloc(), but we + * cannot directly call ipalloc() here due to tcache bootstrapping + * issues. + */ + size = (size + CACHELINE_MASK) & (-CACHELINE); + + if (size <= small_maxclass) + tcache = (tcache_t *)arena_malloc_small(arena, size, true); + else if (size <= tcache_maxclass) + tcache = (tcache_t *)arena_malloc_large(arena, size, true); + else + tcache = (tcache_t *)icalloc(size); + + if (tcache == NULL) + return (NULL); + +#ifdef JEMALLOC_STATS + /* Link into list of extant tcaches. */ + malloc_mutex_lock(&arena->lock); + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + malloc_mutex_unlock(&arena->lock); +#endif + + tcache->arena = arena; + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + for (i = 0; i < nhbins; i++) { + tcache->tbins[i].lg_fill_div = 1; + tcache->tbins[i].avail = (void **)((uintptr_t)tcache + + (uintptr_t)stack_offset); + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + } + + TCACHE_SET(tcache); + + return (tcache); +} + +void +tcache_destroy(tcache_t *tcache) +{ + unsigned i; + size_t tcache_size; + +#ifdef JEMALLOC_STATS + /* Unlink from list of extant tcaches. 
*/ + malloc_mutex_lock(&tcache->arena->lock); + ql_remove(&tcache->arena->tcache_ql, tcache, link); + malloc_mutex_unlock(&tcache->arena->lock); + tcache_stats_merge(tcache, tcache->arena); +#endif + + for (i = 0; i < nbins; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_small(tbin, i, 0 +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) + , tcache +#endif + ); + +#ifdef JEMALLOC_STATS + if (tbin->tstats.nrequests != 0) { + arena_t *arena = tcache->arena; + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(&bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(&bin->lock); + } +#endif + } + + for (; i < nhbins; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_large(tbin, i, 0 +#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF)) + , tcache +#endif + ); + +#ifdef JEMALLOC_STATS + if (tbin->tstats.nrequests != 0) { + arena_t *arena = tcache->arena; + malloc_mutex_lock(&arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[i - nbins].nrequests += + tbin->tstats.nrequests; + malloc_mutex_unlock(&arena->lock); + } +#endif + } + +#ifdef JEMALLOC_PROF + if (tcache->prof_accumbytes > 0) { + malloc_mutex_lock(&tcache->arena->lock); + arena_prof_accum(tcache->arena, tcache->prof_accumbytes); + malloc_mutex_unlock(&tcache->arena->lock); + } +#endif + + tcache_size = arena_salloc(tcache); + if (tcache_size <= small_maxclass) { + arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); + arena_t *arena = chunk->arena; + size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >> + PAGE_SHIFT; + arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias]; + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapelm->bits >> PAGE_SHIFT)) << + PAGE_SHIFT)); + arena_bin_t *bin = run->bin; + + malloc_mutex_lock(&bin->lock); + arena_dalloc_bin(arena, chunk, tcache, mapelm); + malloc_mutex_unlock(&bin->lock); + } else if (tcache_size <= tcache_maxclass) { + arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); + arena_t *arena = chunk->arena; + + malloc_mutex_lock(&arena->lock); + arena_dalloc_large(arena, chunk, tcache); + malloc_mutex_unlock(&arena->lock); + } else + idalloc(tcache); +} + +static void +tcache_thread_cleanup(void *arg) +{ + tcache_t *tcache = (tcache_t *)arg; + + if (tcache == (void *)(uintptr_t)1) { + /* + * The previous time this destructor was called, we set the key + * to 1 so that other destructors wouldn't cause re-creation of + * the tcache. This time, do nothing, so that the destructor + * will not be called again. + */ + } else if (tcache == (void *)(uintptr_t)2) { + /* + * Another destructor called an allocator function after this + * destructor was called. Reset tcache to 1 in order to + * receive another callback. + */ + TCACHE_SET((uintptr_t)1); + } else if (tcache != NULL) { + assert(tcache != (void *)(uintptr_t)1); + tcache_destroy(tcache); + TCACHE_SET((uintptr_t)1); + } +} + +#ifdef JEMALLOC_STATS +void +tcache_stats_merge(tcache_t *tcache, arena_t *arena) +{ + unsigned i; + + /* Merge and reset tcache stats. 
*/ + for (i = 0; i < nbins; i++) { + arena_bin_t *bin = &arena->bins[i]; + tcache_bin_t *tbin = &tcache->tbins[i]; + malloc_mutex_lock(&bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(&bin->lock); + tbin->tstats.nrequests = 0; + } + + for (; i < nhbins; i++) { + malloc_large_stats_t *lstats = &arena->stats.lstats[i - nbins]; + tcache_bin_t *tbin = &tcache->tbins[i]; + arena->stats.nrequests_large += tbin->tstats.nrequests; + lstats->nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } +} +#endif + +bool +tcache_boot(void) +{ + + if (opt_tcache) { + unsigned i; + + /* + * If necessary, clamp opt_lg_tcache_max, now that + * small_maxclass and arena_maxclass are known. + */ + if (opt_lg_tcache_max < 0 || (1U << + opt_lg_tcache_max) < small_maxclass) + tcache_maxclass = small_maxclass; + else if ((1U << opt_lg_tcache_max) > arena_maxclass) + tcache_maxclass = arena_maxclass; + else + tcache_maxclass = (1U << opt_lg_tcache_max); + + nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT); + + /* Initialize tcache_bin_info. */ + tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * + sizeof(tcache_bin_info_t)); + if (tcache_bin_info == NULL) + return (true); + stack_nelms = 0; + for (i = 0; i < nbins; i++) { + if ((arena_bin_info[i].nregs << 1) <= + TCACHE_NSLOTS_SMALL_MAX) { + tcache_bin_info[i].ncached_max = + (arena_bin_info[i].nregs << 1); + } else { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MAX; + } + stack_nelms += tcache_bin_info[i].ncached_max; + } + for (; i < nhbins; i++) { + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; + stack_nelms += tcache_bin_info[i].ncached_max; + } + + /* Compute incremental GC event threshold. */ + if (opt_lg_tcache_gc_sweep >= 0) { + tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) / + nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins == + 0) ? 0 : 1); + } else + tcache_gc_incr = 0; + + if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) != + 0) { + malloc_write( + "<jemalloc>: Error in pthread_key_create()\n"); + abort(); + } + } + + return (false); +} +/******************************************************************************/ +#endif /* JEMALLOC_TCACHE */ diff --git a/src/zone.c b/src/zone.c new file mode 100644 index 0000000..2c1b231 --- /dev/null +++ b/src/zone.c @@ -0,0 +1,354 @@ +#include "jemalloc/internal/jemalloc_internal.h" +#ifndef JEMALLOC_ZONE +# error "This source file is for zones on Darwin (OS X)." +#endif + +/******************************************************************************/ +/* Data. */ + +static malloc_zone_t zone, szone; +static struct malloc_introspection_t zone_introspect, ozone_introspect; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +static size_t zone_size(malloc_zone_t *zone, void *ptr); +static void *zone_malloc(malloc_zone_t *zone, size_t size); +static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); +static void *zone_valloc(malloc_zone_t *zone, size_t size); +static void zone_free(malloc_zone_t *zone, void *ptr); +static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +#if (JEMALLOC_ZONE_VERSION >= 6) +static void *zone_memalign(malloc_zone_t *zone, size_t alignment, + size_t size); +static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, + size_t size); +#endif +static void *zone_destroy(malloc_zone_t *zone); +static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static void zone_force_lock(malloc_zone_t *zone); +static void zone_force_unlock(malloc_zone_t *zone); +static size_t ozone_size(malloc_zone_t *zone, void *ptr); +static void ozone_free(malloc_zone_t *zone, void *ptr); +static void *ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +static unsigned ozone_batch_malloc(malloc_zone_t *zone, size_t size, + void **results, unsigned num_requested); +static void ozone_batch_free(malloc_zone_t *zone, void **to_be_freed, + unsigned num); +#if (JEMALLOC_ZONE_VERSION >= 6) +static void ozone_free_definite_size(malloc_zone_t *zone, void *ptr, + size_t size); +#endif +static void ozone_force_lock(malloc_zone_t *zone); +static void ozone_force_unlock(malloc_zone_t *zone); + +/******************************************************************************/ +/* + * Functions. + */ + +static size_t +zone_size(malloc_zone_t *zone, void *ptr) +{ + + /* + * There appear to be places within Darwin (such as setenv(3)) that + * cause calls to this function with pointers that *no* zone owns. If + * we knew that all pointers were owned by *some* zone, we could split + * our zone into two parts, and use one as the default allocator and + * the other as the default deallocator/reallocator. Since that will + * not work in practice, we must check all pointers to assure that they + * reside within a mapped chunk before determining size. + */ + return (ivsalloc(ptr)); +} + +static void * +zone_malloc(malloc_zone_t *zone, size_t size) +{ + + return (JEMALLOC_P(malloc)(size)); +} + +static void * +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) +{ + + return (JEMALLOC_P(calloc)(num, size)); +} + +static void * +zone_valloc(malloc_zone_t *zone, size_t size) +{ + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size); + + return (ret); +} + +static void +zone_free(malloc_zone_t *zone, void *ptr) +{ + + JEMALLOC_P(free)(ptr); +} + +static void * +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) +{ + + return (JEMALLOC_P(realloc)(ptr, size)); +} + +#if (JEMALLOC_ZONE_VERSION >= 6) +static void * +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) +{ + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + JEMALLOC_P(posix_memalign)(&ret, alignment, size); + + return (ret); +} + +static void +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) +{ + + assert(ivsalloc(ptr) == size); + JEMALLOC_P(free)(ptr); +} +#endif + +static void * +zone_destroy(malloc_zone_t *zone) +{ + + /* This function should never be called. 
*/ + assert(false); + return (NULL); +} + +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) +{ + size_t ret; + void *p; + + /* + * Actually create an object of the appropriate size, then find out + * how large it could have been without moving up to the next size + * class. + */ + p = JEMALLOC_P(malloc)(size); + if (p != NULL) { + ret = isalloc(p); + JEMALLOC_P(free)(p); + } else + ret = size; + + return (ret); +} + +static void +zone_force_lock(malloc_zone_t *zone) +{ + + if (isthreaded) + jemalloc_prefork(); +} + +static void +zone_force_unlock(malloc_zone_t *zone) +{ + + if (isthreaded) + jemalloc_postfork(); +} + +malloc_zone_t * +create_zone(void) +{ + + zone.size = (void *)zone_size; + zone.malloc = (void *)zone_malloc; + zone.calloc = (void *)zone_calloc; + zone.valloc = (void *)zone_valloc; + zone.free = (void *)zone_free; + zone.realloc = (void *)zone_realloc; + zone.destroy = (void *)zone_destroy; + zone.zone_name = "jemalloc_zone"; + zone.batch_malloc = NULL; + zone.batch_free = NULL; + zone.introspect = &zone_introspect; + zone.version = JEMALLOC_ZONE_VERSION; +#if (JEMALLOC_ZONE_VERSION >= 6) + zone.memalign = zone_memalign; + zone.free_definite_size = zone_free_definite_size; +#endif + + zone_introspect.enumerator = NULL; + zone_introspect.good_size = (void *)zone_good_size; + zone_introspect.check = NULL; + zone_introspect.print = NULL; + zone_introspect.log = NULL; + zone_introspect.force_lock = (void *)zone_force_lock; + zone_introspect.force_unlock = (void *)zone_force_unlock; + zone_introspect.statistics = NULL; +#if (JEMALLOC_ZONE_VERSION >= 6) + zone_introspect.zone_locked = NULL; +#endif + + return (&zone); +} + +static size_t +ozone_size(malloc_zone_t *zone, void *ptr) +{ + size_t ret; + + ret = ivsalloc(ptr); + if (ret == 0) + ret = szone.size(zone, ptr); + + return (ret); +} + +static void +ozone_free(malloc_zone_t *zone, void *ptr) +{ + + if (ivsalloc(ptr) != 0) + JEMALLOC_P(free)(ptr); + else { + size_t size = szone.size(zone, ptr); + if (size != 0) + (szone.free)(zone, ptr); + } +} + +static void * +ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size) +{ + size_t oldsize; + + if (ptr == NULL) + return (JEMALLOC_P(malloc)(size)); + + oldsize = ivsalloc(ptr); + if (oldsize != 0) + return (JEMALLOC_P(realloc)(ptr, size)); + else { + oldsize = szone.size(zone, ptr); + if (oldsize == 0) + return (JEMALLOC_P(malloc)(size)); + else { + void *ret = JEMALLOC_P(malloc)(size); + if (ret != NULL) { + memcpy(ret, ptr, (oldsize < size) ? oldsize : + size); + (szone.free)(zone, ptr); + } + return (ret); + } + } +} + +static unsigned +ozone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, + unsigned num_requested) +{ + + /* Don't bother implementing this interface, since it isn't required. */ + return (0); +} + +static void +ozone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) +{ + unsigned i; + + for (i = 0; i < num; i++) + ozone_free(zone, to_be_freed[i]); +} + +#if (JEMALLOC_ZONE_VERSION >= 6) +static void +ozone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) +{ + + if (ivsalloc(ptr) != 0) { + assert(ivsalloc(ptr) == size); + JEMALLOC_P(free)(ptr); + } else { + assert(size == szone.size(zone, ptr)); + szone.free_definite_size(zone, ptr, size); + } +} +#endif + +static void +ozone_force_lock(malloc_zone_t *zone) +{ + + /* jemalloc locking is taken care of by the normal jemalloc zone. 
*/ + szone.introspect->force_lock(zone); +} + +static void +ozone_force_unlock(malloc_zone_t *zone) +{ + + /* jemalloc locking is taken care of by the normal jemalloc zone. */ + szone.introspect->force_unlock(zone); +} + +/* + * Overlay the default scalable zone (szone) such that existing allocations are + * drained, and further allocations come from jemalloc. This is necessary + * because Core Foundation directly accesses and uses the szone before the + * jemalloc library is even loaded. + */ +void +szone2ozone(malloc_zone_t *zone) +{ + + /* + * Stash a copy of the original szone so that we can call its + * functions as needed. Note that, internally, the szone stores its + * bookkeeping data structures immediately following the malloc_zone_t + * header, so when calling szone functions, we need to pass a pointer + * to the original zone structure. + */ + memcpy(&szone, zone, sizeof(malloc_zone_t)); + + zone->size = (void *)ozone_size; + zone->malloc = (void *)zone_malloc; + zone->calloc = (void *)zone_calloc; + zone->valloc = (void *)zone_valloc; + zone->free = (void *)ozone_free; + zone->realloc = (void *)ozone_realloc; + zone->destroy = (void *)zone_destroy; + zone->zone_name = "jemalloc_ozone"; + zone->batch_malloc = ozone_batch_malloc; + zone->batch_free = ozone_batch_free; + zone->introspect = &ozone_introspect; + zone->version = JEMALLOC_ZONE_VERSION; +#if (JEMALLOC_ZONE_VERSION >= 6) + zone->memalign = zone_memalign; + zone->free_definite_size = ozone_free_definite_size; +#endif + + ozone_introspect.enumerator = NULL; + ozone_introspect.good_size = (void *)zone_good_size; + ozone_introspect.check = NULL; + ozone_introspect.print = NULL; + ozone_introspect.log = NULL; + ozone_introspect.force_lock = (void *)ozone_force_lock; + ozone_introspect.force_unlock = (void *)ozone_force_unlock; + ozone_introspect.statistics = NULL; +#if (JEMALLOC_ZONE_VERSION >= 6) + ozone_introspect.zone_locked = NULL; +#endif +}
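The sentinel protocol in tcache_thread_cleanup() above is easier to follow in isolation. Below is a minimal, self-contained sketch of the same pthread TSD destructor pattern; the names (demo_key, demo_cleanup, thread_main) are invented for illustration and this is not jemalloc code. The idea: tear the per-thread state down once, park the key at (uintptr_t)1 so no further work is done, and treat (uintptr_t)2 (which jemalloc's allocation path would store if another key's destructor allocated after cleanup) as a request to re-arm the key for one more callback.

/*
 * Sketch of the TSD destructor protocol used by tcache_thread_cleanup();
 * illustrative names only.  Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

static pthread_key_t demo_key;

static void
demo_cleanup(void *arg)
{

    if (arg == (void *)(uintptr_t)1) {
        /* Cleanup already ran; do nothing, so callbacks stop. */
    } else if (arg == (void *)(uintptr_t)2) {
        /*
         * Another destructor used the cache after cleanup; reset the
         * key to 1 so one more callback is delivered.
         */
        pthread_setspecific(demo_key, (void *)(uintptr_t)1);
    } else if (arg != NULL) {
        free(arg);    /* Real per-thread teardown would happen here. */
        pthread_setspecific(demo_key, (void *)(uintptr_t)1);
    }
}

static void *
thread_main(void *unused)
{

    (void)unused;
    pthread_setspecific(demo_key, malloc(64));    /* Per-thread state. */
    return (NULL);
}

int
main(void)
{
    pthread_t thd;

    if (pthread_key_create(&demo_key, demo_cleanup) != 0)
        return (1);
    pthread_create(&thd, NULL, thread_main, NULL);
    pthread_join(thd, NULL);
    return (0);
}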
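tcache_boot() above derives three quantities from the run-time options: tcache_maxclass (the requested 2^opt_lg_tcache_max, clamped to the range [small_maxclass, arena_maxclass]), nhbins (the small bins plus one large bin per page-sized step up to tcache_maxclass), and tcache_gc_incr (the ceiling of 2^opt_lg_tcache_gc_sweep divided by nbins, i.e. how many cache events elapse between incremental GC passes over successive bins). The standalone sketch below reproduces that arithmetic with made-up constants; the real values come from jemalloc's arena boot code.

/* Sketch of tcache_boot()'s sizing arithmetic; constants are illustrative. */
#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    size_t small_maxclass = 3584;         /* Largest small size class. */
    size_t arena_maxclass = 4 << 20;      /* Largest arena-managed class. */
    unsigned page_shift = 12;             /* 4 KiB pages. */
    unsigned nbins = 28;                  /* Number of small bins. */
    int opt_lg_tcache_max = 15;           /* Cache objects up to 32 KiB. */
    int opt_lg_tcache_gc_sweep = 13;      /* Full sweep every 2^13 events. */
    size_t tcache_maxclass;
    unsigned nhbins, tcache_gc_incr;

    /* Clamp the requested maximum into [small_maxclass, arena_maxclass]. */
    if (opt_lg_tcache_max < 0 ||
        ((size_t)1 << opt_lg_tcache_max) < small_maxclass)
        tcache_maxclass = small_maxclass;
    else if (((size_t)1 << opt_lg_tcache_max) > arena_maxclass)
        tcache_maxclass = arena_maxclass;
    else
        tcache_maxclass = (size_t)1 << opt_lg_tcache_max;

    /* One large bin per page-sized step beyond the small bins. */
    nhbins = nbins + (unsigned)(tcache_maxclass >> page_shift);

    /* Ceiling of 2^sweep / nbins: events per incremental GC pass. */
    if (opt_lg_tcache_gc_sweep >= 0) {
        tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) / nbins) +
            (((1U << opt_lg_tcache_gc_sweep) % nbins == 0) ? 0 : 1);
    } else
        tcache_gc_incr = 0;

    printf("tcache_maxclass=%zu nhbins=%u tcache_gc_incr=%u\n",
        tcache_maxclass, nhbins, tcache_gc_incr);
    return (0);
}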
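zone_good_size() above answers "what size would this request actually be rounded to?" by doing a real allocation, querying its size, and freeing it. The same round trip can be written against a public API; the sketch below substitutes glibc's malloc_usable_size(3) for jemalloc's internal isalloc() (on Darwin, malloc_size(3) plays the same role). This is an illustrative stand-in, not the code above.

/* Round-trip "good size" query, using malloc_usable_size() (glibc). */
#include <malloc.h>    /* malloc_usable_size() */
#include <stdio.h>
#include <stdlib.h>

static size_t
good_size(size_t size)
{
    void *p = malloc(size);
    size_t ret;

    if (p != NULL) {
        ret = malloc_usable_size(p);    /* Size class actually backing p. */
        free(p);
    } else
        ret = size;    /* Fall back to the request on allocation failure. */
    return (ret);
}

int
main(void)
{

    printf("good_size(100) = %zu\n", good_size(100));
    return (0);
}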
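Finally, szone2ozone() above amounts to a function-table overlay: stash a copy of the original zone's dispatch table, then repoint the live table at wrappers that consult jemalloc first and fall back to the stashed original. The toy sketch below shows only that pattern in portable C; the vtable_t struct and its fields are invented for illustration and are not Darwin's malloc_zone_t.

/* Toy dispatch-table overlay in the style of szone2ozone(). */
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *name;
    void (*hello)(void);
} vtable_t;

static vtable_t saved;    /* Copy of the original table (cf. szone). */

static void
orig_hello(void)
{

    puts("original allocator");
}

static void
overlay_hello(void)
{

    puts("replacement allocator");
    saved.hello();    /* The original remains callable via the copy. */
}

static void
overlay(vtable_t *live)
{

    memcpy(&saved, live, sizeof(saved));    /* Stash, as szone2ozone does. */
    live->name = "overlay";
    live->hello = overlay_hello;            /* Redirect the live table. */
}

int
main(void)
{
    vtable_t zone = { "default", orig_hello };

    overlay(&zone);
    zone.hello();    /* Prints the replacement, then the original. */
    return (0);
}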