author     Jason Evans <jasone@canonware.com>  2016-05-30 01:34:50 (GMT)
committer  Jason Evans <jasone@canonware.com>  2016-06-06 03:42:23 (GMT)
commit     498856f44a30b31fe713a18eb2fc7c6ecf3a9f63 (patch)
tree       bf1eff459e9a04c950b946cb039374a7f9a6e69e /src/arena.c
parent     d28e5a6696fd59a45c156b5c4dc183bb9ed21596 (diff)
Move slabs out of chunks.
Diffstat (limited to 'src/arena.c')
-rw-r--r--  src/arena.c | 1619
1 file changed, 308 insertions(+), 1311 deletions(-)
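
This patch replaces runs, whose state lived in each chunk's page map (map_bias, arena_maxrun, and the mapbits accessors removed below), with slabs: standalone extents that carry their own metadata (bin index, free-region count, allocation bitmap) in an arena_slab_data_t. The standalone sketch below uses simplified types to illustrate the shape of the new arena_slab_reg_alloc()/arena_slab_reg_dalloc() logic; it is not jemalloc's implementation. It assumes a fixed 64-region slab, an inverted free/used bit convention, and GCC/Clang's __builtin_ctzll standing in for jemalloc's bitmap_sfu().

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SLAB_NREGS 64                  /* Assumed fixed region count. */

typedef struct {
        unsigned binind;               /* Size class this slab serves. */
        unsigned nfree;                /* Number of free regions. */
        uint64_t bitmap;               /* One bit per region; 1 == free here. */
} slab_data_t;

typedef struct {
        void *addr;                    /* Base address of the slab's memory. */
        size_t reg_size;               /* Region size for this bin. */
        slab_data_t data;              /* Metadata travels with the extent. */
} slab_t;

static void
slab_init(slab_t *slab, void *addr, size_t reg_size, unsigned binind)
{
        slab->addr = addr;
        slab->reg_size = reg_size;
        slab->data.binind = binind;
        slab->data.nfree = SLAB_NREGS;
        slab->data.bitmap = ~UINT64_C(0);      /* All regions free. */
}

/* Analogous to arena_slab_reg_alloc(): hand out the lowest free region. */
static void *
slab_reg_alloc(slab_t *slab)
{
        unsigned regind;

        assert(slab->data.nfree > 0);
        regind = (unsigned)__builtin_ctzll(slab->data.bitmap);
        slab->data.bitmap &= ~(UINT64_C(1) << regind);
        slab->data.nfree--;
        return ((void *)((uintptr_t)slab->addr + regind * slab->reg_size));
}

/* Analogous to arena_slab_reg_dalloc(): recover regind from the pointer. */
static void
slab_reg_dalloc(slab_t *slab, void *ptr)
{
        size_t diff = (size_t)((uintptr_t)ptr - (uintptr_t)slab->addr);
        unsigned regind;

        assert(diff % slab->reg_size == 0);
        regind = (unsigned)(diff / slab->reg_size);
        assert((slab->data.bitmap & (UINT64_C(1) << regind)) == 0);
        slab->data.bitmap |= UINT64_C(1) << regind;
        slab->data.nfree++;
}

int
main(void)
{
        static char backing[SLAB_NREGS * 32];
        slab_t slab;
        void *a, *b;

        slab_init(&slab, backing, 32, 0);
        a = slab_reg_alloc(&slab);
        b = slab_reg_alloc(&slab);
        printf("a=%p b=%p nfree=%u\n", a, b, slab.data.nfree);
        slab_reg_dalloc(&slab, a);
        printf("after free: nfree=%u\n", slab.data.nfree);
        return (0);
}

Because the metadata travels with the slab extent itself, deallocation needs only the slab's base address and region size; there is no chunk header, map_bias, or page-map lookup left to consult, which is why those globals and accessors disappear in the hunks below.
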
diff --git a/src/arena.c b/src/arena.c
index 0b98ec5..ffde2e3 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -16,9 +16,9 @@ ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
const arena_bin_info_t arena_bin_info[NBINS] = {
-#define BIN_INFO_bin_yes(reg_size, run_size, nregs) \
- {reg_size, run_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
-#define BIN_INFO_bin_no(reg_size, run_size, nregs)
+#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
+ {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
@@ -30,10 +30,6 @@ const arena_bin_info_t arena_bin_info[NBINS] = {
#undef SC
};
-size_t map_bias;
-size_t map_misc_offset;
-size_t arena_maxrun; /* Max run size for arenas. */
-
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -42,173 +38,13 @@ size_t arena_maxrun; /* Max run size for arenas. */
static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
size_t ndirty_limit);
-static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- arena_run_t *run, bool dirty, bool cleaned, bool decommitted);
-static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, extent_t *extent, arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, arena_run_t *run, arena_bin_t *bin);
+static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena,
+ extent_t *slab, arena_bin_t *bin);
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,
+ extent_t *slab, arena_bin_t *bin);
/******************************************************************************/
-JEMALLOC_INLINE_C size_t
-arena_miscelm_size_get(extent_t *extent, const arena_chunk_map_misc_t *miscelm)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- size_t pageind = arena_miscelm_to_pageind(extent, miscelm);
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- return (arena_mapbits_size_decode(mapbits));
-}
-
-JEMALLOC_INLINE_C int
-arena_run_addr_comp(const arena_chunk_map_misc_t *a,
- const arena_chunk_map_misc_t *b)
-{
- uintptr_t a_miscelm = (uintptr_t)a;
- uintptr_t b_miscelm = (uintptr_t)b;
-
- assert(a != NULL);
- assert(b != NULL);
-
- return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
-}
-
-/* Generate pairing heap functions. */
-ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
- ph_link, arena_run_addr_comp)
-
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
-#endif
-static size_t
-run_quantize_floor(size_t size)
-{
- size_t ret;
- pszind_t pind;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
-
- assert(size != 0);
- assert(size == PAGE_CEILING(size));
-
- pind = psz2ind(size - large_pad + 1);
- if (pind == 0) {
- /*
- * Avoid underflow. This short-circuit would also do the right
- * thing for all sizes in the range for which there are
- * PAGE-spaced size classes, but it's simplest to just handle
- * the one case that would cause erroneous results.
- */
- return (size);
- }
- ret = pind2sz(pind - 1) + large_pad;
- assert(ret <= size);
- return (ret);
-}
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
-run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
-#endif
-static size_t
-run_quantize_ceil(size_t size)
-{
- size_t ret;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
-
- ret = run_quantize_floor(size);
- if (ret < size) {
- /*
- * Skip a quantization that may have an adequately large run,
- * because under-sized runs may be mixed in. This only happens
- * when an unusual size is requested, i.e. for aligned
- * allocation, and is just one of several places where linear
- * search would potentially find sufficiently aligned available
- * memory somewhere lower.
- */
- ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
- }
- return (ret);
-}
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
-run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
-#endif
-
-static void
-arena_avail_insert(arena_t *arena, extent_t *extent, size_t pageind,
- size_t npages)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- extent, arena_miscelm_get_const(chunk, pageind))));
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- arena_run_heap_insert(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
-}
-
-static void
-arena_avail_remove(arena_t *arena, extent_t *extent, size_t pageind,
- size_t npages)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- extent, arena_miscelm_get_const(chunk, pageind))));
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- arena_run_heap_remove(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
-}
-
-static void
-arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages)
-{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
- assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
- CHUNK_MAP_DIRTY);
-
- qr_new(&miscelm->rd, rd_link);
- qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
- arena->ndirty += npages;
-}
-
-static void
-arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages)
-{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
- assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
- assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
- CHUNK_MAP_DIRTY);
-
- qr_remove(&miscelm->rd, rd_link);
- assert(arena->ndirty >= npages);
- arena->ndirty -= npages;
-}
-
static size_t
arena_chunk_dirty_npages(const extent_t *extent)
{
@@ -269,8 +105,7 @@ arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
{
if (cache) {
- extent_dirty_insert(extent, &arena->runs_dirty,
- &arena->chunks_cache);
+ extent_ring_insert(&arena->extents_dirty, extent);
arena->ndirty += arena_chunk_dirty_npages(extent);
}
}
@@ -280,54 +115,49 @@ arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
{
if (dirty) {
- extent_dirty_remove(extent);
+ extent_ring_remove(extent);
assert(arena->ndirty >= arena_chunk_dirty_npages(extent));
arena->ndirty -= arena_chunk_dirty_npages(extent);
}
}
JEMALLOC_INLINE_C void *
-arena_run_reg_alloc(tsdn_t *tsdn, arena_run_t *run,
+arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
const arena_bin_info_t *bin_info)
{
void *ret;
- extent_t *extent;
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
size_t regind;
- arena_chunk_map_misc_t *miscelm;
- void *rpages;
-
- assert(run->nfree > 0);
- assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
-
- extent = iealloc(tsdn, run);
- regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
- miscelm = arena_run_to_miscelm(extent, run);
- rpages = arena_miscelm_to_rpages(extent, miscelm);
- ret = (void *)((uintptr_t)rpages + (uintptr_t)(bin_info->reg_size *
- regind));
- run->nfree--;
+
+ assert(slab_data->nfree > 0);
+ assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
+
+ regind = (unsigned)bitmap_sfu(slab_data->bitmap,
+ &bin_info->bitmap_info);
+ ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ (uintptr_t)(bin_info->reg_size * regind));
+ slab_data->nfree--;
return (ret);
}
JEMALLOC_INLINE_C size_t
-arena_run_regind(extent_t *extent, arena_run_t *run,
- const arena_bin_info_t *bin_info, const void *ptr)
+arena_slab_regind(extent_t *slab, const arena_bin_info_t *bin_info,
+ const void *ptr)
{
size_t diff, interval, shift, regind;
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(extent, run);
- void *rpages = arena_miscelm_to_rpages(extent, miscelm);
- /*
- * Freeing a pointer lower than region zero can cause assertion
- * failure.
- */
- assert((uintptr_t)ptr >= (uintptr_t)rpages);
+ /* Freeing a pointer outside the slab can cause assertion failure. */
+ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
+ (uintptr_t)bin_info->reg_size == 0);
/*
* Avoid doing division with a variable divisor if possible. Using
* actual division here can reduce allocator throughput by over 20%!
*/
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages);
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
/* Rescale (factor powers of 2 out of the numerator and denominator). */
interval = bin_info->reg_size;
@@ -353,7 +183,7 @@ arena_run_regind(extent_t *extent, arena_run_t *run,
* divide by 0, and 1 and 2 are both powers of two, which are
* handled above.
*/
-#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
+#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_SLAB_MAXREGS)
#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
static const size_t interval_invs[] = {
SIZE_INV(3),
@@ -382,48 +212,19 @@ arena_run_regind(extent_t *extent, arena_run_t *run,
}
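/*
 * Illustrative aside, not part of this patch: the SIZE_INV trick above
 * replaces "regind = diff / interval" with a multiply by a precomputed
 * approximate inverse followed by a shift.  The standalone sketch below
 * assumes LG_SLAB_MAXREGS == 11; the real value comes from jemalloc's
 * headers.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SKETCH_LG_MAXREGS	11
#define SKETCH_INV_SHIFT	((sizeof(size_t) << 3) - SKETCH_LG_MAXREGS)
#define SKETCH_INV(s)		((((size_t)1 << SKETCH_INV_SHIFT) / (s)) + 1)

int
main(void)
{
	size_t interval = 3;	/* Divisor with powers of 2 factored out. */
	size_t inv = SKETCH_INV(interval);
	size_t regind;

	/* For every index below 2^LG_MAXREGS the shortcut matches division. */
	for (regind = 0; regind < ((size_t)1 << SKETCH_LG_MAXREGS); regind++) {
		size_t diff = regind * interval;
		assert(((diff * inv) >> SKETCH_INV_SHIFT) == regind);
	}
	printf("multiply-shift inverse is exact for interval=%zu\n", interval);
	return (0);
}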
JEMALLOC_INLINE_C void
-arena_run_reg_dalloc(tsdn_t *tsdn, arena_run_t *run, extent_t *extent,
- void *ptr)
+arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
+ arena_slab_data_t *slab_data, void *ptr)
{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- szind_t binind = arena_ptr_small_binind_get(tsdn, ptr, mapbits);
+ szind_t binind = slab_data->binind;
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size_t regind = arena_run_regind(extent, run, bin_info, ptr);
+ size_t regind = arena_slab_regind(slab, bin_info, ptr);
- assert(run->nfree < bin_info->nregs);
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr -
- (uintptr_t)arena_miscelm_to_rpages(extent,
- arena_run_to_miscelm(extent, run))) % (uintptr_t)bin_info->reg_size
- == 0);
- assert((uintptr_t)ptr >=
- (uintptr_t)arena_miscelm_to_rpages(extent,
- arena_run_to_miscelm(extent, run)));
+ assert(slab_data->nfree < bin_info->nregs);
/* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
- run->nfree++;
-}
-
-JEMALLOC_INLINE_C void
-arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
-{
-
- memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
- (npages << LG_PAGE));
-}
+ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-JEMALLOC_INLINE_C void
-arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
- size_t i;
- UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
-
- for (i = 0; i < PAGE / sizeof(size_t); i++)
- assert(p[i] == 0);
+ bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+ slab_data->nfree++;
}
static void
@@ -455,373 +256,6 @@ arena_nactive_sub(arena_t *arena, size_t sub_pages)
}
static void
-arena_run_split_remove(arena_t *arena, extent_t *extent, size_t run_ind,
- size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- size_t total_pages, rem_pages;
-
- assert(flag_dirty == 0 || flag_decommitted == 0);
-
- total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
- LG_PAGE;
- assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
- flag_dirty);
- assert(need_pages <= total_pages);
- rem_pages = total_pages - need_pages;
-
- arena_avail_remove(arena, extent, run_ind, total_pages);
- if (flag_dirty != 0)
- arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
- arena_nactive_add(arena, need_pages);
-
- /* Keep track of trailing unused pages for later use. */
- if (rem_pages > 0) {
- size_t flags = flag_dirty | flag_decommitted;
- size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
- 0;
-
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE), flags |
- (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
- flag_unzeroed_mask));
- arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
- (rem_pages << LG_PAGE), flags |
- (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
- flag_unzeroed_mask));
- if (flag_dirty != 0) {
- arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
- rem_pages);
- }
- arena_avail_insert(arena, extent, run_ind+need_pages,
- rem_pages);
- }
-}
-
-static bool
-arena_run_split_large_helper(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- arena_run_t *run, size_t size, bool remove, bool zero)
-{
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- size_t flag_dirty, flag_decommitted, run_ind, need_pages;
- size_t flag_unzeroed_mask;
-
- chunk = (arena_chunk_t *)extent_base_get(extent);
- miscelm = arena_run_to_miscelm(extent, run);
- run_ind = arena_miscelm_to_pageind(extent, miscelm);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
-
- if (flag_decommitted != 0 && chunk_commit_wrapper(tsdn, arena,
- &arena->chunk_hooks, extent, run_ind << LG_PAGE, size))
- return (true);
-
- if (remove) {
- arena_run_split_remove(arena, extent, run_ind, flag_dirty,
- flag_decommitted, need_pages);
- }
-
- if (zero) {
- if (flag_decommitted != 0)
- ; /* The run is untouched, and therefore zeroed. */
- else if (flag_dirty != 0) {
- /* The run is dirty, so all pages must be zeroed. */
- arena_run_zero(chunk, run_ind, need_pages);
- } else {
- /*
- * The run is clean, so some pages may be zeroed (i.e.
- * never before touched).
- */
- size_t i;
- for (i = 0; i < need_pages; i++) {
- if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
- != 0)
- arena_run_zero(chunk, run_ind+i, 1);
- else if (config_debug) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+i);
- }
- }
- }
- }
-
- /*
- * Set the last element first, in case the run only contains one page
- * (i.e. both statements set the same element).
- */
- flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
- CHUNK_MAP_UNZEROED : 0;
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages-1)));
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
- return (false);
-}
-
-static bool
-arena_run_split_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- arena_run_t *run, size_t size, bool zero)
-{
-
- return (arena_run_split_large_helper(tsdn, arena, extent, run, size,
- true, zero));
-}
-
-static bool
-arena_run_split_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- arena_run_t *run, size_t size, szind_t binind)
-{
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
-
- assert(binind != BININD_INVALID);
-
- chunk = (arena_chunk_t *)extent_base_get(extent);
- miscelm = arena_run_to_miscelm(extent, run);
- run_ind = arena_miscelm_to_pageind(extent, miscelm);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
-
- if (flag_decommitted != 0 && chunk_commit_wrapper(tsdn, arena,
- &arena->chunk_hooks, extent, run_ind << LG_PAGE, size))
- return (true);
-
- arena_run_split_remove(arena, extent, run_ind, flag_dirty,
- flag_decommitted, need_pages);
-
- for (i = 0; i < need_pages; i++) {
- size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
- run_ind+i);
- arena_mapbits_small_set(chunk, run_ind+i, i, binind,
- flag_unzeroed);
- if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+i);
- }
- return (false);
-}
-
-static extent_t *
-arena_chunk_init_spare(arena_t *arena)
-{
- extent_t *extent;
-
- assert(arena->spare != NULL);
-
- extent = arena->spare;
- arena->spare = NULL;
-
- assert(arena_mapbits_allocated_get((arena_chunk_t *)
- extent_base_get(extent), map_bias) == 0);
- assert(arena_mapbits_allocated_get((arena_chunk_t *)
- extent_base_get(extent), chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
- extent_base_get(extent), map_bias) == arena_maxrun);
- assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
- extent_base_get(extent), chunk_npages-1) == arena_maxrun);
- assert(arena_mapbits_dirty_get((arena_chunk_t *)
- extent_base_get(extent), map_bias) ==
- arena_mapbits_dirty_get((arena_chunk_t *)extent_base_get(extent),
- chunk_npages-1));
-
- return (extent);
-}
-
-static extent_t *
-arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
-{
- extent_t *extent;
-
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
- 0, CACHELINE, zero, commit, true);
- if (extent != NULL && !*commit) {
- /* Commit header. */
- if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, extent, 0,
- map_bias << LG_PAGE)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, extent);
- extent = NULL;
- }
- }
-
- malloc_mutex_lock(tsdn, &arena->lock);
-
- return (extent);
-}
-
-static extent_t *
-arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
- bool *commit)
-{
- extent_t *extent;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-
- extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
- chunksize, 0, CACHELINE, zero, true);
- if (extent != NULL)
- *commit = true;
- if (extent == NULL) {
- extent = arena_chunk_alloc_internal_hard(tsdn, arena,
- &chunk_hooks, zero, commit);
- if (extent == NULL)
- return (NULL);
- }
- assert(extent_slab_get(extent));
-
- if (config_stats) {
- arena->stats.mapped += extent_size_get(extent);
- arena->stats.metadata_mapped += (map_bias << LG_PAGE);
- }
-
- return (extent);
-}
-
-static extent_t *
-arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
-{
- extent_t *extent;
- bool zero, commit;
- size_t flag_unzeroed, flag_decommitted, i;
-
- assert(arena->spare == NULL);
-
- zero = false;
- commit = false;
- extent = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
- if (extent == NULL)
- return (NULL);
-
- /*
- * Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
- * or decommitted chunk.
- */
- flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
- flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
- arena_mapbits_unallocated_set((arena_chunk_t *)extent_base_get(extent),
- map_bias, arena_maxrun, flag_unzeroed | flag_decommitted);
- /*
- * There is no need to initialize the internal page map entries unless
- * the chunk is not zeroed.
- */
- if (!zero) {
- for (i = map_bias+1; i < chunk_npages-1; i++) {
- arena_mapbits_internal_set((arena_chunk_t *)
- extent_base_get(extent), i, flag_unzeroed);
- }
- } else {
- if (config_debug) {
- for (i = map_bias+1; i < chunk_npages-1; i++) {
- assert(arena_mapbits_unzeroed_get(
- (arena_chunk_t *)extent_base_get(extent), i)
- == flag_unzeroed);
- }
- }
- }
- arena_mapbits_unallocated_set((arena_chunk_t *)extent_base_get(extent),
- chunk_npages-1, arena_maxrun, flag_unzeroed);
-
- return (extent);
-}
-
-static extent_t *
-arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
-{
- extent_t *extent;
-
- if (arena->spare != NULL)
- extent = arena_chunk_init_spare(arena);
- else {
- extent = arena_chunk_init_hard(tsdn, arena);
- if (extent == NULL)
- return (NULL);
- }
-
- ql_elm_new(extent, ql_link);
- ql_tail_insert(&arena->achunks, extent, ql_link);
- arena_avail_insert(arena, extent, map_bias, chunk_npages-map_bias);
-
- return (extent);
-}
-
-static void
-arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
-{
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-
- extent_committed_set(extent,
- (arena_mapbits_decommitted_get((arena_chunk_t *)
- extent_base_get(extent), map_bias) == 0));
- if (!extent_committed_get(extent)) {
- /*
- * Decommit the header. Mark the chunk as decommitted even if
- * header decommit fails, since treating a partially committed
- * chunk as committed has a high potential for causing later
- * access of decommitted memory.
- */
- chunk_decommit_wrapper(tsdn, arena, &chunk_hooks, extent, 0,
- map_bias << LG_PAGE);
- }
-
- if (config_stats) {
- arena->stats.mapped -= extent_size_get(extent);
- arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
- }
-
- arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
-}
-
-static void
-arena_spare_discard(tsdn_t *tsdn, arena_t *arena, extent_t *spare)
-{
-
- assert(arena->spare != spare);
-
- if (arena_mapbits_dirty_get((arena_chunk_t *)extent_base_get(spare),
- map_bias) != 0) {
- arena_run_dirty_remove(arena, (arena_chunk_t *)
- extent_base_get(spare), map_bias, chunk_npages-map_bias);
- }
-
- arena_chunk_discard(tsdn, arena, spare);
-}
-
-static void
-arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- extent_t *spare;
-
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxrun);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxrun);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
- assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
- arena_mapbits_decommitted_get(chunk, chunk_npages-1));
-
- /* Remove run from runs_avail, so that the arena does not use it. */
- arena_avail_remove(arena, extent, map_bias, chunk_npages-map_bias);
-
- ql_remove(&arena->achunks, extent, ql_link);
- spare = arena->spare;
- arena->spare = extent;
- if (spare != NULL)
- arena_spare_discard(tsdn, arena, spare);
-}
-
-static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize);
@@ -986,77 +420,6 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
malloc_mutex_unlock(tsdn, &arena->lock);
}
-/*
- * Do first-best-fit run selection, i.e. select the lowest run that best fits.
- * Run sizes are indexed, so not all candidate runs are necessarily exactly the
- * same size.
- */
-static arena_run_t *
-arena_run_first_best_fit(arena_t *arena, size_t size)
-{
- pszind_t pind, i;
-
- pind = psz2ind(run_quantize_ceil(size));
-
- for (i = pind; pind2sz(i) <= arena_maxrun; i++) {
- arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
- &arena->runs_avail[i]);
- if (miscelm != NULL)
- return (&miscelm->run);
- }
-
- return (NULL);
-}
-
-static arena_run_t *
-arena_run_alloc_small_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
- szind_t binind)
-{
- arena_run_t *run = arena_run_first_best_fit(arena, size);
- if (run != NULL) {
- if (arena_run_split_small(tsdn, arena, iealloc(tsdn, run), run,
- size, binind))
- run = NULL;
- }
- return (run);
-}
-
-static arena_run_t *
-arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
-{
- arena_run_t *run;
- extent_t *extent;
-
- assert(size <= arena_maxrun);
- assert(size == PAGE_CEILING(size));
- assert(binind != BININD_INVALID);
-
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_small_helper(tsdn, arena, size, binind);
- if (run != NULL)
- return (run);
-
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- extent = arena_chunk_alloc(tsdn, arena);
- if (extent != NULL) {
- run = &arena_miscelm_get_mutable(
- (arena_chunk_t *)extent_base_get(extent), map_bias)->run;
- if (arena_run_split_small(tsdn, arena, iealloc(tsdn, run), run,
- size, binind))
- run = NULL;
- return (run);
- }
-
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_small_helper(tsdn, arena, size, binind));
-}
-
static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{
@@ -1360,120 +723,45 @@ arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
static size_t
arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
{
+ extent_t *extent;
size_t ndirty = 0;
- arena_runs_dirty_link_t *rdelm;
- extent_t *chunkselm;
- for (rdelm = qr_next(&arena->runs_dirty, rd_link),
- chunkselm = qr_next(&arena->chunks_cache, cc_link);
- rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
- size_t npages;
-
- if (rdelm == &chunkselm->rd) {
- npages = extent_size_get(chunkselm) >> LG_PAGE;
- chunkselm = qr_next(chunkselm, cc_link);
- } else {
- extent_t *extent = iealloc(tsdn, rdelm);
- arena_chunk_t *chunk =
- (arena_chunk_t *)extent_base_get(extent);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(extent, rdelm);
- size_t pageind = arena_miscelm_to_pageind(extent,
- miscelm);
- assert(arena_mapbits_allocated_get(chunk, pageind) ==
- 0);
- assert(arena_mapbits_large_get(chunk, pageind) == 0);
- assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
- }
- ndirty += npages;
- }
+ for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
+ &arena->extents_dirty; extent = qr_next(extent, qr_link))
+ ndirty += extent_size_get(extent) >> LG_PAGE;
return (ndirty);
}
static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
- extent_t *purge_chunks_sentinel)
+ size_t ndirty_limit, extent_t *purge_extents_sentinel)
{
- arena_runs_dirty_link_t *rdelm, *rdelm_next;
- extent_t *chunkselm;
+ extent_t *extent, *next;
size_t nstashed = 0;
- /* Stash runs/chunks according to ndirty_limit. */
- for (rdelm = qr_next(&arena->runs_dirty, rd_link),
- chunkselm = qr_next(&arena->chunks_cache, cc_link);
- rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
+ /* Stash extents according to ndirty_limit. */
+ for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
+ &arena->extents_dirty; extent = next) {
size_t npages;
- rdelm_next = qr_next(rdelm, rd_link);
-
- if (rdelm == &chunkselm->rd) {
- extent_t *chunkselm_next;
- bool zero;
- UNUSED extent_t *extent;
-
- npages = extent_size_get(chunkselm) >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
-
- chunkselm_next = qr_next(chunkselm, cc_link);
- /* Allocate. */
- zero = false;
- extent = arena_chunk_cache_alloc_locked(tsdn, arena,
- chunk_hooks, extent_base_get(chunkselm),
- extent_size_get(chunkselm), 0, CACHELINE, &zero,
- false);
- assert(extent == chunkselm);
- assert(zero == extent_zeroed_get(chunkselm));
- extent_dirty_insert(chunkselm, purge_runs_sentinel,
- purge_chunks_sentinel);
- assert(npages == (extent_size_get(chunkselm) >>
- LG_PAGE));
- chunkselm = chunkselm_next;
- } else {
- extent_t *extent = iealloc(tsdn, rdelm);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(extent, rdelm);
- size_t pageind = arena_miscelm_to_pageind(extent,
- miscelm);
- arena_run_t *run = &miscelm->run;
- size_t run_size =
- arena_mapbits_unallocated_size_get((arena_chunk_t *)
- extent_base_get(extent), pageind);
-
- npages = run_size >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
-
- assert(pageind + npages <= chunk_npages);
- assert(arena_mapbits_dirty_get((arena_chunk_t *)
- extent_base_get(extent), pageind) ==
- arena_mapbits_dirty_get((arena_chunk_t *)
- extent_base_get(extent), pageind+npages-1));
+ bool zero;
+ UNUSED extent_t *textent;
- /*
- * If purging the spare chunk's run, make it available
- * prior to allocation.
- */
- if (extent == arena->spare)
- arena_chunk_alloc(tsdn, arena);
-
- /* Temporarily allocate the free dirty run. */
- arena_run_split_large(tsdn, arena, extent, run,
- run_size, false);
- /* Stash. */
- if (false)
- qr_new(rdelm, rd_link); /* Redundant. */
- else {
- assert(qr_next(rdelm, rd_link) == rdelm);
- assert(qr_prev(rdelm, rd_link) == rdelm);
- }
- qr_meld(purge_runs_sentinel, rdelm, rd_link);
- }
+ npages = extent_size_get(extent) >> LG_PAGE;
+ if (opt_purge == purge_mode_decay && arena->ndirty - (nstashed +
+ npages) < ndirty_limit)
+ break;
+
+ next = qr_next(extent, qr_link);
+ /* Allocate. */
+ zero = false;
+ textent = arena_chunk_cache_alloc_locked(tsdn, arena,
+ chunk_hooks, extent_base_get(extent),
+ extent_size_get(extent), 0, CACHELINE, &zero, false);
+ assert(textent == extent);
+ assert(zero == extent_zeroed_get(extent));
+ extent_ring_remove(extent);
+ extent_ring_insert(purge_extents_sentinel, extent);
nstashed += npages;
if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
@@ -1486,90 +774,26 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- arena_runs_dirty_link_t *purge_runs_sentinel,
- extent_t *purge_chunks_sentinel)
+ extent_t *purge_extents_sentinel)
{
- size_t npurged, nmadvise;
- arena_runs_dirty_link_t *rdelm;
- extent_t *chunkselm;
+ UNUSED size_t nmadvise;
+ size_t npurged;
+ extent_t *extent, *next;
if (config_stats)
nmadvise = 0;
npurged = 0;
- malloc_mutex_unlock(tsdn, &arena->lock);
- for (rdelm = qr_next(purge_runs_sentinel, rd_link),
- chunkselm = qr_next(purge_chunks_sentinel, cc_link);
- rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
- size_t npages;
-
- if (rdelm == &chunkselm->rd) {
- /*
- * Don't actually purge the chunk here because 1)
- * chunkselm is embedded in the chunk and must remain
- * valid, and 2) we deallocate the chunk in
- * arena_unstash_purged(), where it is destroyed,
- * decommitted, or purged, depending on chunk
- * deallocation policy.
- */
- size_t size = extent_size_get(chunkselm);
- npages = size >> LG_PAGE;
- chunkselm = qr_next(chunkselm, cc_link);
- } else {
- size_t pageind, run_size, flag_unzeroed, flags, i;
- bool decommitted;
- extent_t *extent = iealloc(tsdn, rdelm);
- arena_chunk_t *chunk =
- (arena_chunk_t *)extent_base_get(extent);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(extent, rdelm);
- pageind = arena_miscelm_to_pageind(extent, miscelm);
- run_size = arena_mapbits_large_size_get(chunk, pageind);
- npages = run_size >> LG_PAGE;
-
- assert(pageind + npages <= chunk_npages);
- assert(!arena_mapbits_decommitted_get(chunk, pageind));
- assert(!arena_mapbits_decommitted_get(chunk,
- pageind+npages-1));
- decommitted = !chunk_decommit_wrapper(tsdn, arena,
- chunk_hooks, extent, pageind << LG_PAGE, npages <<
- LG_PAGE);
- if (decommitted) {
- flag_unzeroed = 0;
- flags = CHUNK_MAP_DECOMMITTED;
- } else {
- flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
- chunk_hooks, extent, pageind << LG_PAGE,
- run_size) ? CHUNK_MAP_UNZEROED : 0;
- flags = flag_unzeroed;
- }
- arena_mapbits_large_set(chunk, pageind+npages-1, 0,
- flags);
- arena_mapbits_large_set(chunk, pageind, run_size,
- flags);
-
- /*
- * Set the unzeroed flag for internal pages, now that
- * chunk_purge_wrapper() has returned whether the pages
- * were zeroed as a side effect of purging. This chunk
- * map modification is safe even though the arena mutex
- * isn't currently owned by this thread, because the run
- * is marked as allocated, thus protecting it from being
- * modified by any other thread. As long as these
- * writes don't perturb the first and last elements'
- * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
- */
- for (i = 1; i < npages-1; i++) {
- arena_mapbits_internal_set(chunk, pageind+i,
- flag_unzeroed);
- }
- }
-
- npurged += npages;
+ for (extent = qr_next(purge_extents_sentinel, qr_link); extent !=
+ purge_extents_sentinel; extent = next) {
if (config_stats)
nmadvise++;
+ npurged += extent_size_get(extent) >> LG_PAGE;
+
+ next = qr_next(extent, qr_link);
+ extent_ring_remove(extent);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, extent);
}
- malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena->stats.nmadvise += nmadvise;
@@ -1579,49 +803,12 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (npurged);
}
-static void
-arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- arena_runs_dirty_link_t *purge_runs_sentinel,
- extent_t *purge_chunks_sentinel)
-{
- arena_runs_dirty_link_t *rdelm, *rdelm_next;
- extent_t *chunkselm;
-
- /* Deallocate chunks/runs. */
- for (rdelm = qr_next(purge_runs_sentinel, rd_link),
- chunkselm = qr_next(purge_chunks_sentinel, cc_link);
- rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
- rdelm_next = qr_next(rdelm, rd_link);
- if (rdelm == &chunkselm->rd) {
- extent_t *chunkselm_next = qr_next(chunkselm, cc_link);
- extent_dirty_remove(chunkselm);
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- chunkselm);
- chunkselm = chunkselm_next;
- } else {
- extent_t *extent = iealloc(tsdn, rdelm);
- arena_chunk_t *chunk =
- (arena_chunk_t *)extent_base_get(extent);
- arena_chunk_map_misc_t *miscelm =
- arena_rd_to_miscelm(extent, rdelm);
- size_t pageind = arena_miscelm_to_pageind(extent,
- miscelm);
- bool decommitted = (arena_mapbits_decommitted_get(chunk,
- pageind) != 0);
- arena_run_t *run = &miscelm->run;
- qr_remove(rdelm, rd_link);
- arena_run_dalloc(tsdn, arena, extent, run, false, true,
- decommitted);
- }
- }
-}
-
/*
* NB: ndirty_limit is interpreted differently depending on opt_purge:
- * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
+ * - purge_mode_ratio: Purge as few dirty extents as possible to reach the
* desired state:
* (arena->ndirty <= ndirty_limit)
- * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
+ * - purge_mode_decay: Purge as many dirty extents as possible without
* violating the invariant:
* (arena->ndirty >= ndirty_limit)
*/
@@ -1630,8 +817,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t npurge, npurged;
- arena_runs_dirty_link_t purge_runs_sentinel;
- extent_t purge_chunks_sentinel;
+ extent_t purge_extents_sentinel;
arena->purging = true;
@@ -1646,19 +832,16 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
assert(opt_purge != purge_mode_ratio || (arena->nactive >>
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
- qr_new(&purge_runs_sentinel, rd_link);
- extent_init(&purge_chunks_sentinel, arena, NULL, 0, 0, false, false,
- false, false, false);
+ extent_init(&purge_extents_sentinel, arena, NULL, 0, 0, false, false,
+ false, false);
npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
- &purge_runs_sentinel, &purge_chunks_sentinel);
+ &purge_extents_sentinel);
if (npurge == 0)
goto label_return;
npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
- &purge_runs_sentinel, &purge_chunks_sentinel);
+ &purge_extents_sentinel);
assert(npurged == npurge);
- arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
- &purge_chunks_sentinel);
if (config_stats)
arena->stats.npurge++;
@@ -1679,6 +862,15 @@ arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
malloc_mutex_unlock(tsdn, &arena->lock);
}
+static void
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab)
+{
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
+ arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, slab);
+}
+
void
arena_reset(tsd_t *tsd, arena_t *arena)
{
@@ -1724,367 +916,225 @@ arena_reset(tsd_t *tsd, arena_t *arena)
/* Bins. */
for (i = 0; i < NBINS; i++) {
+ extent_t *slab, *next;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
+ if (bin->slabcur != NULL) {
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, bin->slabcur);
+ bin->slabcur = NULL;
+ }
+ while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
+ NULL)
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ for (slab = qr_next(&bin->slabs_full, qr_link); slab !=
+ &bin->slabs_full; slab = next) {
+ next = qr_next(slab, qr_link);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ }
if (config_stats) {
bin->stats.curregs = 0;
- bin->stats.curruns = 0;
+ bin->stats.curslabs = 0;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
- /*
- * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
- * chains directly correspond.
- */
- qr_new(&arena->runs_dirty, rd_link);
- for (extent = qr_next(&arena->chunks_cache, cc_link);
- extent != &arena->chunks_cache; extent = qr_next(extent, cc_link)) {
- qr_new(&extent->rd, rd_link);
- qr_meld(&arena->runs_dirty, &extent->rd, rd_link);
- }
-
- /* Arena chunks. */
- for (extent = ql_last(&arena->achunks, ql_link); extent != NULL; extent
- = ql_last(&arena->achunks, ql_link)) {
- ql_remove(&arena->achunks, extent, ql_link);
- arena_chunk_discard(tsd_tsdn(tsd), arena, extent);
- }
-
- /* Spare. */
- if (arena->spare != NULL) {
- arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
- arena->spare = NULL;
- }
-
assert(!arena->purging);
arena->nactive = 0;
- for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
- i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
static void
-arena_run_coalesce(arena_t *arena, extent_t *extent, size_t *p_size,
- size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
- size_t flag_decommitted)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- size_t size = *p_size;
- size_t run_ind = *p_run_ind;
- size_t run_pages = *p_run_pages;
-
- /* Try to coalesce forward. */
- if (run_ind + run_pages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
- arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
- arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
- flag_decommitted) {
- size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages);
- size_t nrun_pages = nrun_size >> LG_PAGE;
-
- /*
- * Remove successor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages+nrun_pages-1) == nrun_size);
- assert(arena_mapbits_dirty_get(chunk,
- run_ind+run_pages+nrun_pages-1) == flag_dirty);
- assert(arena_mapbits_decommitted_get(chunk,
- run_ind+run_pages+nrun_pages-1) == flag_decommitted);
- arena_avail_remove(arena, extent, run_ind+run_pages,
- nrun_pages);
-
- /*
- * If the successor is dirty, remove it from the set of dirty
- * pages.
- */
- if (flag_dirty != 0) {
- arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
- nrun_pages);
- }
-
- size += nrun_size;
- run_pages += nrun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
-
- /* Try to coalesce backward. */
- if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
- run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
- flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
- flag_decommitted) {
- size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind-1);
- size_t prun_pages = prun_size >> LG_PAGE;
-
- run_ind -= prun_pages;
-
- /*
- * Remove predecessor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- prun_size);
- assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
- flag_decommitted);
- arena_avail_remove(arena, extent, run_ind, prun_pages);
-
- /*
- * If the predecessor is dirty, remove it from the set of dirty
- * pages.
- */
- if (flag_dirty != 0) {
- arena_run_dirty_remove(arena, chunk, run_ind,
- prun_pages);
- }
-
- size += prun_size;
- run_pages += prun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
+arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab)
+{
- *p_size = size;
- *p_run_ind = run_ind;
- *p_run_pages = run_pages;
+ assert(extent_slab_data_get(slab)->nfree > 0);
+ extent_heap_insert(&bin->slabs_nonfull, slab);
}
-static size_t
-arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t run_ind)
+static void
+arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab)
{
- size_t size;
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
-
- if (arena_mapbits_large_get(chunk, run_ind) != 0) {
- size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE || arena_mapbits_large_size_get(chunk,
- run_ind+(size>>LG_PAGE)-1) == 0);
- } else {
- const arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
- size = bin_info->run_size;
- }
+ extent_heap_remove(&bin->slabs_nonfull, slab);
+}
- return (size);
+static extent_t *
+arena_bin_slabs_nonfull_tryget(arena_bin_t *bin)
+{
+ extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ if (slab == NULL)
+ return (NULL);
+ if (config_stats)
+ bin->stats.reslabs++;
+ return (slab);
}
static void
-arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- arena_run_t *run, bool dirty, bool cleaned, bool decommitted)
-{
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
-
- chunk = (arena_chunk_t *)extent_base_get(extent);
- miscelm = arena_run_to_miscelm(extent, run);
- run_ind = arena_miscelm_to_pageind(extent, miscelm);
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
- size = arena_run_size_get(arena, chunk, run, run_ind);
- run_pages = (size >> LG_PAGE);
- arena_nactive_sub(arena, run_pages);
+arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab)
+{
- /*
- * The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated and the caller
- * doesn't claim to have cleaned it.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
- != 0)
- dirty = true;
- flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
- flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty || decommitted) {
- size_t flags = flag_dirty | flag_decommitted;
- arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- flags);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- arena_mapbits_unzeroed_get(chunk, run_ind));
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
- }
+ assert(extent_slab_data_get(slab)->nfree == 0);
+ extent_ring_insert(&bin->slabs_full, slab);
+}
- arena_run_coalesce(arena, extent, &size, &run_ind, &run_pages,
- flag_dirty, flag_decommitted);
-
- /* Insert into runs_avail, now that coalescing is complete. */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
- arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
- arena_avail_insert(arena, extent, run_ind, run_pages);
-
- if (dirty)
- arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
-
- /* Deallocate chunk if it is now completely unused. */
- if (size == arena_maxrun) {
- assert(run_ind == map_bias);
- assert(run_pages == (arena_maxrun >> LG_PAGE));
- arena_chunk_dalloc(tsdn, arena, extent);
- }
+static void
+arena_bin_slabs_full_remove(extent_t *slab)
+{
- /*
- * It is okay to do dirty page processing here even if the chunk was
- * deallocated above, since in that case it is the spare. Waiting
- * until after possible chunk deallocation to do dirty processing
- * allows for an old spare to be fully deallocated, thus decreasing the
- * chances of spuriously crossing the dirty page purging threshold.
- */
- if (dirty)
- arena_maybe_purge(tsdn, arena);
+ extent_ring_remove(slab);
}
-static void
-arena_bin_runs_insert(arena_bin_t *bin, extent_t *extent, arena_run_t *run)
+static extent_t *
+arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ const arena_bin_info_t *bin_info)
{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(extent, run);
+ extent_t *slab;
+ bool zero, commit;
+
+ zero = false;
+ commit = true;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ slab = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL,
+ bin_info->slab_size, 0, PAGE, &zero, &commit, true);
+ malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_heap_insert(&bin->runs, miscelm);
+ return (slab);
}
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+static extent_t *
+arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ const arena_bin_info_t *bin_info)
{
- arena_chunk_map_misc_t *miscelm;
+ extent_t *slab;
+ arena_slab_data_t *slab_data;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool zero;
+
+ zero = false;
+ slab = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
+ bin_info->slab_size, 0, PAGE, &zero, true);
+ if (slab == NULL) {
+ slab = arena_slab_alloc_hard(tsdn, arena, &chunk_hooks,
+ bin_info);
+ if (slab == NULL)
+ return (NULL);
+ }
+ assert(extent_slab_get(slab));
+
+ arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
+
+ /* Initialize slab internals. */
+ slab_data = extent_slab_data_get(slab);
+ slab_data->binind = binind;
+ slab_data->nfree = bin_info->nregs;
+ bitmap_init(slab_data->bitmap, &bin_info->bitmap_info);
- miscelm = arena_run_heap_remove_first(&bin->runs);
- if (miscelm == NULL)
- return (NULL);
if (config_stats)
- bin->stats.reruns++;
+ arena->stats.mapped += extent_size_get(slab);
- return (&miscelm->run);
+ return (slab);
}
-static arena_run_t *
-arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+static extent_t *
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+ szind_t binind)
{
- arena_run_t *run;
- szind_t binind;
+ extent_t *slab;
const arena_bin_info_t *bin_info;
- /* Look for a usable run. */
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
- /* No existing runs have any space available. */
+ /* Look for a usable slab. */
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL)
+ return (slab);
+ /* No existing slabs have any space available. */
- binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
- /* Allocate a new run. */
+ /* Allocate a new slab. */
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
- if (run != NULL) {
- /* Initialize run internals. */
- run->binind = binind;
- run->nfree = bin_info->nregs;
- bitmap_init(run->bitmap, &bin_info->bitmap_info);
- }
+ slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
malloc_mutex_unlock(tsdn, &arena->lock);
/********************************/
malloc_mutex_lock(tsdn, &bin->lock);
- if (run != NULL) {
+ if (slab != NULL) {
if (config_stats) {
- bin->stats.nruns++;
- bin->stats.curruns++;
+ bin->stats.nslabs++;
+ bin->stats.curslabs++;
}
- return (run);
+ return (slab);
}
/*
- * arena_run_alloc_small() failed, but another thread may have made
+ * arena_slab_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL)
+ return (slab);
return (NULL);
}
-/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+ szind_t binind)
{
- szind_t binind;
const arena_bin_info_t *bin_info;
- arena_run_t *run;
+ extent_t *slab;
+
- binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
- bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(tsdn, arena, bin);
- if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+ if (bin->slabcur != NULL) {
+ arena_bin_slabs_full_insert(bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
+ slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
+ if (bin->slabcur != NULL) {
/*
- * Another thread updated runcur while this one ran without the
- * bin lock in arena_bin_nonfull_run_get().
+ * Another thread updated slabcur while this one ran without the
+ * bin lock in arena_bin_nonfull_slab_get().
*/
- void *ret;
- assert(bin->runcur->nfree > 0);
- ret = arena_run_reg_alloc(tsdn, bin->runcur, bin_info);
- if (run != NULL) {
- extent_t *extent;
- arena_chunk_t *chunk;
-
- /*
- * arena_run_alloc_small() may have allocated run, or
- * it may have pulled run from the bin's run tree.
- * Therefore it is unsafe to make any assumptions about
- * how run has previously been used, and
- * arena_bin_lower_run() must be called, as if a region
- * were just deallocated from the run.
- */
- extent = iealloc(tsdn, run);
- chunk = (arena_chunk_t *)extent_base_get(extent);
- if (run->nfree == bin_info->nregs) {
- arena_dalloc_bin_run(tsdn, arena, chunk, extent,
- run, bin);
- } else {
- arena_bin_lower_run(tsdn, arena, extent, run,
- bin);
+ if (extent_slab_data_get(bin->slabcur)->nfree > 0) {
+ void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
+ bin_info);
+ if (slab != NULL) {
+ /*
+ * arena_slab_alloc() may have allocated slab,
+ * or it may have been pulled from
+ * slabs_nonfull. Therefore it is unsafe to
+ * make any assumptions about how slab has
+ * previously been used, and
+ * arena_bin_lower_slab() must be called, as if
+ * a region were just deallocated from the slab.
+ */
+ if (extent_slab_data_get(slab)->nfree ==
+ bin_info->nregs) {
+ arena_dalloc_bin_slab(tsdn, arena, slab,
+ bin);
+ } else {
+ arena_bin_lower_slab(tsdn, arena, slab,
+ bin);
+ }
}
+ return (ret);
}
- return (ret);
+
+ arena_bin_slabs_full_insert(bin, bin->slabcur);
+ bin->slabcur = NULL;
}
- if (run == NULL)
+ if (slab == NULL)
return (NULL);
+ bin->slabcur = slab;
- bin->runcur = run;
-
- assert(bin->runcur->nfree > 0);
+ assert(extent_slab_data_get(bin->slabcur)->nfree > 0);
- return (arena_run_reg_alloc(tsdn, bin->runcur, bin_info));
+ return (arena_slab_reg_alloc(tsdn, slab, bin_info));
}
void
@@ -2102,13 +1152,14 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
- arena_run_t *run;
+ extent_t *slab;
void *ptr;
- if ((run = bin->runcur) != NULL && run->nfree > 0) {
- ptr = arena_run_reg_alloc(tsdn, run,
+ if ((slab = bin->slabcur) != NULL &&
+ extent_slab_data_get(slab)->nfree > 0) {
+ ptr = arena_slab_reg_alloc(tsdn, slab,
&arena_bin_info[binind]);
} else
- ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
@@ -2171,17 +1222,18 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
void *ret;
arena_bin_t *bin;
size_t usize;
- arena_run_t *run;
+ extent_t *slab;
assert(binind < NBINS);
bin = &arena->bins[binind];
usize = index2size(binind);
malloc_mutex_lock(tsdn, &bin->lock);
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ret = arena_run_reg_alloc(tsdn, run, &arena_bin_info[binind]);
+ if ((slab = bin->slabcur) != NULL && extent_slab_data_get(slab)->nfree >
+ 0)
+ ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
else
- ret = arena_bin_malloc_hard(tsdn, arena, bin);
+ ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
if (ret == NULL) {
malloc_mutex_unlock(tsdn, &bin->lock);
@@ -2242,7 +1294,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
- /* Small; alignment doesn't require special run placement. */
+ /* Small; alignment doesn't require special slab placement. */
ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
} else {
@@ -2315,97 +1367,92 @@ arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
}
static void
-arena_dissociate_bin_run(extent_t *extent, arena_run_t *run, arena_bin_t *bin)
+arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin)
{
- /* Dissociate run from bin. */
- if (run == bin->runcur)
- bin->runcur = NULL;
+ /* Dissociate slab from bin. */
+ if (slab == bin->slabcur)
+ bin->slabcur = NULL;
else {
- szind_t binind = arena_bin_index(extent_arena_get(extent), bin);
+ szind_t binind = extent_slab_data_get(slab)->binind;
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
/*
* The following block's conditional is necessary because if the
- * run only contains one region, then it never gets inserted
- * into the non-full runs tree.
+ * slab only contains one region, then it never gets inserted
+ * into the non-full slabs heap.
*/
- if (bin_info->nregs != 1) {
- arena_chunk_map_misc_t *miscelm =
- arena_run_to_miscelm(extent, run);
-
- arena_run_heap_remove(&bin->runs, miscelm);
- }
+ if (bin_info->nregs == 1)
+ arena_bin_slabs_full_remove(slab);
+ else
+ arena_bin_slabs_nonfull_remove(bin, slab);
}
}
static void
-arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, arena_run_t *run, arena_bin_t *bin)
+arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin)
{
- assert(run != bin->runcur);
+ assert(slab != bin->slabcur);
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_dalloc(tsdn, arena, extent, run, true, false, false);
+ arena_slab_dalloc(tsdn, arena, slab);
malloc_mutex_unlock(tsdn, &arena->lock);
/****************************/
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats)
- bin->stats.curruns--;
+ bin->stats.curslabs--;
}
static void
-arena_bin_lower_run(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- arena_run_t *run, arena_bin_t *bin)
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin)
{
+ assert(extent_slab_data_get(slab)->nfree > 0);
+
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the lowest
- * non-full run. It is okay to NULL runcur out rather than proactively
- * keeping it pointing at the lowest non-full run.
+ * Make sure that if bin->slabcur is non-NULL, it refers to the lowest
+ * non-full slab. It is okay to NULL slabcur out rather than
+ * proactively keeping it pointing at the lowest non-full slab.
*/
- if ((uintptr_t)run < (uintptr_t)bin->runcur) {
- /* Switch runcur. */
- if (bin->runcur->nfree > 0) {
- arena_bin_runs_insert(bin, iealloc(tsdn, bin->runcur),
- bin->runcur);
- }
- bin->runcur = run;
+ if (bin->slabcur != NULL && (uintptr_t)extent_addr_get(slab) <
+ (uintptr_t)extent_addr_get(bin->slabcur)) {
+ /* Switch slabcur. */
+ if (extent_slab_data_get(bin->slabcur)->nfree > 0)
+ arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
+ else
+ arena_bin_slabs_full_insert(bin, bin->slabcur);
+ bin->slabcur = slab;
if (config_stats)
- bin->stats.reruns++;
+ bin->stats.reslabs++;
} else
- arena_bin_runs_insert(bin, extent, run);
+ arena_bin_slabs_nonfull_insert(bin, slab);
}
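
A sketch of the slabcur-switch policy above: base addresses are compared as uintptr_t values, as in the hunk, and the lowest-addressed non-full slab is preferred so that higher-addressed slabs can drain and eventually be returned to the arena. The structures are simplified stand-ins.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	void		*addr;	/* Slab base address. */
	unsigned	nfree;	/* Free regions in the slab. */
} toy_slab_t;

/* Returns the slab that should become (or remain) bin->slabcur. */
static toy_slab_t *
toy_bin_lower_slab(toy_slab_t *slabcur, toy_slab_t *slab)
{
	if (slabcur != NULL && (uintptr_t)slab->addr <
	    (uintptr_t)slabcur->addr) {
		/*
		 * The freed-into slab is lower; the old slabcur would be
		 * demoted to the nonfull heap (nfree > 0) or the full list
		 * (nfree == 0), and the reslabs stat bumped.
		 */
		return (slab);
	}
	/* Otherwise the freed-into slab goes onto the nonfull heap. */
	return (slabcur);
}

int
main(void)
{
	toy_slab_t low = {(void *)0x1000, 1};
	toy_slab_t high = {(void *)0x8000, 3};

	printf("high stays slabcur? %d\n",
	    toy_bin_lower_slab(&high, &low) == &high);	/* 0: low wins. */
	printf("low stays slabcur?  %d\n",
	    toy_bin_lower_slab(&low, &high) == &low);	/* 1: low wins. */
	return (0);
}
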
static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ void *ptr, bool junked)
{
- size_t pageind, rpages_ind;
- arena_run_t *run;
- arena_bin_t *bin;
- const arena_bin_info_t *bin_info;
- szind_t binind;
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
- binind = run->binind;
- bin = &arena->bins[binind];
- bin_info = &arena_bin_info[binind];
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ szind_t binind = slab_data->binind;
+ arena_bin_t *bin = &arena->bins[binind];
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
if (!junked && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, bin_info);
- arena_run_reg_dalloc(tsdn, run, extent, ptr);
- if (run->nfree == bin_info->nregs) {
- arena_dissociate_bin_run(extent, run, bin);
- arena_dalloc_bin_run(tsdn, arena, chunk, extent, run, bin);
- } else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(tsdn, arena, extent, run, bin);
+ arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
+ if (slab_data->nfree == bin_info->nregs) {
+ arena_dissociate_bin_slab(slab, bin);
+ arena_dalloc_bin_slab(tsdn, arena, slab, bin);
+ } else if (slab_data->nfree == 1 && slab != bin->slabcur) {
+ arena_bin_slabs_full_remove(slab);
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
+ }
if (config_stats) {
bin->stats.ndalloc++;
@@ -2414,45 +1461,28 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
}
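
The post-free bookkeeping in this function reduces to a three-way decision; a tiny pure function with assumed toy_* names makes the cases explicit:

#include <assert.h>
#include <stdbool.h>

typedef enum {
	ACTION_NONE,	/* Slab stays where it is. */
	ACTION_UNFULL,	/* Full to nonfull: remove from the full list,
			 * then lower the slab as shown above. */
	ACTION_RETIRE	/* Empty: dissociate and dalloc the slab. */
} toy_action_t;

static toy_action_t
toy_dalloc_action(unsigned nfree, unsigned nregs, bool is_slabcur)
{
	if (nfree == nregs)
		return (ACTION_RETIRE);
	if (nfree == 1 && !is_slabcur)
		return (ACTION_UNFULL);
	return (ACTION_NONE);
}

int
main(void)
{
	/* Last region freed: the whole slab can go back to the arena. */
	assert(toy_dalloc_action(8, 8, false) == ACTION_RETIRE);
	/* First region freed from a full, non-current slab. */
	assert(toy_dalloc_action(1, 8, false) == ACTION_UNFULL);
	/* slabcur never sits on the full list, so it needs no move. */
	assert(toy_dalloc_action(1, 8, true) == ACTION_NONE);
	return (0);
}
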
void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, extent_t *extent, void *ptr,
- arena_chunk_map_bits_t *bitselm)
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ void *ptr)
{
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, extent, ptr, bitselm,
- true);
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}
static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm)
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
{
- arena_run_t *run;
- arena_bin_t *bin;
- size_t rpages_ind;
+ arena_bin_t *bin = &arena->bins[extent_slab_data_get(extent)->binind];
- rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
- bin = &arena->bins[run->binind];
malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, extent, ptr, bitselm,
- false);
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
malloc_mutex_unlock(tsdn, &bin->lock);
}
void
-arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, void *ptr, size_t pageind)
+arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
{
- arena_chunk_map_bits_t *bitselm;
- if (config_debug) {
- /* arena_ptr_small_binind_get() does extra sanity checking. */
- assert(arena_ptr_small_binind_get(tsdn, ptr,
- arena_mapbits_get(chunk, pageind)) != BININD_INVALID);
- }
- bitselm = arena_bitselm_get_mutable(chunk, pageind);
- arena_dalloc_bin(tsdn, arena, chunk, extent, ptr, pageind, bitselm);
+ arena_dalloc_bin(tsdn, arena, extent, ptr);
arena_decay_tick(tsdn, arena);
}
@@ -2682,9 +1712,9 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
}
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].curruns += bin->stats.curruns;
+ bstats[i].nslabs += bin->stats.nslabs;
+ bstats[i].reslabs += bin->stats.reslabs;
+ bstats[i].curslabs += bin->stats.curslabs;
malloc_mutex_unlock(tsdn, &bin->lock);
}
}
@@ -2745,17 +1775,13 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena->dss_prec = chunk_dss_prec_get(tsdn);
- ql_new(&arena->achunks);
-
- arena->spare = NULL;
-
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
arena->purging = false;
arena->nactive = 0;
arena->ndirty = 0;
- qr_new(&arena->runs_dirty, rd_link);
- qr_new(&arena->chunks_cache, cc_link);
+ extent_init(&arena->extents_dirty, arena, NULL, 0, 0, false, false,
+ false, false);
if (opt_purge == purge_mode_decay)
arena_decay_init(arena, arena_decay_time_default_get());
@@ -2786,52 +1812,23 @@ arena_new(tsdn_t *tsdn, unsigned ind)
if (malloc_mutex_init(&bin->lock, "arena_bin",
WITNESS_RANK_ARENA_BIN))
return (NULL);
- bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
+ bin->slabcur = NULL;
+ extent_heap_new(&bin->slabs_nonfull);
+ extent_init(&bin->slabs_full, arena, NULL, 0, 0, false, false,
+ false, false);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
- for (i = 0; i < NPSIZES; i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
return (arena);
}
void
arena_boot(void)
{
- unsigned i;
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
arena_decay_time_default_set(opt_decay_time);
-
- /*
- * Compute the header size such that it is large enough to contain the
- * page map. The page map is biased to omit entries for the header
- * itself, so some iteration is necessary to compute the map bias.
- *
- * 1) Compute safe header_size and map_bias values that include enough
- * space for an unbiased page map.
- * 2) Refine map_bias based on (1) to omit the header pages in the page
- * map. The resulting map_bias may be one too small.
- * 3) Refine map_bias based on (2). The result will be >= the result
- * from (2), and will always be correct.
- */
- map_bias = 0;
- for (i = 0; i < 3; i++) {
- size_t header_size = offsetof(arena_chunk_t, map_bits) +
- ((sizeof(arena_chunk_map_bits_t) +
- sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
- map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
- }
- assert(map_bias > 0);
-
- map_misc_offset = offsetof(arena_chunk_t, map_bits) +
- sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
-
- arena_maxrun = chunksize - (map_bias << LG_PAGE);
- assert(arena_maxrun > 0);
}
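
The block removed above computed map_bias as a short fixed-point iteration. A standalone version with made-up sizes (TOY_HDR0, TOY_PER_PAGE, TOY_NPAGES are illustrative, not jemalloc's values) shows the refinement the deleted comment describes: a safe over-estimate, a possibly-one-too-small refinement, then a stable correct value.

#include <stdio.h>

#define TOY_LG_PAGE	12
#define TOY_PAGE_MASK	((1U << TOY_LG_PAGE) - 1)
#define TOY_NPAGES	512	/* Pages per chunk (illustrative). */
#define TOY_HDR0	128	/* Fixed header prefix, in bytes. */
#define TOY_PER_PAGE	56	/* Map bytes needed per mapped page. */

int
main(void)
{
	unsigned map_bias = 0;

	for (unsigned i = 0; i < 3; i++) {
		/* Header must hold map entries for the non-header pages. */
		unsigned header_size = TOY_HDR0 +
		    TOY_PER_PAGE * (TOY_NPAGES - map_bias);
		map_bias = (header_size + TOY_PAGE_MASK) >> TOY_LG_PAGE;
		printf("round %u: header %u bytes -> map_bias %u\n",
		    i, header_size, map_bias);
	}
	return (0);
}

With these example numbers the rounds produce 8, then 7, then 7: the first round over-counts because it reserves map entries even for the header's own pages, and the remaining two rounds converge, mirroring the three-step argument in the removed comment. With slabs tracked per extent rather than per chunk header, none of this bookkeeping is needed any more, which is why arena_boot() shrinks to the two default setters.
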
void