author     Joerg Koenig <jck@techsat.com>   2018-01-19 10:53:15 (GMT)
committer  Joerg Koenig <jck@techsat.com>   2018-01-19 10:53:15 (GMT)
commit     aa502047980c13cdbe5a1adb7f024199c367254d (patch)
tree       8cc6a214bde51a42beb77ac92fe0d00dd44c0c90 /src/extent.c
parent     3f5049340e66c6929c3270f7359617f62e053b11 (diff)
parent     f78d4ca3fbff6cab0c704c787706a53ddafcbe13 (diff)
Merge https://github.com/jemalloc/jemalloc (HEAD, master)
Change-Id: I44881ca21f4710f7ad5154a45c4a7204ae71c84c
Diffstat (limited to 'src/extent.c')
-rw-r--r--   src/extent.c   470
1 file changed, 345 insertions(+), 125 deletions(-)
diff --git a/src/extent.c b/src/extent.c
index fa45c84..517780e 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -17,6 +17,8 @@ rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;
+size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
+
static const bitmap_info_t extents_bitmap_info =
BITMAP_INFO_INITIALIZER(NPSIZES+1);
@@ -117,7 +119,7 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,
/******************************************************************************/
-rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
+ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp)
typedef enum {
@@ -361,6 +363,43 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
+/*
+ * Find an extent with size [min_size, max_size) to satisfy the alignment
+ * requirement. For each size, try only the first extent in the heap.
+ */
+static extent_t *
+extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
+ size_t alignment) {
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
+ pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
+
+ for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
+ &extents_bitmap_info, (size_t)pind); i < pind_max; i =
+ (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+ (size_t)i+1)) {
+ assert(i < NPSIZES);
+ assert(!extent_heap_empty(&extents->heaps[i]));
+ extent_t *extent = extent_heap_first(&extents->heaps[i]);
+ uintptr_t base = (uintptr_t)extent_base_get(extent);
+ size_t candidate_size = extent_size_get(extent);
+ assert(candidate_size >= min_size);
+
+ uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
+ PAGE_CEILING(alignment));
+ if (base > next_align || base + candidate_size <= next_align) {
+ /* Overflow or not crossing the next alignment. */
+ continue;
+ }
+
+ size_t leadsize = next_align - base;
+ if (candidate_size - leadsize >= min_size) {
+ return extent;
+ }
+ }
+
+ return NULL;
+}
+
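
A standalone sketch of the fit test in extents_fit_alignment() above; PAGE, PAGE_CEILING, and ALIGNMENT_CEILING here are simplified stand-ins for jemalloc's macros, assuming 4 KiB pages and power-of-two alignments:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE                    ((size_t)4096)
#define PAGE_CEILING(s)         (((s) + (PAGE - 1)) & ~(PAGE - 1))
#define ALIGNMENT_CEILING(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

/*
 * Mirrors the per-candidate check: find the first alignment boundary at or
 * after base, reject on overflow or if the boundary lies past the extent,
 * and accept only if enough bytes remain after the discarded lead.
 */
static bool
can_fit_aligned(uintptr_t base, size_t size, size_t min_size, size_t alignment) {
	uintptr_t next_align = ALIGNMENT_CEILING(base, PAGE_CEILING(alignment));
	if (base > next_align || base + size <= next_align) {
		return false;
	}
	size_t leadsize = next_align - base;
	return size - leadsize >= min_size;
}

int
main(void) {
	/* 64 KiB extent at 0x7000: a 16 KiB request aligned to 32 KiB fits. */
	printf("%d\n", can_fit_aligned(0x7000, 64 * 1024, 16 * 1024, 32 * 1024));
	return 0;
}
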
/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
@@ -369,8 +408,15 @@ extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
if (i < NPSIZES+1) {
+ /*
+ * In order to reduce fragmentation, avoid reusing and splitting
+ * large extents for much smaller sizes.
+ */
+ if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
+ return NULL;
+ }
assert(!extent_heap_empty(&extents->heaps[i]));
- extent_t *extent = extent_heap_any(&extents->heaps[i]);
+ extent_t *extent = extent_heap_first(&extents->heaps[i]);
assert(extent_size_get(extent) >= size);
return extent;
}
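
The new opt_lg_extent_max_active_fit check above refuses to split a cached extent whose size class is more than 2^opt_lg_extent_max_active_fit times the requested size. A minimal sketch, assuming the default of 6 (candidates up to 64x the request stay eligible):

#include <stdbool.h>
#include <stdio.h>

/* Reuse a cached size class only if it is at most 2^lg_max_fit times the
 * requested size; larger candidates are left alone to limit fragmentation. */
static bool
worth_reusing(size_t candidate_class, size_t request, unsigned lg_max_fit) {
	return (candidate_class >> lg_max_fit) <= request;
}

int
main(void) {
	unsigned lg_max_fit = 6;	/* assumed default */
	size_t two_mib = (size_t)2 << 20;
	printf("%d\n", worth_reusing(two_mib, (size_t)32 << 10, lg_max_fit)); /* 1: 64x  */
	printf("%d\n", worth_reusing(two_mib, (size_t)16 << 10, lg_max_fit)); /* 0: 128x */
	return 0;
}
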
@@ -415,12 +461,30 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
*/
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- size_t size) {
+ size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
- return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
- extents, size) : extents_first_fit_locked(tsdn, arena, extents,
- size);
+ size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (max_size < esize) {
+ return NULL;
+ }
+
+ extent_t *extent = extents->delay_coalesce ?
+ extents_best_fit_locked(tsdn, arena, extents, max_size) :
+ extents_first_fit_locked(tsdn, arena, extents, max_size);
+
+ if (alignment > PAGE && extent == NULL) {
+ /*
+ * max_size guarantees the alignment requirement but is rather
+ * pessimistic. Next we try to satisfy the aligned allocation
+ * with sizes in [esize, max_size).
+ */
+ extent = extents_fit_alignment(extents, esize, max_size,
+ alignment);
+ }
+
+ return extent;
}
static bool
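
The max_size bound in extents_fit_locked() above is deliberately pessimistic: an extent of esize + PAGE_CEILING(alignment) - PAGE bytes always contains an aligned esize-byte region, whichever page-aligned base it starts at, which is why the narrower [esize, max_size) search is only attempted as a fallback. A small check of that claim, with simplified macros and the alignment assumed already page-ceiled:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE		((uintptr_t)4096)
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int
main(void) {
	uintptr_t esize = 8 * PAGE;
	uintptr_t alignment = 16 * PAGE;
	uintptr_t max_size = esize + alignment - PAGE;
	/* Every page-aligned base leaves at least esize bytes after the lead. */
	for (uintptr_t base = 0; base < alignment; base += PAGE) {
		uintptr_t lead = ALIGN_UP(base, alignment) - base;
		assert(max_size - lead >= esize);
	}
	printf("worst-case lead is alignment - PAGE; the bound always suffices\n");
	return 0;
}
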
@@ -449,8 +513,10 @@ extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
- size, pad, alignment, slab, szind, zero, commit, false);
+ extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
+ new_addr, size, pad, alignment, slab, szind, zero, commit, false);
+ assert(extent == NULL || extent_dumpable_get(extent));
+ return extent;
}
void
@@ -458,6 +524,7 @@ extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
+ assert(extent_dumpable_get(extent));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
@@ -487,10 +554,9 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
goto label_return;
}
/* Check the eviction limit. */
- size_t npages = extent_size_get(extent) >> LG_PAGE;
size_t extents_npages = atomic_load_zu(&extents->npages,
ATOMIC_RELAXED);
- if (extents_npages - npages < npages_min) {
+ if (extents_npages <= npages_min) {
extent = NULL;
goto label_return;
}
@@ -723,6 +789,13 @@ extent_reregister(tsdn_t *tsdn, extent_t *extent) {
assert(!err);
}
+/*
+ * Removes all pointers to the given extent from the global rtree indices for
+ * its interior. This is relevant for slab extents, for which we need to do
+ * metadata lookups at places other than the head of the extent. We deregister
+ * on the interior, then, when an extent moves from being an active slab to an
+ * inactive state.
+ */
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
extent_t *extent) {
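
A toy model of the interior registration that the comment above describes: while a slab is active, every one of its pages has a lookup entry so that a pointer into any interior page resolves back to the slab; when the slab leaves the active state those interior entries must be removed. The array below stands in for the rtree and is not jemalloc code:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NPAGES 16
static int page_to_slab[NPAGES];	/* 0 = no entry, else slab id */

static void
slab_register(size_t first_page, size_t npages, int slab_id) {
	/* Head and every interior page get an entry while the slab is active. */
	for (size_t i = 0; i < npages; i++) {
		page_to_slab[first_page + i] = slab_id;
	}
}

static void
slab_interior_deregister(size_t first_page, size_t npages) {
	/* Drop the interior entries only; the head is handled separately. */
	for (size_t i = 1; i < npages; i++) {
		page_to_slab[first_page + i] = 0;
	}
}

int
main(void) {
	slab_register(4, 4, 7);
	assert(page_to_slab[6] == 7);	/* interior pointer resolves to slab 7 */
	slab_interior_deregister(4, 4);
	assert(page_to_slab[4] == 7 && page_to_slab[6] == 0);
	printf("interior entries removed; head entry kept\n");
	return 0;
}
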
@@ -737,8 +810,11 @@ extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
}
}
+/*
+ * Removes all pointers to the given extent from the global rtree.
+ */
static void
-extent_deregister(tsdn_t *tsdn, extent_t *extent) {
+extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
@@ -755,16 +831,30 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent) {
extent_unlock(tsdn, extent);
- if (config_prof) {
+ if (config_prof && gdump) {
extent_gdump_sub(tsdn, extent);
}
}
+static void
+extent_deregister(tsdn_t *tsdn, extent_t *extent) {
+ extent_deregister_impl(tsdn, extent, true);
+}
+
+static void
+extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
+ extent_deregister_impl(tsdn, extent, false);
+}
+
+/*
+ * Tries to find and remove an extent from extents that can be used for the
+ * given allocation request.
+ */
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- bool *zero, bool *commit, bool growing_retained) {
+ bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(alignment > 0);
@@ -786,11 +876,6 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
}
size_t esize = size + pad;
- size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
- /* Beware size_t wrap-around. */
- if (alloc_size < esize) {
- return NULL;
- }
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent;
@@ -812,7 +897,8 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_unlock(tsdn, unlock_extent);
}
} else {
- extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
+ extent = extents_fit_locked(tsdn, arena, extents, esize,
+ alignment);
}
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &extents->mtx);
@@ -822,76 +908,161 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_activate_locked(tsdn, arena, extents, extent, false);
malloc_mutex_unlock(tsdn, &extents->mtx);
- if (extent_zeroed_get(extent)) {
- *zero = true;
- }
- if (extent_committed_get(extent)) {
- *commit = true;
- }
-
return extent;
}
-static extent_t *
-extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+/*
+ * Given an allocation request and an extent guaranteed to be able to satisfy
+ * it, this splits off lead and trail extents, leaving extent pointing to an
+ * extent satisfying the allocation.
+ * This function doesn't put lead or trail into any extents_t; it's the caller's
+ * job to ensure that they can be reused.
+ */
+typedef enum {
+ /*
+ * Split successfully. lead, extent, and trail, are modified to extents
+ * describing the ranges before, in, and after the given allocation.
+ */
+ extent_split_interior_ok,
+ /*
+ * The extent can't satisfy the given allocation request. None of the
+ * input extent_t *s are touched.
+ */
+ extent_split_interior_cant_alloc,
+ /*
+ * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
+ * and salvage what's still salvageable (if *to_salvage is non-NULL).
+ * None of lead, extent, or trail are valid.
+ */
+ extent_split_interior_error
+} extent_split_interior_result_t;
+
+static extent_split_interior_result_t
+extent_split_interior(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
+ /* The result of splitting, in case of success. */
+ extent_t **extent, extent_t **lead, extent_t **trail,
+ /* The mess to clean up, in case of error. */
+ extent_t **to_leak, extent_t **to_salvage,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- szind_t szind, extent_t *extent, bool growing_retained) {
+ szind_t szind, bool growing_retained) {
size_t esize = size + pad;
- size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
- PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
+ PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
assert(new_addr == NULL || leadsize == 0);
- assert(extent_size_get(extent) >= leadsize + esize);
- size_t trailsize = extent_size_get(extent) - leadsize - esize;
+ if (extent_size_get(*extent) < leadsize + esize) {
+ return extent_split_interior_cant_alloc;
+ }
+ size_t trailsize = extent_size_get(*extent) - leadsize - esize;
+
+ *lead = NULL;
+ *trail = NULL;
+ *to_leak = NULL;
+ *to_salvage = NULL;
/* Split the lead. */
if (leadsize != 0) {
- extent_t *lead = extent;
- extent = extent_split_impl(tsdn, arena, r_extent_hooks,
- lead, leadsize, NSIZES, false, esize + trailsize, szind,
+ *lead = *extent;
+ *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
+ *lead, leadsize, NSIZES, false, esize + trailsize, szind,
slab, growing_retained);
- if (extent == NULL) {
- extent_deregister(tsdn, lead);
- extents_leak(tsdn, arena, r_extent_hooks, extents,
- lead, growing_retained);
- return NULL;
+ if (*extent == NULL) {
+ *to_leak = *lead;
+ *lead = NULL;
+ return extent_split_interior_error;
}
- extent_deactivate(tsdn, arena, extents, lead, false);
}
/* Split the trail. */
if (trailsize != 0) {
- extent_t *trail = extent_split_impl(tsdn, arena,
- r_extent_hooks, extent, esize, szind, slab, trailsize,
- NSIZES, false, growing_retained);
- if (trail == NULL) {
- extent_deregister(tsdn, extent);
- extents_leak(tsdn, arena, r_extent_hooks, extents,
- extent, growing_retained);
- return NULL;
+ *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
+ esize, szind, slab, trailsize, NSIZES, false,
+ growing_retained);
+ if (*trail == NULL) {
+ *to_leak = *extent;
+ *to_salvage = *lead;
+ *lead = NULL;
+ *extent = NULL;
+ return extent_split_interior_error;
}
- extent_deactivate(tsdn, arena, extents, trail, false);
- } else if (leadsize == 0) {
+ }
+
+ if (leadsize == 0 && trailsize == 0) {
/*
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
- extent_szind_set(extent, szind);
+ extent_szind_set(*extent, szind);
if (szind != NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(extent), szind, slab);
- if (slab && extent_size_get(extent) > PAGE) {
+ (uintptr_t)extent_addr_get(*extent), szind, slab);
+ if (slab && extent_size_get(*extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
- (uintptr_t)extent_past_get(extent) -
+ (uintptr_t)extent_past_get(*extent) -
(uintptr_t)PAGE, szind, slab);
}
}
}
- return extent;
+ return extent_split_interior_ok;
}
+/*
+ * This fulfills the indicated allocation request out of the given extent (which
+ * the caller should have ensured was big enough). If there's any unused space
+ * before or after the resulting allocation, that space is given its own extent
+ * and put back into extents.
+ */
+static extent_t *
+extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ szind_t szind, extent_t *extent, bool growing_retained) {
+ extent_t *lead;
+ extent_t *trail;
+ extent_t *to_leak;
+ extent_t *to_salvage;
+
+ extent_split_interior_result_t result = extent_split_interior(
+ tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
+ &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
+ growing_retained);
+
+ if (result == extent_split_interior_ok) {
+ if (lead != NULL) {
+ extent_deactivate(tsdn, arena, extents, lead, false);
+ }
+ if (trail != NULL) {
+ extent_deactivate(tsdn, arena, extents, trail, false);
+ }
+ return extent;
+ } else {
+ /*
+ * We should have picked an extent that was large enough to
+ * fulfill our allocation request.
+ */
+ assert(result == extent_split_interior_error);
+ if (to_salvage != NULL) {
+ extent_deregister(tsdn, to_salvage);
+ }
+ if (to_leak != NULL) {
+ void *leak = extent_base_get(to_leak);
+ extent_deregister_no_gdump_sub(tsdn, to_leak);
+ extents_leak(tsdn, arena, r_extent_hooks, extents,
+ to_leak, growing_retained);
+ assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
+ == NULL);
+ }
+ return NULL;
+ }
+ unreachable();
+}
+
+/*
+ * Tries to satisfy the given allocation request by reusing one of the extents
+ * in the given extents_t.
+ */
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t size, size_t pad,
@@ -906,16 +1077,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- bool committed = false;
extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero,
- &committed, growing_retained);
+ rtree_ctx, extents, new_addr, size, pad, alignment, slab,
+ growing_retained);
if (extent == NULL) {
return NULL;
}
- if (committed) {
- *commit = true;
- }
extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, new_addr, size, pad, alignment, slab, szind, extent,
@@ -934,6 +1101,13 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_zeroed_set(extent, true);
}
+ if (extent_committed_get(extent)) {
+ *commit = true;
+ }
+ if (extent_zeroed_get(extent)) {
+ *zero = true;
+ }
+
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
@@ -1028,7 +1202,18 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- pre_reentrancy(tsd, arena);
+ if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
+ /*
+ * The only legitimate case of customized extent hooks for a0 is
+ * hooks with no allocation activities. One such example is to
+ * place metadata on pre-allocated resources such as huge pages.
+ * In that case, rely on reentrancy_level checks to catch
+ * infinite recursions.
+ */
+ pre_reentrancy(tsd, NULL);
+ } else {
+ pre_reentrancy(tsd, arena);
+ }
}
static void
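
A hedged sketch of the kind of a0 hook the comment above allows: an alloc hook that only carves from a pre-reserved region and never calls back into the allocator, so the reentrancy_level check is the only recursion guard needed. The function shape follows the public extent_hooks alloc signature from jemalloc's man page; the pool, its size, and all names here are illustrative assumptions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct extent_hooks_s extent_hooks_t;	/* opaque here; defined by jemalloc */

static char metadata_pool[16 << 20];	/* hypothetical pre-reserved region */
static size_t pool_used;

/* Carve from the pool bump-pointer style; fail rather than recurse into the
 * allocator when the pool is exhausted or placement is requested. */
static void *
pool_extent_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	(void)hooks; (void)arena_ind;
	if (new_addr != NULL) {
		return NULL;
	}
	uintptr_t base = (uintptr_t)metadata_pool + pool_used;
	uintptr_t aligned = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t need = (aligned - (uintptr_t)metadata_pool) + size;
	if (need > sizeof(metadata_pool)) {
		return NULL;
	}
	pool_used = need;
	if (*zero) {
		memset((void *)aligned, 0, size);
	}
	*commit = true;
	return (void *)aligned;
}

int
main(void) {
	bool zero = true, commit = false;
	void *p = pool_extent_alloc(NULL, NULL, 4096, 4096, &zero, &commit, 0);
	return p == NULL;
}

Wiring such a hook into an extent_hooks_t and installing it through the arena.<i>.extent_hooks mallctl is left out of this sketch.
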
@@ -1094,21 +1279,18 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed,
- committed);
+ committed, true);
if (ptr == NULL) {
extent_dalloc(tsdn, arena, extent);
goto label_err;
}
+
if (extent_register_no_gdump_add(tsdn, extent)) {
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, extent, true);
goto label_err;
}
- size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
- PAGE_CEILING(alignment)) - (uintptr_t)ptr;
- assert(alloc_size >= leadsize + esize);
- size_t trailsize = alloc_size - leadsize - esize;
if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
*zero = true;
}
@@ -1116,54 +1298,48 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
*commit = true;
}
- /* Split the lead. */
- if (leadsize != 0) {
- extent_t *lead = extent;
- extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead,
- leadsize, NSIZES, false, esize + trailsize, szind, slab,
- true);
- if (extent == NULL) {
- extent_deregister(tsdn, lead);
- extents_leak(tsdn, arena, r_extent_hooks,
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ extent_t *lead;
+ extent_t *trail;
+ extent_t *to_leak;
+ extent_t *to_salvage;
+ extent_split_interior_result_t result = extent_split_interior(
+ tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
+ &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
+ true);
+
+ if (result == extent_split_interior_ok) {
+ if (lead != NULL) {
+ extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, lead, true);
- goto label_err;
}
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, lead, true);
- }
-
- /* Split the trail. */
- if (trailsize != 0) {
- extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks,
- extent, esize, szind, slab, trailsize, NSIZES, false, true);
- if (trail == NULL) {
- extent_deregister(tsdn, extent);
+ if (trail != NULL) {
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, trail, true);
+ }
+ } else {
+ /*
+ * We should have allocated a sufficiently large extent; the
+ * cant_alloc case should not occur.
+ */
+ assert(result == extent_split_interior_error);
+ if (to_leak != NULL) {
+ extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_leak(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, extent, true);
+ &arena->extents_retained, to_leak, true);
goto label_err;
}
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, trail, true);
- } else if (leadsize == 0) {
/*
- * Splitting causes szind to be set as a side effect, but no
- * splitting occurred.
+ * Note: we don't handle the non-NULL to_salvage case at all.
+ * This maintains the behavior that was present when the
+ * refactor pulling extent_split_interior into a helper function
+ * was added. I think this is actually a bug (we leak both the
+ * memory and the extent_t in that case), but since this code is
+ * getting deleted very shortly (in a subsequent commit),
+ * ensuring correctness down this path isn't worth the effort.
*/
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
-
- extent_szind_set(extent, szind);
- if (szind != NSIZES) {
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(extent), szind, slab);
- if (slab && extent_size_get(extent) > PAGE) {
- rtree_szind_slab_update(tsdn, &extents_rtree,
- rtree_ctx,
- (uintptr_t)extent_past_get(extent) -
- (uintptr_t)PAGE, szind, slab);
- }
- }
}
if (*commit && !extent_committed_get(extent)) {
@@ -1177,13 +1353,14 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
}
/*
- * Increment extent_grow_next if doing so wouldn't exceed the legal
+ * Increment extent_grow_next if doing so wouldn't exceed the allowed
* range.
*/
- if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) {
+ if (arena->extent_grow_next + egn_skip + 1 <=
+ arena->retain_grow_limit) {
arena->extent_grow_next += egn_skip + 1;
} else {
- arena->extent_grow_next = NPSIZES - 1;
+ arena->extent_grow_next = arena->retain_grow_limit;
}
/* All opportunities for failure are past. */
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
@@ -1271,7 +1448,8 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
return NULL;
}
extent_init(extent, arena, addr, esize, slab, szind,
- arena_extent_sn_next(arena), extent_state_active, zero, commit);
+ arena_extent_sn_next(arena), extent_state_active, zero, commit,
+ true);
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
@@ -1296,10 +1474,20 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
if (extent == NULL) {
+ if (opt_retain && new_addr != NULL) {
+ /*
+ * When retain is enabled and new_addr is set, we do not
+ * attempt extent_alloc_wrapper_hard which does mmap
+ * that is very unlikely to succeed (unless it happens
+ * to be at the end).
+ */
+ return NULL;
+ }
extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
}
+ assert(extent == NULL || extent_dumpable_get(extent));
return extent;
}
@@ -1329,13 +1517,12 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
bool growing_retained) {
assert(extent_can_coalesce(arena, extents, inner, outer));
- if (forward && extents->delay_coalesce) {
+ if (extents->delay_coalesce) {
/*
- * The extent that remains after coalescing must occupy the
- * outer extent's position in the LRU. For forward coalescing,
- * swap the inner extent into the LRU.
+ * Remove outer from the LRU list so that it won't show up in
+ * decay through extents_evict.
*/
- extent_list_replace(&extents->lru, outer, inner);
+ extent_list_remove(&extents->lru, outer);
}
extent_activate_locked(tsdn, arena, extents, outer,
extents->delay_coalesce);
@@ -1345,9 +1532,16 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
forward ? inner : outer, forward ? outer : inner, growing_retained);
malloc_mutex_lock(tsdn, &extents->mtx);
+ if (!err && extents->delay_coalesce) {
+ if (forward) {
+ extent_list_prepend(&extents->lru, inner);
+ } else {
+ extent_list_prepend(&extents->lru, outer);
+ }
+ }
if (err) {
- if (forward && extents->delay_coalesce) {
- extent_list_replace(&extents->lru, inner, outer);
+ if (extents->delay_coalesce) {
+ extent_list_prepend(&extents->lru, outer);
}
extent_deactivate_locked(tsdn, arena, extents, outer,
extents->delay_coalesce);
@@ -1422,6 +1616,10 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
return extent;
}
+/*
+ * Does the metadata management portions of putting an unused extent into the
+ * given extents_t (coalesces, deregisters slab interiors, the heap operations).
+ */
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent, bool growing_retained) {
@@ -1447,8 +1645,22 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!extents->delay_coalesce) {
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent, NULL, growing_retained);
+ } else if (extent_size_get(extent) >= LARGE_MINCLASS) {
+ /* Always coalesce large extents eagerly. */
+ bool coalesced;
+ size_t prev_size;
+ do {
+ prev_size = extent_size_get(extent);
+ assert(extent_state_get(extent) == extent_state_active);
+ extent = extent_try_coalesce(tsdn, arena,
+ r_extent_hooks, rtree_ctx, extents, extent,
+ &coalesced, growing_retained);
+ if (coalesced) {
+ extent_list_remove(&extents->lru, extent);
+ }
+ } while (coalesced &&
+ extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
}
-
extent_deactivate_locked(tsdn, arena, extents, extent, false);
malloc_mutex_unlock(tsdn, &extents->mtx);
@@ -1520,6 +1732,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ assert(extent_dumpable_get(extent));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
@@ -1780,6 +1993,13 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
}
#endif
+/*
+ * Accepts the extent to split, and the characteristics of each side of the
+ * split. The 'a' parameters go with the 'lead' of the resulting pair of
+ * extents (the lower addressed portion of the split), and the 'b' parameters go
+ * with the trail (the higher addressed portion). This makes 'extent' the lead,
+ * and returns the trail (except in case of error).
+ */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
@@ -1803,7 +2023,7 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent));
+ extent_committed_get(extent), extent_dumpable_get(extent));
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -1814,7 +2034,7 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_init(&lead, arena, extent_addr_get(extent), size_a,
slab_a, szind_a, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent));
+ extent_committed_get(extent), extent_dumpable_get(extent));
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
true, &lead_elm_a, &lead_elm_b);