Diffstat (limited to 'src')
-rw-r--r--  src/arena.c  |  14
-rw-r--r--  src/base.c   |  35
-rw-r--r--  src/chunk.c  |  85
-rw-r--r--  src/extent.c | 101
4 files changed, 158 insertions(+), 77 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index faf2349..720219d 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -3411,10 +3411,6 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena->nactive = 0;
arena->ndirty = 0;
- for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
- i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
@@ -3426,8 +3422,11 @@ arena_new(tsdn_t *tsdn, unsigned ind)
WITNESS_RANK_ARENA_HUGE))
return (NULL);
- extent_tree_szad_new(&arena->chunks_szad_cached);
- extent_tree_szad_new(&arena->chunks_szad_retained);
+ for (i = 0; i < NPSIZES; i++) {
+ extent_heap_new(&arena->chunks_cached[i]);
+ extent_heap_new(&arena->chunks_retained[i]);
+ }
+
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
return (NULL);
@@ -3450,6 +3449,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
+ for (i = 0; i < NPSIZES; i++)
+ arena_run_heap_new(&arena->runs_avail[i]);
+
return (arena);
}
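The arena.c hunks replace the single size/address-ordered container with one heap per page size class, so initialization becomes a loop over NPSIZES. A standalone sketch of that layout, using hypothetical names (bucket_t, NCLASSES, node_t) rather than jemalloc's types; a toy linked list stands in for the pairing heap:

#include <stddef.h>

#define NCLASSES 8                      /* stand-in for NPSIZES */

typedef struct node_s {
    struct node_s *next;
    void          *addr;
    size_t         size;
} node_t;

typedef struct {
    node_t *head;                       /* toy per-class container */
} bucket_t;

/* One container per size class, each initialized empty, as arena_new()
 * now does for runs_avail, chunks_cached, and chunks_retained. */
static void
buckets_init(bucket_t buckets[NCLASSES])
{
    for (unsigned i = 0; i < NCLASSES; i++)
        buckets[i].head = NULL;
}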
diff --git a/src/base.c b/src/base.c
index 2a6df4d..8816738 100644
--- a/src/base.c
+++ b/src/base.c
@@ -5,7 +5,7 @@
/* Data. */
static malloc_mutex_t base_mtx;
-static extent_tree_t base_avail_szad;
+static extent_heap_t base_avail[NSIZES];
static extent_t *base_extents;
static size_t base_allocated;
static size_t base_resident;
@@ -79,9 +79,9 @@ void *
base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
- size_t csize, usize;
+ size_t csize;
+ szind_t i;
extent_t *extent;
- extent_t key;
/*
* Round size up to nearest multiple of the cacheline size, so that
@@ -89,14 +89,16 @@ base_alloc(tsdn_t *tsdn, size_t size)
*/
csize = CACHELINE_CEILING(size);
- usize = s2u(csize);
- extent_init(&key, NULL, NULL, usize, false, false, false, false);
+ extent = NULL;
malloc_mutex_lock(tsdn, &base_mtx);
- extent = extent_tree_szad_nsearch(&base_avail_szad, &key);
- if (extent != NULL) {
- /* Use existing space. */
- extent_tree_szad_remove(&base_avail_szad, extent);
- } else {
+ for (i = size2index(csize); i < NSIZES; i++) {
+ extent = extent_heap_remove_first(&base_avail[i]);
+ if (extent != NULL) {
+ /* Use existing space. */
+ break;
+ }
+ }
+ if (extent == NULL) {
/* Try to allocate more space. */
extent = base_chunk_alloc(tsdn, csize);
}
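/*
 * Sketch of the scan base_alloc() now performs: starting at the class for
 * the rounded-up request, probe each per-class container in ascending order
 * and take the first non-empty one.  Names here (avail, NCLASSES, block_t)
 * are hypothetical stand-ins, not jemalloc's.
 */
#include <stddef.h>

#define NCLASSES 8

typedef struct block_s {
    struct block_s *next;
    size_t size;
} block_t;

static block_t *avail[NCLASSES];        /* one free list per size class */

static block_t *
first_fit(unsigned start_class)
{
    for (unsigned i = start_class; i < NCLASSES; i++) {
        block_t *b = avail[i];
        if (b != NULL) {
            avail[i] = b->next;         /* pop and reuse existing space */
            return b;
        }
    }
    return NULL;                        /* caller falls back to fresh allocation */
}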
@@ -107,9 +109,16 @@ base_alloc(tsdn_t *tsdn, size_t size)
ret = extent_addr_get(extent);
if (extent_size_get(extent) > csize) {
+ szind_t index_floor;
+
extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
extent_size_set(extent, extent_size_get(extent) - csize);
- extent_tree_szad_insert(&base_avail_szad, extent);
+ /*
+ * Compute the index for the largest size class that does not
+ * exceed extent's size.
+ */
+ index_floor = size2index(extent_size_get(extent) + 1) - 1;
+ extent_heap_insert(&base_avail[index_floor], extent);
} else
base_extent_dalloc(tsdn, extent);
if (config_stats) {
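/*
 * The leftover-space bookkeeping above relies on the size2index(size + 1) - 1
 * idiom: the ceiling lookup on size + 1 lands one class above the floor, so
 * subtracting one yields the largest class that does not exceed size.  A
 * self-contained check of that arithmetic with a toy power-of-two class
 * table (toy_size2index is hypothetical, not jemalloc's size2index).
 */
#include <assert.h>
#include <stddef.h>

static unsigned
toy_size2index(size_t size)             /* smallest class c with (1 << c) >= size */
{
    unsigned i = 0;
    while (((size_t)1 << i) < size)
        i++;
    return i;
}

int
main(void)
{
    size_t leftover = 1000;
    unsigned floor_ind = toy_size2index(leftover + 1) - 1;
    assert(((size_t)1 << floor_ind) <= leftover);        /* 512 <= 1000 */
    assert(((size_t)1 << (floor_ind + 1)) > leftover);   /* 1024 > 1000 */
    return 0;
}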
@@ -143,10 +152,12 @@ base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
bool
base_boot(void)
{
+ szind_t i;
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
- extent_tree_szad_new(&base_avail_szad);
+ for (i = 0; i < NSIZES; i++)
+ extent_heap_new(&base_avail[i]);
base_extents = NULL;
return (false);
diff --git a/src/chunk.c b/src/chunk.c
index 9a9b08e..2463028 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -50,11 +50,27 @@ const chunk_hooks_t chunk_hooks_default = {
*/
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, bool cache,
+ chunk_hooks_t *chunk_hooks, extent_heap_t extent_heaps[NPSIZES], bool cache,
void *chunk, size_t size, bool zeroed, bool committed);
/******************************************************************************/
+static void
+extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
+{
+ size_t psz = extent_size_quantize_floor(extent_size_get(extent));
+ pszind_t pind = psz2ind(psz);
+ extent_heap_insert(&extent_heaps[pind], extent);
+}
+
+static void
+extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
+{
+ size_t psz = extent_size_quantize_floor(extent_size_get(extent));
+ pszind_t pind = psz2ind(psz);
+ extent_heap_remove(&extent_heaps[pind], extent);
+}
+
static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{
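/*
 * Sketch of the bucket selection performed by extent_heaps_insert() and
 * extent_heaps_remove(): floor-quantize the extent's size, map it to a class
 * index, and operate on that one container.  Inserting at the floor class
 * guarantees every entry in bucket i is at least as large as class i, so a
 * search that starts at the ceiling class (chunk_first_best_fit() below)
 * never returns an undersized extent.  class_floor(), node_t, and NCLASSES
 * are toy stand-ins for extent_size_quantize_floor()/psz2ind() and extent_t;
 * sizes are assumed to be >= 1.
 */
#include <stddef.h>

#define NCLASSES 8

typedef struct node_s {
    struct node_s *next;
    size_t size;
} node_t;

static unsigned
class_floor(size_t size)                /* largest class i with (1 << i) <= size */
{
    unsigned i = 0;
    while (i + 1 < NCLASSES && ((size_t)1 << (i + 1)) <= size)
        i++;
    return i;
}

static void
buckets_insert(node_t *buckets[NCLASSES], node_t *n)
{
    unsigned i = class_floor(n->size);
    n->next = buckets[i];
    buckets[i] = n;
}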
@@ -245,14 +261,21 @@ chunk_reregister(tsdn_t *tsdn, const extent_t *extent)
* fits.
*/
static extent_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_heap_t extent_heaps[NPSIZES],
+ size_t size)
{
- extent_t key;
+ pszind_t pind, i;
assert(size == CHUNK_CEILING(size));
- extent_init(&key, arena, NULL, size, false, false, false, false);
- return (extent_tree_szad_nsearch(chunks_szad, &key));
+ pind = psz2ind(extent_size_quantize_ceil(size));
+ for (i = pind; i < NPSIZES; i++) {
+ extent_t *extent = extent_heap_first(&extent_heaps[i]);
+ if (extent != NULL)
+ return (extent);
+ }
+
+ return (NULL);
}
static void
@@ -272,8 +295,8 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szad, bool cache, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, bool dalloc_extent)
+ extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit, bool dalloc_extent)
{
void *ret;
extent_t *extent;
@@ -309,7 +332,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} else
extent = NULL;
} else
- extent = chunk_first_best_fit(arena, chunks_szad, alloc_size);
+ extent = chunk_first_best_fit(arena, extent_heaps, alloc_size);
if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
size)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -334,9 +357,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
- /* Remove extent from the tree. */
+ /* Remove extent from the heap. */
chunk_deregister(tsdn, extent);
- extent_tree_szad_remove(chunks_szad, extent);
+ extent_heaps_remove(extent_heaps, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
@@ -346,7 +369,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_addr_get(extent), extent_size_get(extent));
arena_extent_dalloc(tsdn, arena, extent);
} else {
- extent_tree_szad_insert(chunks_szad, extent);
+ extent_heaps_insert(extent_heaps, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
}
extent = NULL;
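/*
 * When the recycled extent is larger than needed, chunk_recycle() splits it
 * around the requested region and reinserts the leading (and, further down,
 * trailing) leftover under its own, smaller size class.  A minimal sketch of
 * that split step over a hypothetical span_t; the caller would rebucket
 * *lead with a floor-class helper like the one in the previous sketch.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    void  *addr;
    size_t size;
} span_t;

/* Carve `leadsize` bytes off the front of *span into *lead; *span shrinks
 * to the part that will actually be handed out. */
static void
span_split_lead(span_t *span, size_t leadsize, span_t *lead)
{
    assert(leadsize < span->size);
    lead->addr = span->addr;
    lead->size = leadsize;
    span->addr = (void *)((uintptr_t)span->addr + leadsize);
    span->size -= leadsize;
}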
@@ -358,7 +381,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (dalloc_extent && extent != NULL)
arena_extent_dalloc(tsdn, arena, extent);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
+ chunk_record(tsdn, arena, chunk_hooks, extent_heaps,
cache, ret, size + trailsize, zeroed, committed);
return (NULL);
}
@@ -368,7 +391,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
- chunks_szad, cache, ret, size + trailsize,
+ extent_heaps, cache, ret, size + trailsize,
zeroed, committed);
return (NULL);
}
@@ -380,14 +403,14 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_addr_get(extent), extent_size_get(extent));
arena_extent_dalloc(tsdn, arena, extent);
} else {
- extent_tree_szad_insert(chunks_szad, extent);
+ extent_heaps_insert(extent_heaps, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
}
extent = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szad, cache, ret,
+ chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache, ret,
size, zeroed, committed);
return (NULL);
}
@@ -480,9 +503,8 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert((alignment & chunksize_mask) == 0);
commit = true;
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szad_cached, true, new_addr, size, alignment, zero,
- &commit, dalloc_extent);
+ ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
+ true, new_addr, size, alignment, zero, &commit, dalloc_extent);
if (ret == NULL)
return (NULL);
assert(commit);
@@ -532,9 +554,8 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szad_retained, false, new_addr, size, alignment,
- zero, commit, true);
+ ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
+ false, new_addr, size, alignment, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
@@ -583,7 +604,7 @@ chunk_can_coalesce(const extent_t *a, const extent_t *b)
static void
chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_t *a, extent_t *b, extent_tree_t *chunks_szad, bool cache)
+ extent_t *a, extent_t *b, extent_heap_t extent_heaps[NPSIZES], bool cache)
{
rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
@@ -613,8 +634,8 @@ chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
} else
b_elm_b = b_elm_a;
- extent_tree_szad_remove(chunks_szad, a);
- extent_tree_szad_remove(chunks_szad, b);
+ extent_heaps_remove(extent_heaps, a);
+ extent_heaps_remove(extent_heaps, b);
arena_chunk_cache_maybe_remove(extent_arena_get(a), a, cache);
arena_chunk_cache_maybe_remove(extent_arena_get(b), b, cache);
@@ -622,7 +643,7 @@ chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
- extent_tree_szad_insert(chunks_szad, a);
+ extent_heaps_insert(extent_heaps, a);
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
extent_rtree_release(tsdn, a_elm_a, b_elm_b);
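/*
 * Coalescing now moves entries between size classes: both neighbors leave
 * their current per-class heaps, the surviving extent absorbs the other, and
 * it is reinserted under the class of the combined size.  Toy version over
 * singly linked per-class lists; node_t, class_floor(), and the bucket_*
 * helpers are hypothetical stand-ins for jemalloc's extent heaps.
 */
#include <stddef.h>

#define NCLASSES 8

typedef struct node_s {
    struct node_s *next;
    size_t size;
} node_t;

static unsigned
class_floor(size_t size)                /* largest class i with (1 << i) <= size */
{
    unsigned i = 0;
    while (i + 1 < NCLASSES && ((size_t)1 << (i + 1)) <= size)
        i++;
    return i;
}

static void
bucket_remove(node_t *buckets[NCLASSES], node_t *n)
{
    node_t **pp = &buckets[class_floor(n->size)];
    while (*pp != n)
        pp = &(*pp)->next;
    *pp = n->next;
}

static void
bucket_insert(node_t *buckets[NCLASSES], node_t *n)
{
    unsigned i = class_floor(n->size);
    n->next = buckets[i];
    buckets[i] = n;
}

/* Merge physically adjacent free spans; `a` precedes `b` in memory. */
static void
coalesce(node_t *buckets[NCLASSES], node_t *a, node_t *b)
{
    bucket_remove(buckets, a);          /* both leave their old classes */
    bucket_remove(buckets, b);
    a->size += b->size;                 /* a absorbs b */
    bucket_insert(buckets, a);          /* reinsert under the combined size */
}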
@@ -634,7 +655,7 @@ chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szad, bool cache, void *chunk, size_t size,
+ extent_heap_t extent_heaps[NPSIZES], bool cache, void *chunk, size_t size,
bool zeroed, bool committed)
{
extent_t *extent, *prev, *next;
@@ -657,7 +678,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_leak(tsdn, arena, chunk_hooks, cache, chunk, size);
goto label_return;
}
- extent_tree_szad_insert(chunks_szad, extent);
+ extent_heaps_insert(extent_heaps, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
/* Try to coalesce forward. */
@@ -665,7 +686,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
CHUNK_CEILING((uintptr_t)extent_past_get(extent)), false);
if (next != NULL) {
chunk_try_coalesce(tsdn, arena, chunk_hooks, extent, next,
- chunks_szad, cache);
+ extent_heaps, cache);
}
/* Try to coalesce backward. */
@@ -673,7 +694,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
(uintptr_t)extent_addr_get(extent) - chunksize, false);
if (prev != NULL) {
chunk_try_coalesce(tsdn, arena, chunk_hooks, prev, extent,
- chunks_szad, cache);
+ extent_heaps, cache);
}
label_return:
@@ -690,7 +711,7 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached, true,
+ chunk_record(tsdn, arena, chunk_hooks, arena->chunks_cached, true,
chunk, size, false, committed);
arena_maybe_purge(tsdn, arena);
}
@@ -726,8 +747,8 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
- false, chunk, size, zeroed, committed);
+ chunk_record(tsdn, arena, chunk_hooks, arena->chunks_retained, false,
+ chunk, size, zeroed, committed);
if (config_stats)
arena->stats.retained += size;
diff --git a/src/extent.c b/src/extent.c
index c550e6c..4757f75 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -3,39 +3,86 @@
/******************************************************************************/
-JEMALLOC_INLINE_C size_t
-extent_quantize(size_t size)
+#ifdef JEMALLOC_JET
+#undef extent_size_quantize_floor
+#define extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor)
+#endif
+size_t
+extent_size_quantize_floor(size_t size)
{
+ size_t ret;
+ pszind_t pind;
- /*
- * Round down to the nearest chunk size that can actually be requested
- * during normal huge allocation.
- */
- return (index2size(size2index(size + 1) - 1));
+ assert(size > 0);
+ assert(size <= HUGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ pind = psz2ind(size - large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return (size);
+ }
+ ret = pind2sz(pind - 1) + large_pad;
+ assert(ret <= size);
+ return (ret);
}
+#ifdef JEMALLOC_JET
+#undef extent_size_quantize_floor
+#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor)
+extent_size_quantize_t *extent_size_quantize_floor =
+ JEMALLOC_N(n_extent_size_quantize_floor);
+#endif
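/*
 * The JEMALLOC_JET scaffolding above compiles the real function under a
 * prefixed name and then publishes a function pointer with the public name,
 * so unit tests can interpose their own implementation.  A stripped-down
 * sketch of the same pattern with hypothetical names (quantize_floor, ...);
 * the toy body just rounds down to 4 KiB and is not the real quantization.
 */
#include <stddef.h>

typedef size_t (quantize_floor_t)(size_t);

static size_t
quantize_floor_impl(size_t size)        /* "real" implementation */
{
    return size & ~((size_t)4096 - 1);
}

/* Public entry point is a mutable pointer: a test can save it, install a
 * wrapper that records calls, and restore the original afterwards. */
quantize_floor_t *quantize_floor = quantize_floor_impl;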
-JEMALLOC_INLINE_C int
-extent_szad_comp(const extent_t *a, const extent_t *b)
+#ifdef JEMALLOC_JET
+#undef extent_size_quantize_ceil
+#define extent_size_quantize_ceil JEMALLOC_N(n_extent_size_quantize_ceil)
+#endif
+size_t
+extent_size_quantize_ceil(size_t size)
{
- int ret;
- size_t a_qsize = extent_quantize(extent_size_get(a));
- size_t b_qsize = extent_quantize(extent_size_get(b));
-
- /*
- * Compare based on quantized size rather than size, in order to sort
- * equally useful extents only by address.
- */
- ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
- if (ret == 0) {
- uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
- uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
-
- ret = (a_addr > b_addr) - (a_addr < b_addr);
- }
+ size_t ret;
+
+ assert(size > 0);
+ assert(size <= HUGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+ ret = extent_size_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large extent,
+ * because under-sized extents may be mixed in. This only
+ * happens when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
+ }
return (ret);
}
+#ifdef JEMALLOC_JET
+#undef extent_size_quantize_ceil
+#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil)
+extent_size_quantize_t *extent_size_quantize_ceil =
+ JEMALLOC_N(n_extent_size_quantize_ceil);
+#endif
+
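/*
 * extent_size_quantize_ceil() is defined in terms of the floor: when
 * flooring lost precision (ret < size), bump to the next class up so the
 * result is never smaller than the input.  Toy check of that relationship
 * for power-of-two classes; toy_floor()/toy_ceil() are hypothetical and
 * ignore large_pad, which the real code subtracts and adds back.
 */
#include <assert.h>
#include <stddef.h>

static size_t
toy_floor(size_t size)
{
    size_t c = 1;
    while (c * 2 <= size)
        c *= 2;
    return c;
}

static size_t
toy_ceil(size_t size)
{
    size_t ret = toy_floor(size);
    if (ret < size)
        ret *= 2;                       /* next class up */
    return ret;
}

int
main(void)
{
    assert(toy_floor(5000) == 4096);    /* rounds down to its class... */
    assert(toy_ceil(5000) == 8192);     /* ...ceil goes one class higher */
    assert(toy_ceil(4096) == 4096);     /* exact class sizes stay put */
    return 0;
}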
+JEMALLOC_INLINE_C int
+extent_ad_comp(const extent_t *a, const extent_t *b)
+{
+ uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
+
+ return ((a_addr > b_addr) - (a_addr < b_addr));
+}
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_t, szad_link,
- extent_szad_comp)
+/* Generate pairing heap functions. */
+ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_ad_comp)
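With the size captured by the bucket index, the generated pairing heap only needs to order extents by address, which is why extent_ad_comp replaces the old size-then-address comparator: each per-class heap yields its lowest-addressed extent first. A minimal comparator of the same shape over a hypothetical struct; preferring lower addresses among equally sized free extents is the usual address-ordered first-fit heuristic for limiting fragmentation.

#include <stdint.h>

typedef struct {
    void *addr;
} span_t;

/* Order spans purely by address; distinct spans have distinct addresses. */
static int
span_ad_comp(const span_t *a, const span_t *b)
{
    uintptr_t a_addr = (uintptr_t)a->addr;
    uintptr_t b_addr = (uintptr_t)b->addr;

    return (a_addr > b_addr) - (a_addr < b_addr);
}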