Diffstat (limited to 'src')
-rw-r--r--  src/arena.c     48
-rw-r--r--  src/base.c     407
-rw-r--r--  src/ctl.c       50
-rw-r--r--  src/extent.c    36
-rw-r--r--  src/jemalloc.c  35
-rw-r--r--  src/prof.c       6
-rw-r--r--  src/rtree.c      3
-rw-r--r--  src/stats.c     27
-rw-r--r--  src/tcache.c     8
9 files changed, 418 insertions, 202 deletions
diff --git a/src/arena.c b/src/arena.c
index 0eb6150..d5e87ea 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1550,6 +1550,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats)
{
+ size_t base_allocated, base_resident, base_mapped;
unsigned i;
cassert(config_stats);
@@ -1558,12 +1559,18 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
arena_basic_stats_merge_locked(arena, nthreads, dss, decay_time,
nactive, ndirty);
- astats->mapped += arena->stats.mapped;
+ base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
+ &base_mapped);
+
+ astats->mapped += base_mapped + arena->stats.mapped;
astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
- astats->metadata += arena_metadata_get(arena);
+ astats->base += base_allocated;
+ astats->internal += arena_internal_get(arena);
+ astats->resident += base_resident + (((arena->nactive + arena->ndirty)
+ << LG_PAGE));
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
@@ -1625,19 +1632,27 @@ arena_extent_sn_next(arena_t *arena)
}
arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind)
+arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
{
arena_t *arena;
+ base_t *base;
unsigned i;
- arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
+ if (ind == 0)
+ base = b0get();
+ else {
+ base = base_new(tsdn, ind, extent_hooks);
+ if (base == NULL)
+ return (NULL);
+ }
+
+ arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
if (arena == NULL)
- return (NULL);
+ goto label_error;
- arena->ind = ind;
arena->nthreads[0] = arena->nthreads[1] = 0;
if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
- return (NULL);
+ goto label_error;
if (config_stats && config_tcache)
ql_new(&arena->tcache_ql);
@@ -1670,7 +1685,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
ql_new(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE))
- return (NULL);
+ goto label_error;
for (i = 0; i < NPSIZES+1; i++) {
extent_heap_new(&arena->extents_cached[i]);
@@ -1682,9 +1697,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
WITNESS_RANK_ARENA_EXTENTS))
- return (NULL);
-
- arena->extent_hooks = (extent_hooks_t *)&extent_hooks_default;
+ goto label_error;
if (!config_munmap)
arena->extent_grow_next = psz2ind(HUGEPAGE);
@@ -1692,14 +1705,14 @@ arena_new(tsdn_t *tsdn, unsigned ind)
ql_new(&arena->extent_cache);
if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
WITNESS_RANK_ARENA_EXTENT_CACHE))
- return (NULL);
+ goto label_error;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock, "arena_bin",
WITNESS_RANK_ARENA_BIN))
- return (NULL);
+ goto label_error;
bin->slabcur = NULL;
extent_heap_new(&bin->slabs_nonfull);
extent_init(&bin->slabs_full, arena, NULL, 0, 0, 0, false,
@@ -1708,7 +1721,13 @@ arena_new(tsdn_t *tsdn, unsigned ind)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
+ arena->base = base;
+
return (arena);
+label_error:
+ if (ind != 0)
+ base_delete(base);
+ return (NULL);
}
void
@@ -1744,6 +1763,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
+ base_prefork(tsdn, arena->base);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
malloc_mutex_prefork(tsdn, &arena->large_mtx);
@@ -1757,6 +1777,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+ base_postfork_parent(tsdn, arena->base);
malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->extents_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->lock);
@@ -1770,6 +1791,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+ base_postfork_child(tsdn, arena->base);
malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->extents_mtx);
malloc_mutex_postfork_child(tsdn, &arena->lock);
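
The arena.c side of this change attaches a base_t to each arena and folds its statistics into the merged totals: mapped now includes base_mapped, the old metadata counter is split into base (the arena's base allocations) and internal (arena_internal_get(), i.e. internal allocations served through the regular allocation paths), and resident is computed per arena as base_resident plus all active and dirty pages converted to bytes. A small illustrative sketch of that last formula (constants are made up; LG_PAGE is really a configure-time value):

#include <stdio.h>

#define LG_PAGE	12	/* illustrative: 4 KiB pages */

int
main(void)
{
	size_t base_resident = 81920;	/* as reported by base_stats_get() */
	size_t nactive = 1000;		/* active pages in the arena */
	size_t ndirty = 50;		/* dirty (unused but resident) pages */
	size_t resident = base_resident + ((nactive + ndirty) << LG_PAGE);

	printf("resident = %zu bytes\n", resident);
	return (0);
}
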
diff --git a/src/base.c b/src/base.c
index 4764d9c..5eab7cd 100644
--- a/src/base.c
+++ b/src/base.c
@@ -4,112 +4,308 @@
/******************************************************************************/
/* Data. */
-static malloc_mutex_t base_mtx;
-static size_t base_extent_sn_next;
-static extent_heap_t base_avail[NSIZES];
-static extent_t *base_extents;
-static size_t base_allocated;
-static size_t base_resident;
-static size_t base_mapped;
+static base_t *b0;
/******************************************************************************/
-static extent_t *
-base_extent_try_alloc(tsdn_t *tsdn)
+static void *
+base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size)
{
- extent_t *extent;
+ void *addr;
+ bool zero = true;
+ bool commit = true;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
+ assert(size == HUGEPAGE_CEILING(size));
- if (base_extents == NULL)
- return (NULL);
- extent = base_extents;
- base_extents = *(extent_t **)extent;
- return (extent);
+ if (extent_hooks == &extent_hooks_default)
+ addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
+ else {
+ addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
+ &zero, &commit, ind);
+ }
+
+ return (addr);
}
static void
-base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
+base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size)
{
- malloc_mutex_assert_owner(tsdn, &base_mtx);
-
- *(extent_t **)extent = base_extents;
- base_extents = extent;
+ /*
+ * Cascade through dalloc, decommit, purge_lazy, and purge_forced,
+ * stopping at first success. This cascade is performed for consistency
+ * with the cascade in extent_dalloc_wrapper() because an application's
+ * custom hooks may not support e.g. dalloc. This function is only ever
+ * called as a side effect of arena destruction, so although it might
+ * seem pointless to do anything besides dalloc here, the application
+ * may in fact want the end state of all associated virtual memory to
+ * be in some consistent-but-allocated state.
+ */
+ if (extent_hooks == &extent_hooks_default) {
+ if (!extent_dalloc_mmap(addr, size))
+ return;
+ if (!pages_decommit(addr, size))
+ return;
+ if (!pages_purge_lazy(addr, size))
+ return;
+ if (!pages_purge_forced(addr, size))
+ return;
+ /* Nothing worked. This should never happen. */
+ not_reached();
+ } else {
+ if (extent_hooks->dalloc != NULL &&
+ !extent_hooks->dalloc(extent_hooks, addr, size, true, ind))
+ return;
+ if (extent_hooks->decommit != NULL &&
+ !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
+ ind))
+ return;
+ if (extent_hooks->purge_lazy != NULL &&
+ !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
+ ind))
+ return;
+ if (extent_hooks->purge_forced != NULL &&
+ !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
+ size, ind))
+ return;
+ /* Nothing worked. That's the application's problem. */
+ }
}
static void
-base_extent_init(extent_t *extent, void *addr, size_t size)
+base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+ size_t size)
{
- size_t sn = atomic_add_zu(&base_extent_sn_next, 1) - 1;
+ size_t sn;
+
+ sn = *extent_sn_next;
+ (*extent_sn_next)++;
extent_init(extent, NULL, addr, size, 0, sn, true, true, true, false);
}
+static void *
+base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+ size_t alignment)
+{
+ void *ret;
+
+ assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
+ assert(size == ALIGNMENT_CEILING(size, alignment));
+
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
+ alignment) - (uintptr_t)extent_addr_get(extent);
+ ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
+ assert(extent_size_get(extent) >= *gap_size + size);
+ extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
+ *gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
+ extent_sn_get(extent), true, true, true, false);
+ return (ret);
+}
+
+static void
+base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t gap_size, void *addr, size_t size)
+{
+
+ if (extent_size_get(extent) > 0) {
+ /*
+ * Compute the index for the largest size class that does not
+ * exceed extent's size.
+ */
+ szind_t index_floor = size2index(extent_size_get(extent) + 1) -
+ 1;
+ extent_heap_insert(&base->avail[index_floor], extent);
+ }
+
+ if (config_stats) {
+ base->allocated += size;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base->resident += PAGE_CEILING((uintptr_t)addr + size) -
+ PAGE_CEILING((uintptr_t)addr - gap_size);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ }
+}
+
+static void *
+base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t size, size_t alignment)
+{
+ void *ret;
+ size_t gap_size;
+
+ ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
+ return (ret);
+}
+
+/*
+ * Allocate a block of virtual memory that is large enough to start with a
+ * base_block_t header, followed by an object of specified size and alignment.
+ * On success a pointer to the initialized base_block_t header is returned.
+ */
+static base_block_t *
+base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
+ size_t *extent_sn_next, size_t size, size_t alignment)
+{
+ base_block_t *block;
+ size_t usize, header_size, gap_size, block_size;
+
+ alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
+ usize = ALIGNMENT_CEILING(size, alignment);
+ header_size = sizeof(base_block_t);
+ gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size;
+ block_size = HUGEPAGE_CEILING(header_size + gap_size + usize);
+ block = (base_block_t *)base_map(extent_hooks, ind, block_size);
+ if (block == NULL)
+ return (NULL);
+ block->size = block_size;
+ block->next = NULL;
+ assert(block_size >= header_size);
+ base_extent_init(extent_sn_next, &block->extent,
+ (void *)((uintptr_t)block + header_size), block_size - header_size);
+ return (block);
+}
+
+/*
+ * Allocate an extent that is at least as large as specified size, with
+ * specified alignment.
+ */
static extent_t *
-base_extent_alloc(tsdn_t *tsdn, size_t minsize)
+base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
{
- extent_t *extent;
- size_t esize, nsize;
- void *addr;
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ base_block_t *block;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
- assert(minsize != 0);
- extent = base_extent_try_alloc(tsdn);
- /* Allocate enough space to also carve an extent out if necessary. */
- nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
- esize = PAGE_CEILING(minsize + nsize);
- /*
- * Directly call extent_alloc_mmap() because it's critical to allocate
- * untouched demand-zeroed virtual memory.
- */
- {
- bool zero = true;
- bool commit = true;
- addr = extent_alloc_mmap(NULL, esize, PAGE, &zero, &commit);
+ malloc_mutex_assert_owner(tsdn, &base->mtx);
+
+ block = base_block_alloc(extent_hooks, base_ind_get(base),
+ &base->extent_sn_next, size, alignment);
+ if (block == NULL)
+ return (NULL);
+ block->next = base->blocks;
+ base->blocks = block;
+ if (config_stats) {
+ base->allocated += sizeof(base_block_t);
+ base->resident += PAGE_CEILING(sizeof(base_block_t));
+ base->mapped += block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
}
- if (addr == NULL) {
- if (extent != NULL)
- base_extent_dalloc(tsdn, extent);
+ return (&block->extent);
+}
+
+base_t *
+b0get(void)
+{
+
+ return (b0);
+}
+
+base_t *
+base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
+{
+ base_t *base;
+ size_t extent_sn_next, base_alignment, base_size, gap_size;
+ base_block_t *block;
+ szind_t i;
+
+ extent_sn_next = 0;
+ block = base_block_alloc(extent_hooks, ind, &extent_sn_next,
+ sizeof(base_t), QUANTUM);
+ if (block == NULL)
+ return (NULL);
+
+ base_alignment = CACHELINE;
+ base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
+ base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ &gap_size, base_size, base_alignment);
+ base->ind = ind;
+ base->extent_hooks = extent_hooks;
+ if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE)) {
+ base_unmap(extent_hooks, ind, block, block->size);
return (NULL);
}
- base_mapped += esize;
- if (extent == NULL) {
- extent = (extent_t *)addr;
- addr = (void *)((uintptr_t)addr + nsize);
- esize -= nsize;
- if (config_stats) {
- base_allocated += nsize;
- base_resident += PAGE_CEILING(nsize);
- }
+ base->extent_sn_next = extent_sn_next;
+ base->blocks = block;
+ for (i = 0; i < NSIZES; i++)
+ extent_heap_new(&base->avail[i]);
+ if (config_stats) {
+ base->allocated = sizeof(base_block_t);
+ base->resident = PAGE_CEILING(sizeof(base_block_t));
+ base->mapped = block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
}
- base_extent_init(extent, addr, esize);
- return (extent);
+ base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
+ base_size);
+
+ return (base);
+}
+
+void
+base_delete(base_t *base)
+{
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ base_block_t *next = base->blocks;
+ do {
+ base_block_t *block = next;
+ next = block->next;
+ base_unmap(extent_hooks, base_ind_get(base), block,
+ block->size);
+ } while (next != NULL);
+}
+
+extent_hooks_t *
+base_extent_hooks_get(base_t *base)
+{
+
+ return ((extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun));
+}
+
+extent_hooks_t *
+base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks)
+{
+ extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
+ union {
+ extent_hooks_t **h;
+ void **v;
+ } u;
+
+ u.h = &base->extent_hooks;
+ atomic_write_p(u.v, extent_hooks);
+
+ return (old_extent_hooks);
}
/*
- * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
- * sparse data structures such as radix tree nodes efficient with respect to
- * physical memory usage.
+ * base_alloc() returns zeroed memory, which is always demand-zeroed for the
+ * auto arenas, in order to make multi-page sparse data structures such as radix
+ * tree nodes efficient with respect to physical memory usage. Upon success a
+ * pointer to at least size bytes with specified alignment is returned. Note
+ * that size is rounded up to the nearest multiple of alignment to avoid false
+ * sharing.
*/
void *
-base_alloc(tsdn_t *tsdn, size_t size)
+base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
{
void *ret;
- size_t csize;
+ size_t usize, asize;
szind_t i;
extent_t *extent;
- /*
- * Round size up to nearest multiple of the cacheline size, so that
- * there is no chance of false cache line sharing.
- */
- csize = CACHELINE_CEILING(size);
+ alignment = QUANTUM_CEILING(alignment);
+ usize = ALIGNMENT_CEILING(size, alignment);
+ asize = usize + alignment - QUANTUM;
extent = NULL;
- malloc_mutex_lock(tsdn, &base_mtx);
- for (i = size2index(csize); i < NSIZES; i++) {
- extent = extent_heap_remove_first(&base_avail[i]);
+ malloc_mutex_lock(tsdn, &base->mtx);
+ for (i = size2index(asize); i < NSIZES; i++) {
+ extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
break;
@@ -117,87 +313,60 @@ base_alloc(tsdn_t *tsdn, size_t size)
}
if (extent == NULL) {
/* Try to allocate more space. */
- extent = base_extent_alloc(tsdn, csize);
+ extent = base_extent_alloc(tsdn, base, usize, alignment);
}
if (extent == NULL) {
ret = NULL;
goto label_return;
}
- ret = extent_addr_get(extent);
- if (extent_size_get(extent) > csize) {
- szind_t index_floor;
-
- extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
- extent_size_set(extent, extent_size_get(extent) - csize);
- /*
- * Compute the index for the largest size class that does not
- * exceed extent's size.
- */
- index_floor = size2index(extent_size_get(extent) + 1) - 1;
- extent_heap_insert(&base_avail[index_floor], extent);
- } else
- base_extent_dalloc(tsdn, extent);
- if (config_stats) {
- base_allocated += csize;
- /*
- * Add one PAGE to base_resident for every page boundary that is
- * crossed by the new allocation.
- */
- base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
- PAGE_CEILING((uintptr_t)ret);
- }
+ ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
label_return:
- malloc_mutex_unlock(tsdn, &base_mtx);
+ malloc_mutex_unlock(tsdn, &base->mtx);
return (ret);
}
void
-base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
size_t *mapped)
{
- malloc_mutex_lock(tsdn, &base_mtx);
- assert(base_allocated <= base_resident);
- assert(base_resident <= base_mapped);
- *allocated = base_allocated;
- *resident = base_resident;
- *mapped = base_mapped;
- malloc_mutex_unlock(tsdn, &base_mtx);
+ cassert(config_stats);
+
+ malloc_mutex_lock(tsdn, &base->mtx);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ *allocated = base->allocated;
+ *resident = base->resident;
+ *mapped = base->mapped;
+ malloc_mutex_unlock(tsdn, &base->mtx);
}
-bool
-base_boot(void)
+void
+base_prefork(tsdn_t *tsdn, base_t *base)
{
- szind_t i;
-
- if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
- return (true);
- base_extent_sn_next = 0;
- for (i = 0; i < NSIZES; i++)
- extent_heap_new(&base_avail[i]);
- base_extents = NULL;
- return (false);
+ malloc_mutex_prefork(tsdn, &base->mtx);
}
void
-base_prefork(tsdn_t *tsdn)
+base_postfork_parent(tsdn_t *tsdn, base_t *base)
{
- malloc_mutex_prefork(tsdn, &base_mtx);
+ malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
-base_postfork_parent(tsdn_t *tsdn)
+base_postfork_child(tsdn_t *tsdn, base_t *base)
{
- malloc_mutex_postfork_parent(tsdn, &base_mtx);
+ malloc_mutex_postfork_child(tsdn, &base->mtx);
}
-void
-base_postfork_child(tsdn_t *tsdn)
+bool
+base_boot(tsdn_t *tsdn)
{
- malloc_mutex_postfork_child(tsdn, &base_mtx);
+ b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ return (b0 == NULL);
}
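
The rewritten base.c replaces the single global allocator with per-base_t bump allocation: each base_block_t is a HUGEPAGE-ceiling-sized mapping whose embedded extent is carved up by rounding the extent address up to the requested alignment, consuming the gap plus the object, and reinserting whatever remains into the avail heap under the largest size class it still satisfies. A minimal standalone sketch of that bump/alignment arithmetic follows; ALIGN_UP and bump_alloc are hypothetical stand-ins (assuming power-of-two alignments), not jemalloc code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

static void *
bump_alloc(uintptr_t *cur, size_t *remaining, size_t size, size_t alignment)
{
	uintptr_t aligned = ALIGN_UP(*cur, alignment);
	size_t gap = aligned - *cur;	/* bytes skipped to satisfy alignment */

	assert(*remaining >= gap + size);
	*cur = aligned + size;		/* remaining space begins past the object */
	*remaining -= gap + size;
	return ((void *)aligned);
}

int
main(void)
{
	static char block[4096];	/* stand-in for a block's usable space */
	uintptr_t cur = (uintptr_t)block;
	size_t remaining = sizeof(block);
	void *a = bump_alloc(&cur, &remaining, 100, 64);
	void *b = bump_alloc(&cur, &remaining, 100, 64);

	printf("a=%p b=%p remaining=%zu\n", a, b, remaining);
	return (0);
}
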
diff --git a/src/ctl.c b/src/ctl.c
index 47b4768..964896a 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -55,7 +55,7 @@ static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
-static bool ctl_grow(tsdn_t *tsdn);
+static bool ctl_grow(tsdn_t *tsdn, extent_hooks_t *extent_hooks);
static void ctl_refresh(tsdn_t *tsdn);
static bool ctl_init(tsdn_t *tsdn);
static int ctl_lookup(tsdn_t *tsdn, const char *name,
@@ -174,7 +174,9 @@ CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
-CTL_PROTO(stats_arenas_i_metadata)
+CTL_PROTO(stats_arenas_i_base)
+CTL_PROTO(stats_arenas_i_internal)
+CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
@@ -392,7 +394,9 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
- {NAME("metadata"), CTL(stats_arenas_i_metadata)},
+ {NAME("base"), CTL(stats_arenas_i_base)},
+ {NAME("internal"), CTL(stats_arenas_i_internal)},
+ {NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
@@ -500,7 +504,9 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
- sstats->astats.metadata += astats->astats.metadata;
+ sstats->astats.base += astats->astats.base;
+ sstats->astats.internal += astats->astats.internal;
+ sstats->astats.resident += astats->astats.resident;
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
@@ -556,12 +562,12 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
}
static bool
-ctl_grow(tsdn_t *tsdn)
+ctl_grow(tsdn_t *tsdn, extent_hooks_t *extent_hooks)
{
ctl_arena_stats_t *astats;
/* Initialize new arena. */
- if (arena_init(tsdn, ctl_stats.narenas) == NULL)
+ if (arena_init(tsdn, ctl_stats.narenas, extent_hooks) == NULL)
return (true);
/* Allocate extended arena stats. */
@@ -615,20 +621,17 @@ ctl_refresh(tsdn_t *tsdn)
}
if (config_stats) {
- size_t base_allocated, base_resident, base_mapped;
- base_stats_get(tsdn, &base_allocated, &base_resident,
- &base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large;
ctl_stats.active =
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
- ctl_stats.metadata = base_allocated +
- ctl_stats.arenas[ctl_stats.narenas].astats.metadata;
- ctl_stats.resident = base_resident +
- ((ctl_stats.arenas[ctl_stats.narenas].pactive +
- ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
- ctl_stats.mapped = base_mapped +
+ ctl_stats.metadata =
+ ctl_stats.arenas[ctl_stats.narenas].astats.base +
+ ctl_stats.arenas[ctl_stats.narenas].astats.internal;
+ ctl_stats.resident =
+ ctl_stats.arenas[ctl_stats.narenas].astats.resident;
+ ctl_stats.mapped =
ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
ctl_stats.retained =
ctl_stats.arenas[ctl_stats.narenas].astats.retained;
@@ -1167,7 +1170,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (oldarena == NULL)
return (EAGAIN);
- newind = oldind = oldarena->ind;
+ newind = oldind = arena_ind_get(oldarena);
WRITE(newind, unsigned);
READ(oldind, unsigned);
if (newind != oldind) {
@@ -1738,11 +1741,14 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
+ extent_hooks_t *extent_hooks;
unsigned narenas;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- READONLY();
- if (ctl_grow(tsd_tsdn(tsd))) {
+
+ extent_hooks = (extent_hooks_t *)&extent_hooks_default;
+ WRITE(extent_hooks, extent_hooks_t *);
+ if (ctl_grow(tsd_tsdn(tsd), extent_hooks)) {
ret = EAGAIN;
goto label_return;
}
@@ -1906,8 +1912,12 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_metadata,
- ctl_stats.arenas[mib[2]].astats.metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_base,
+ ctl_stats.arenas[mib[2]].astats.base, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
+ ctl_stats.arenas[mib[2]].astats.internal, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
+ ctl_stats.arenas[mib[2]].astats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
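
Together with the arena_init() plumbing above, these ctl changes let "arenas.extend" optionally be written with an extent_hooks_t pointer when the arena is created, and split the old "stats.arenas.<i>.metadata" statistic into "base", "internal", and "resident". A hedged usage sketch with the default hooks (no pointer written), assuming the existing behavior that oldp receives the new arena's index; most error handling is elided:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	uint64_t epoch = 1;
	size_t base, internal, resident;
	char name[64];

	/* Create an arena with the default extent hooks (newp left NULL). */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);

	/* Refresh the stats snapshot, then read the new per-arena fields. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	sz = sizeof(size_t);
	snprintf(name, sizeof(name), "stats.arenas.%u.base", arena_ind);
	mallctl(name, &base, &sz, NULL, 0);
	snprintf(name, sizeof(name), "stats.arenas.%u.internal", arena_ind);
	mallctl(name, &internal, &sz, NULL, 0);
	snprintf(name, sizeof(name), "stats.arenas.%u.resident", arena_ind);
	mallctl(name, &resident, &sz, NULL, 0);
	printf("base=%zu internal=%zu resident=%zu\n", base, internal,
	    resident);
	return (0);
}
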
diff --git a/src/extent.c b/src/extent.c
index 827a921..6eabde3 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -83,7 +83,8 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena)
extent = ql_last(&arena->extent_cache, ql_link);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
- return (base_alloc(tsdn, sizeof(extent_t)));
+ return (base_alloc(tsdn, arena->base, sizeof(extent_t),
+ QUANTUM));
}
ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
@@ -104,22 +105,14 @@ extent_hooks_t *
extent_hooks_get(arena_t *arena)
{
- return ((extent_hooks_t *)atomic_read_p(&arena->extent_hooks_pun));
+ return (base_extent_hooks_get(arena->base));
}
extent_hooks_t *
extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks)
{
- extent_hooks_t *old_extent_hooks = extent_hooks_get(arena);
- union {
- extent_hooks_t **h;
- void **v;
- } u;
- u.h = &arena->extent_hooks;
- atomic_write_p(u.v, extent_hooks);
-
- return (old_extent_hooks);
+ return (base_extent_hooks_set(arena->base, extent_hooks));
}
static void
@@ -873,7 +866,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
alignment, zero, commit);
} else {
addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, size,
- alignment, zero, commit, arena->ind);
+ alignment, zero, commit, arena_ind_get(arena));
}
if (addr == NULL) {
extent_dalloc(tsdn, arena, extent);
@@ -1071,7 +1064,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
err = ((*r_extent_hooks)->dalloc == NULL ||
(*r_extent_hooks)->dalloc(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent),
- extent_committed_get(extent), arena->ind));
+ extent_committed_get(extent), arena_ind_get(arena)));
}
if (!err) {
@@ -1088,12 +1081,12 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
else if ((*r_extent_hooks)->purge_lazy != NULL &&
!(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena->ind))
+ extent_size_get(extent), arena_ind_get(arena)))
zeroed = false;
else if ((*r_extent_hooks)->purge_forced != NULL &&
!(*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena->ind))
+ extent_size_get(extent), arena_ind_get(arena)))
zeroed = true;
else
zeroed = false;
@@ -1129,7 +1122,7 @@ extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_assure_initialized(arena, r_extent_hooks);
err = ((*r_extent_hooks)->commit == NULL ||
(*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena->ind));
+ extent_size_get(extent), offset, length, arena_ind_get(arena)));
extent_committed_set(extent, extent_committed_get(extent) || !err);
return (err);
}
@@ -1157,7 +1150,7 @@ extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
err = ((*r_extent_hooks)->decommit == NULL ||
(*r_extent_hooks)->decommit(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
- arena->ind));
+ arena_ind_get(arena)));
extent_committed_set(extent, extent_committed_get(extent) && err);
return (err);
}
@@ -1189,7 +1182,7 @@ extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
return ((*r_extent_hooks)->purge_lazy == NULL ||
(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
- arena->ind));
+ arena_ind_get(arena)));
}
#ifdef PAGES_CAN_PURGE_FORCED
@@ -1219,7 +1212,7 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
return ((*r_extent_hooks)->purge_forced == NULL ||
(*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), offset, length,
- arena->ind));
+ arena_ind_get(arena)));
}
#ifdef JEMALLOC_MAPS_COALESCE
@@ -1280,7 +1273,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
size_a + size_b, size_a, size_b, extent_committed_get(extent),
- arena->ind))
+ arena_ind_get(arena)))
goto label_error_d;
extent_size_set(extent, size_a);
@@ -1348,7 +1341,8 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
} else {
err = (*r_extent_hooks)->merge(*r_extent_hooks,
extent_base_get(a), extent_size_get(a), extent_base_get(b),
- extent_size_get(b), extent_committed_get(a), arena->ind);
+ extent_size_get(b), extent_committed_get(a),
+ arena_ind_get(arena));
}
if (err)
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 7df3fc9..2c49401 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -304,21 +304,21 @@ malloc_init(void)
*/
static void *
-a0ialloc(size_t size, bool zero, bool is_metadata)
+a0ialloc(size_t size, bool zero, bool is_internal)
{
if (unlikely(malloc_init_a0()))
return (NULL);
return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
- is_metadata, arena_get(TSDN_NULL, 0, true), true));
+ is_internal, arena_get(TSDN_NULL, 0, true), true));
}
static void
-a0idalloc(extent_t *extent, void *ptr, bool is_metadata)
+a0idalloc(extent_t *extent, void *ptr, bool is_internal)
{
- idalloctm(TSDN_NULL, extent, ptr, false, is_metadata, true);
+ idalloctm(TSDN_NULL, extent, ptr, false, is_internal, true);
}
void *
@@ -405,7 +405,7 @@ narenas_total_get(void)
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind)
+arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
{
arena_t *arena;
@@ -426,18 +426,18 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind)
}
/* Actually initialize the arena. */
- arena = arena_new(tsdn, ind);
+ arena = arena_new(tsdn, ind, extent_hooks);
arena_set(ind, arena);
return (arena);
}
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind)
+arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
{
arena_t *arena;
malloc_mutex_lock(tsdn, &arenas_lock);
- arena = arena_init_locked(tsdn, ind);
+ arena = arena_init_locked(tsdn, ind, extent_hooks);
malloc_mutex_unlock(tsdn, &arenas_lock);
return (arena);
}
@@ -629,7 +629,8 @@ arena_choose_hard(tsd_t *tsd, bool internal)
/* Initialize a new arena. */
choose[j] = first_null;
arena = arena_init_locked(tsd_tsdn(tsd),
- choose[j]);
+ choose[j],
+ (extent_hooks_t *)&extent_hooks_default);
if (arena == NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd),
&arenas_lock);
@@ -657,7 +658,7 @@ iarena_cleanup(tsd_t *tsd)
iarena = tsd_iarena_get(tsd);
if (iarena != NULL)
- arena_unbind(tsd, iarena->ind, true);
+ arena_unbind(tsd, arena_ind_get(iarena), true);
}
void
@@ -667,7 +668,7 @@ arena_cleanup(tsd_t *tsd)
arena = tsd_arena_get(tsd);
if (arena != NULL)
- arena_unbind(tsd, arena->ind, false);
+ arena_unbind(tsd, arena_ind_get(arena), false);
}
void
@@ -1211,7 +1212,7 @@ malloc_init_hard_a0_locked()
}
}
pages_boot();
- if (base_boot())
+ if (base_boot(TSDN_NULL))
return (true);
if (extent_boot())
return (true);
@@ -1236,7 +1237,8 @@ malloc_init_hard_a0_locked()
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0) == NULL)
+ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) ==
+ NULL)
return (true);
malloc_init_state = malloc_init_a0_initialized;
@@ -1309,8 +1311,8 @@ malloc_init_hard_finish(tsdn_t *tsdn)
narenas_total_set(narenas_auto);
/* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
- (MALLOCX_ARENA_MAX+1));
+ arenas = (arena_t **)base_alloc(tsdn, a0->base, sizeof(arena_t *) *
+ (MALLOCX_ARENA_MAX+1), CACHELINE);
if (arenas == NULL)
return (true);
/* Copy the pointer to the one arena that was already initialized. */
@@ -2690,7 +2692,6 @@ _malloc_prefork(void)
}
}
}
- base_prefork(tsd_tsdn(tsd));
for (i = 0; i < narenas; i++) {
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
arena_prefork3(tsd_tsdn(tsd), arena);
@@ -2719,7 +2720,6 @@ _malloc_postfork(void)
witness_postfork_parent(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_parent(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
@@ -2743,7 +2743,6 @@ jemalloc_postfork_child(void)
witness_postfork_child(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_child(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
diff --git a/src/prof.c b/src/prof.c
index 19c8fb7..b9a9d65 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -2254,7 +2254,8 @@ prof_boot2(tsd_t *tsd)
}
gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
+ b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
if (gctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
@@ -2264,7 +2265,8 @@ prof_boot2(tsd_t *tsd)
}
tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
+ b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
if (tdata_locks == NULL)
return (true);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
diff --git a/src/rtree.c b/src/rtree.c
index b6b9ed7..fd5e85d 100644
--- a/src/rtree.c
+++ b/src/rtree.c
@@ -72,7 +72,8 @@ static rtree_elm_t *
rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
{
- return ((rtree_elm_t *)base_alloc(tsdn, nelms * sizeof(rtree_elm_t)));
+ return ((rtree_elm_t *)base_alloc(tsdn, b0get(), nelms *
+ sizeof(rtree_elm_t), CACHELINE));
}
#ifdef JEMALLOC_JET
#undef rtree_node_alloc
diff --git a/src/stats.c b/src/stats.c
index e150a27..0a3deaa 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -254,7 +254,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned nthreads;
const char *dss;
ssize_t decay_time;
- size_t page, pactive, pdirty, mapped, retained, metadata;
+ size_t page, pactive, pdirty, mapped, retained;
+ size_t base, internal, resident;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
@@ -404,14 +405,32 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"retained: %12zu\n", retained);
}
- CTL_M2_GET("stats.arenas.0.metadata", i, &metadata, size_t);
+ CTL_M2_GET("stats.arenas.0.base", i, &base, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"metadata\": %zu%s\n", metadata, (bins || large) ?
+ "\t\t\t\t\"base\": %zu,\n", base);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "base: %12zu\n", base);
+ }
+
+ CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"internal\": %zu,\n", internal);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "internal: %12zu\n", internal);
+ }
+
+ CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"resident\": %zu%s\n", resident, (bins || large) ?
"," : "");
} else {
malloc_cprintf(write_cb, cbopaque,
- "metadata: %12zu\n", metadata);
+ "resident: %12zu\n", resident);
}
if (bins)
diff --git a/src/tcache.c b/src/tcache.c
index 7f5b291..fad5277 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -440,8 +440,8 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
tcaches_t *elm;
if (tcaches == NULL) {
- tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
- (MALLOCX_TCACHE_MAX+1));
+ tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
+ * (MALLOCX_TCACHE_MAX+1), CACHELINE);
if (tcaches == NULL)
return (true);
}
@@ -510,8 +510,8 @@ tcache_boot(tsdn_t *tsdn)
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
- tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
- sizeof(tcache_bin_info_t));
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
+ * sizeof(tcache_bin_info_t), CACHELINE);
if (tcache_bin_info == NULL)
return (true);
stack_nelms = 0;