path: root/src/base.c
author     Jason Evans <jasone@canonware.com>    2016-12-22 22:39:10 (GMT)
committer  Jason Evans <jasone@canonware.com>    2016-12-27 02:08:28 (GMT)
commit     a0dd3a4483e2e72ee80e70424a6522f873f2c7ff (patch)
tree       35653ce115166355fd3989dff22714dc0402bf15 /src/base.c
parent     a6e86810d83aba0d94d0f6423ed09e8e6e0909fa (diff)
Implement per arena base allocators.
Add/rename related mallctls:
- Add stats.arenas.<i>.base .
- Rename stats.arenas.<i>.metadata to stats.arenas.<i>.internal .
- Add stats.arenas.<i>.resident .

Modify the arenas.extend mallctl to take an optional (extent_hooks_t *)
argument so that it is possible for all base allocations to be serviced by the
specified extent hooks.

This resolves #463.
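As a usage illustration (not part of this commit), the sketch below shows how an
application might create an arena whose base allocations are serviced by its own
extent hooks via the modified arenas.extend mallctl. The hook table my_hooks and
the helper create_arena_with_hooks() are hypothetical names supplied here for the
example; the mallctl() call itself uses jemalloc's standard signature, assuming an
unprefixed build.

#include <limits.h>
#include <jemalloc/jemalloc.h>

/*
 * Hypothetical hook table supplied by the application; its member functions
 * are placeholders and are not part of this commit.
 */
extern extent_hooks_t my_hooks;

static unsigned
create_arena_with_hooks(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	extent_hooks_t *hooksp = &my_hooks;

	/*
	 * With this commit, arenas.extend accepts an optional
	 * (extent_hooks_t *) through newp, so the new arena's base
	 * allocations are serviced by the supplied hooks.
	 */
	if (mallctl("arenas.extend", (void *)&arena_ind, &sz,
	    (void *)&hooksp, sizeof(hooksp)) != 0)
		return (UINT_MAX);	/* Arena creation failed. */
	return (arena_ind);
}

Passing NULL/0 for newp/newlen preserves the previous behavior, in which the
arena uses the default extent hooks.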
Diffstat (limited to 'src/base.c')
-rw-r--r--  src/base.c  407
1 files changed, 288 insertions, 119 deletions
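Before the diff itself, a small standalone sketch (not part of the patch) of the
sizing arithmetic that the new base_block_alloc() performs: each mapped block
holds a base_block_t header, an alignment gap, and the requested payload, all
rounded up to a hugepage multiple. The constants below are illustrative stand-ins
for jemalloc's QUANTUM and HUGEPAGE, header_size stands in for
sizeof(base_block_t), and power-of-two alignment is assumed.

#include <stddef.h>

/* Illustrative stand-ins; the real values come from jemalloc's headers. */
#define QUANTUM			((size_t)16)
#define HUGEPAGE		((size_t)2 << 20)	/* 2 MiB */

/* Round s up to a multiple of a power-of-two alignment a. */
#define ALIGNMENT_CEILING(s, a)	(((s) + ((a) - 1)) & ~((a) - 1))
#define HUGEPAGE_CEILING(s)	ALIGNMENT_CEILING((s), HUGEPAGE)

/*
 * Mirror of the sizing in base_block_alloc(): header, then a gap that
 * restores the payload's alignment, then the payload, rounded up to a
 * hugepage boundary before being passed to base_map().
 */
static size_t
block_size_for(size_t header_size, size_t size, size_t alignment)
{
	size_t usize, gap_size;

	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	usize = ALIGNMENT_CEILING(size, alignment);
	gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size;
	return (HUGEPAGE_CEILING(header_size + gap_size + usize));
}

Under these stand-in constants, any request whose header, gap, and payload
together fit in 2 MiB maps exactly one hugepage-sized block.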
diff --git a/src/base.c b/src/base.c
index 4764d9c..5eab7cd 100644
--- a/src/base.c
+++ b/src/base.c
@@ -4,112 +4,308 @@
/******************************************************************************/
/* Data. */
-static malloc_mutex_t base_mtx;
-static size_t base_extent_sn_next;
-static extent_heap_t base_avail[NSIZES];
-static extent_t *base_extents;
-static size_t base_allocated;
-static size_t base_resident;
-static size_t base_mapped;
+static base_t *b0;
/******************************************************************************/
-static extent_t *
-base_extent_try_alloc(tsdn_t *tsdn)
+static void *
+base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size)
{
- extent_t *extent;
+ void *addr;
+ bool zero = true;
+ bool commit = true;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
+ assert(size == HUGEPAGE_CEILING(size));
- if (base_extents == NULL)
- return (NULL);
- extent = base_extents;
- base_extents = *(extent_t **)extent;
- return (extent);
+ if (extent_hooks == &extent_hooks_default)
+ addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
+ else {
+ addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
+ &zero, &commit, ind);
+ }
+
+ return (addr);
}
static void
-base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
+base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size)
{
- malloc_mutex_assert_owner(tsdn, &base_mtx);
-
- *(extent_t **)extent = base_extents;
- base_extents = extent;
+ /*
+ * Cascade through dalloc, decommit, purge_lazy, and purge_forced,
+ * stopping at first success. This cascade is performed for consistency
+ * with the cascade in extent_dalloc_wrapper() because an application's
+ * custom hooks may not support e.g. dalloc. This function is only ever
+ * called as a side effect of arena destruction, so although it might
+ * seem pointless to do anything besides dalloc here, the application
+ * may in fact want the end state of all associated virtual memory to be
+ * in some consistent-but-unallocated state.
+ */
+ if (extent_hooks == &extent_hooks_default) {
+ if (!extent_dalloc_mmap(addr, size))
+ return;
+ if (!pages_decommit(addr, size))
+ return;
+ if (!pages_purge_lazy(addr, size))
+ return;
+ if (!pages_purge_forced(addr, size))
+ return;
+ /* Nothing worked. This should never happen. */
+ not_reached();
+ } else {
+ if (extent_hooks->dalloc != NULL &&
+ !extent_hooks->dalloc(extent_hooks, addr, size, true, ind))
+ return;
+ if (extent_hooks->decommit != NULL &&
+ !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
+ ind))
+ return;
+ if (extent_hooks->purge_lazy != NULL &&
+ !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
+ ind))
+ return;
+ if (extent_hooks->purge_forced != NULL &&
+ !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
+ size, ind))
+ return;
+ /* Nothing worked. That's the application's problem. */
+ }
}
static void
-base_extent_init(extent_t *extent, void *addr, size_t size)
+base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+ size_t size)
{
- size_t sn = atomic_add_zu(&base_extent_sn_next, 1) - 1;
+ size_t sn;
+
+ sn = *extent_sn_next;
+ (*extent_sn_next)++;
extent_init(extent, NULL, addr, size, 0, sn, true, true, true, false);
}
+static void *
+base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+ size_t alignment)
+{
+ void *ret;
+
+ assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
+ assert(size == ALIGNMENT_CEILING(size, alignment));
+
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
+ alignment) - (uintptr_t)extent_addr_get(extent);
+ ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
+ assert(extent_size_get(extent) >= *gap_size + size);
+ extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
+ *gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
+ extent_sn_get(extent), true, true, true, false);
+ return (ret);
+}
+
+static void
+base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t gap_size, void *addr, size_t size)
+{
+
+ if (extent_size_get(extent) > 0) {
+ /*
+ * Compute the index for the largest size class that does not
+ * exceed extent's size.
+ */
+ szind_t index_floor = size2index(extent_size_get(extent) + 1) -
+ 1;
+ extent_heap_insert(&base->avail[index_floor], extent);
+ }
+
+ if (config_stats) {
+ base->allocated += size;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base->resident += PAGE_CEILING((uintptr_t)addr + size) -
+ PAGE_CEILING((uintptr_t)addr - gap_size);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ }
+}
+
+static void *
+base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t size, size_t alignment)
+{
+ void *ret;
+ size_t gap_size;
+
+ ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
+ return (ret);
+}
+
+/*
+ * Allocate a block of virtual memory that is large enough to start with a
+ * base_block_t header, followed by an object of specified size and alignment.
+ * On success a pointer to the initialized base_block_t header is returned.
+ */
+static base_block_t *
+base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
+ size_t *extent_sn_next, size_t size, size_t alignment)
+{
+ base_block_t *block;
+ size_t usize, header_size, gap_size, block_size;
+
+ alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
+ usize = ALIGNMENT_CEILING(size, alignment);
+ header_size = sizeof(base_block_t);
+ gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size;
+ block_size = HUGEPAGE_CEILING(header_size + gap_size + usize);
+ block = (base_block_t *)base_map(extent_hooks, ind, block_size);
+ if (block == NULL)
+ return (NULL);
+ block->size = block_size;
+ block->next = NULL;
+ assert(block_size >= header_size);
+ base_extent_init(extent_sn_next, &block->extent,
+ (void *)((uintptr_t)block + header_size), block_size - header_size);
+ return (block);
+}
+
+/*
+ * Allocate an extent that is at least as large as specified size, with
+ * specified alignment.
+ */
static extent_t *
-base_extent_alloc(tsdn_t *tsdn, size_t minsize)
+base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
{
- extent_t *extent;
- size_t esize, nsize;
- void *addr;
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ base_block_t *block;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
- assert(minsize != 0);
- extent = base_extent_try_alloc(tsdn);
- /* Allocate enough space to also carve an extent out if necessary. */
- nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
- esize = PAGE_CEILING(minsize + nsize);
- /*
- * Directly call extent_alloc_mmap() because it's critical to allocate
- * untouched demand-zeroed virtual memory.
- */
- {
- bool zero = true;
- bool commit = true;
- addr = extent_alloc_mmap(NULL, esize, PAGE, &zero, &commit);
+ malloc_mutex_assert_owner(tsdn, &base->mtx);
+
+ block = base_block_alloc(extent_hooks, base_ind_get(base),
+ &base->extent_sn_next, size, alignment);
+ if (block == NULL)
+ return (NULL);
+ block->next = base->blocks;
+ base->blocks = block;
+ if (config_stats) {
+ base->allocated += sizeof(base_block_t);
+ base->resident += PAGE_CEILING(sizeof(base_block_t));
+ base->mapped += block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
}
- if (addr == NULL) {
- if (extent != NULL)
- base_extent_dalloc(tsdn, extent);
+ return (&block->extent);
+}
+
+base_t *
+b0get(void)
+{
+
+ return (b0);
+}
+
+base_t *
+base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
+{
+ base_t *base;
+ size_t extent_sn_next, base_alignment, base_size, gap_size;
+ base_block_t *block;
+ szind_t i;
+
+ extent_sn_next = 0;
+ block = base_block_alloc(extent_hooks, ind, &extent_sn_next,
+ sizeof(base_t), QUANTUM);
+ if (block == NULL)
+ return (NULL);
+
+ base_alignment = CACHELINE;
+ base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
+ base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ &gap_size, base_size, base_alignment);
+ base->ind = ind;
+ base->extent_hooks = extent_hooks;
+ if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE)) {
+ base_unmap(extent_hooks, ind, block, block->size);
return (NULL);
}
- base_mapped += esize;
- if (extent == NULL) {
- extent = (extent_t *)addr;
- addr = (void *)((uintptr_t)addr + nsize);
- esize -= nsize;
- if (config_stats) {
- base_allocated += nsize;
- base_resident += PAGE_CEILING(nsize);
- }
+ base->extent_sn_next = extent_sn_next;
+ base->blocks = block;
+ for (i = 0; i < NSIZES; i++)
+ extent_heap_new(&base->avail[i]);
+ if (config_stats) {
+ base->allocated = sizeof(base_block_t);
+ base->resident = PAGE_CEILING(sizeof(base_block_t));
+ base->mapped = block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
}
- base_extent_init(extent, addr, esize);
- return (extent);
+ base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
+ base_size);
+
+ return (base);
+}
+
+void
+base_delete(base_t *base)
+{
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ base_block_t *next = base->blocks;
+ do {
+ base_block_t *block = next;
+ next = block->next;
+ base_unmap(extent_hooks, base_ind_get(base), block,
+ block->size);
+ } while (next != NULL);
+}
+
+extent_hooks_t *
+base_extent_hooks_get(base_t *base)
+{
+
+ return ((extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun));
+}
+
+extent_hooks_t *
+base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks)
+{
+ extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
+ union {
+ extent_hooks_t **h;
+ void **v;
+ } u;
+
+ u.h = &base->extent_hooks;
+ atomic_write_p(u.v, extent_hooks);
+
+ return (old_extent_hooks);
}
/*
- * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
- * sparse data structures such as radix tree nodes efficient with respect to
- * physical memory usage.
+ * base_alloc() returns zeroed memory, which is always demand-zeroed for the
+ * auto arenas, in order to make multi-page sparse data structures such as radix
+ * tree nodes efficient with respect to physical memory usage. Upon success a
+ * pointer to at least size bytes with specified alignment is returned. Note
+ * that size is rounded up to the nearest multiple of alignment to avoid false
+ * sharing.
*/
void *
-base_alloc(tsdn_t *tsdn, size_t size)
+base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
{
void *ret;
- size_t csize;
+ size_t usize, asize;
szind_t i;
extent_t *extent;
- /*
- * Round size up to nearest multiple of the cacheline size, so that
- * there is no chance of false cache line sharing.
- */
- csize = CACHELINE_CEILING(size);
+ alignment = QUANTUM_CEILING(alignment);
+ usize = ALIGNMENT_CEILING(size, alignment);
+ asize = usize + alignment - QUANTUM;
extent = NULL;
- malloc_mutex_lock(tsdn, &base_mtx);
- for (i = size2index(csize); i < NSIZES; i++) {
- extent = extent_heap_remove_first(&base_avail[i]);
+ malloc_mutex_lock(tsdn, &base->mtx);
+ for (i = size2index(asize); i < NSIZES; i++) {
+ extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
break;
@@ -117,87 +313,60 @@ base_alloc(tsdn_t *tsdn, size_t size)
}
if (extent == NULL) {
/* Try to allocate more space. */
- extent = base_extent_alloc(tsdn, csize);
+ extent = base_extent_alloc(tsdn, base, usize, alignment);
}
if (extent == NULL) {
ret = NULL;
goto label_return;
}
- ret = extent_addr_get(extent);
- if (extent_size_get(extent) > csize) {
- szind_t index_floor;
-
- extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
- extent_size_set(extent, extent_size_get(extent) - csize);
- /*
- * Compute the index for the largest size class that does not
- * exceed extent's size.
- */
- index_floor = size2index(extent_size_get(extent) + 1) - 1;
- extent_heap_insert(&base_avail[index_floor], extent);
- } else
- base_extent_dalloc(tsdn, extent);
- if (config_stats) {
- base_allocated += csize;
- /*
- * Add one PAGE to base_resident for every page boundary that is
- * crossed by the new allocation.
- */
- base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
- PAGE_CEILING((uintptr_t)ret);
- }
+ ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
label_return:
- malloc_mutex_unlock(tsdn, &base_mtx);
+ malloc_mutex_unlock(tsdn, &base->mtx);
return (ret);
}
void
-base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
size_t *mapped)
{
- malloc_mutex_lock(tsdn, &base_mtx);
- assert(base_allocated <= base_resident);
- assert(base_resident <= base_mapped);
- *allocated = base_allocated;
- *resident = base_resident;
- *mapped = base_mapped;
- malloc_mutex_unlock(tsdn, &base_mtx);
+ cassert(config_stats);
+
+ malloc_mutex_lock(tsdn, &base->mtx);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ *allocated = base->allocated;
+ *resident = base->resident;
+ *mapped = base->mapped;
+ malloc_mutex_unlock(tsdn, &base->mtx);
}
-bool
-base_boot(void)
+void
+base_prefork(tsdn_t *tsdn, base_t *base)
{
- szind_t i;
-
- if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
- return (true);
- base_extent_sn_next = 0;
- for (i = 0; i < NSIZES; i++)
- extent_heap_new(&base_avail[i]);
- base_extents = NULL;
- return (false);
+ malloc_mutex_prefork(tsdn, &base->mtx);
}
void
-base_prefork(tsdn_t *tsdn)
+base_postfork_parent(tsdn_t *tsdn, base_t *base)
{
- malloc_mutex_prefork(tsdn, &base_mtx);
+ malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
-base_postfork_parent(tsdn_t *tsdn)
+base_postfork_child(tsdn_t *tsdn, base_t *base)
{
- malloc_mutex_postfork_parent(tsdn, &base_mtx);
+ malloc_mutex_postfork_child(tsdn, &base->mtx);
}
-void
-base_postfork_child(tsdn_t *tsdn)
+bool
+base_boot(tsdn_t *tsdn)
{
- malloc_mutex_postfork_child(tsdn, &base_mtx);
+ b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ return (b0 == NULL);
}