author      Jason Evans <jasone@canonware.com>      2015-01-31 05:49:19 (GMT)
committer   Jason Evans <jasone@canonware.com>      2015-02-05 00:51:53 (GMT)
commit      f500a10b2e94852b867334703ad77467dcfd2ddd (patch)
tree        261233df873ca379c1fb8d70d8e528030fcd6d5e /src/base.c
parent      918a1a5b3f09cb456c25be9a2555a8fea6a9bb94 (diff)
Refactor base_alloc() to guarantee demand-zeroed memory.
Refactor base_alloc() to guarantee that allocations are carved from demand-zeroed virtual memory. This supports sparse data structures such as multi-page radix tree nodes.

Enhance base_alloc() to keep track of fragments which were too small to support previous allocation requests, and try to consume them during subsequent requests. This becomes important when request sizes commonly approach or exceed the chunk size (as could radix tree node allocations).
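The fragment-reuse scheme described above can be illustrated with a small, self-contained sketch. The names (frag_t, frag_record, frag_alloc) and the sorted singly linked list are hypothetical stand-ins for jemalloc's size/address-ordered extent tree (extent_tree_szad_nsearch() and friends); the sketch only shows the idea of remembering leftover space and carving later requests out of it, not jemalloc's actual implementation. A caller-side sketch of the demand-zero guarantee follows the diff below.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define CACHELINE 64
#define CACHELINE_CEILING(s) (((s) + CACHELINE - 1) & ~((size_t)CACHELINE - 1))

typedef struct frag_s {
	void		*addr;
	size_t		size;
	struct frag_s	*next;	/* List kept sorted by (size, addr). */
} frag_t;

static frag_t *frag_avail = NULL;

/* Remember a leftover region so that later, smaller requests can use it. */
static void
frag_record(void *addr, size_t size)
{
	frag_t *node = malloc(sizeof(frag_t));
	frag_t **link = &frag_avail;

	if (node == NULL)
		return;	/* Sketch only: silently drop the fragment. */
	node->addr = addr;
	node->size = size;
	while (*link != NULL && ((*link)->size < size || ((*link)->size ==
	    size && (uintptr_t)(*link)->addr < (uintptr_t)addr)))
		link = &(*link)->next;
	node->next = *link;
	*link = node;
}

/*
 * Satisfy a request from the smallest recorded fragment that fits, splitting
 * off and re-recording any unused tail.  Returns NULL if no fragment fits, in
 * which case a real allocator would map a fresh chunk.
 */
static void *
frag_alloc(size_t size)
{
	size_t csize = CACHELINE_CEILING(size);
	frag_t **link, *node;
	void *ret;

	for (link = &frag_avail; *link != NULL; link = &(*link)->next) {
		if ((*link)->size >= csize)
			break;
	}
	if (*link == NULL)
		return (NULL);
	node = *link;
	*link = node->next;
	ret = node->addr;
	if (node->size > csize)
		frag_record((void *)((uintptr_t)ret + csize), node->size - csize);
	free(node);
	return (ret);
}

For example, calling frag_record(chunk, chunksize) after mapping a chunk and then frag_alloc(sizeof(extent_node_t)) mirrors, in simplified form, how base_chunk_alloc() and base_alloc_locked() cooperate in the diff below.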
Diffstat (limited to 'src/base.c')
-rw-r--r--  src/base.c  147
1 file changed, 91 insertions(+), 56 deletions(-)
diff --git a/src/base.c b/src/base.c
index 22f3613..0d1de7f 100644
--- a/src/base.c
+++ b/src/base.c
@@ -5,73 +5,117 @@
/* Data. */
static malloc_mutex_t base_mtx;
-
-/*
- * Current pages that are being used for internal memory allocations. These
- * pages are carved up in cacheline-size quanta, so that there is no chance of
- * false cache line sharing.
- */
-static void *base_pages;
-static void *base_next_addr;
-static void *base_past_addr; /* Addr immediately past base_pages. */
+static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
-
static size_t base_allocated;
/******************************************************************************/
-static bool
-base_pages_alloc(size_t minsize)
+static extent_node_t *
+base_node_try_alloc_locked(void)
{
- size_t csize;
+ extent_node_t *node;
- assert(minsize != 0);
- csize = CHUNK_CEILING(minsize);
- base_pages = chunk_alloc_base(csize);
- if (base_pages == NULL)
- return (true);
- base_next_addr = base_pages;
- base_past_addr = (void *)((uintptr_t)base_pages + csize);
+ if (base_nodes == NULL)
+ return (NULL);
+ node = base_nodes;
+ base_nodes = *(extent_node_t **)node;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ return (node);
+}
- return (false);
+static void
+base_node_dalloc_locked(extent_node_t *node)
+{
+
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
}
-void *
-base_alloc(size_t size)
+/* base_mtx must be held. */
+static extent_node_t *
+base_chunk_alloc(size_t minsize)
+{
+ extent_node_t *node;
+ size_t csize, nsize;
+ void *addr;
+
+ assert(minsize != 0);
+ node = base_node_try_alloc_locked();
+ /* Allocate enough space to also carve a node out if necessary. */
+ nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
+ csize = CHUNK_CEILING(minsize + nsize);
+ addr = chunk_alloc_base(csize);
+ if (addr == NULL) {
+ if (node != NULL)
+ base_node_dalloc_locked(node);
+ return (NULL);
+ }
+ if (node == NULL) {
+ csize -= nsize;
+ node = (extent_node_t *)((uintptr_t)addr + csize);
+ if (config_stats)
+ base_allocated += nsize;
+ }
+ node->addr = addr;
+ node->size = csize;
+ return (node);
+}
+
+static void *
+base_alloc_locked(size_t size)
{
void *ret;
size_t csize;
+ extent_node_t *node;
+ extent_node_t key;
- /* Round size up to nearest multiple of the cacheline size. */
+ /*
+ * Round size up to nearest multiple of the cacheline size, so that
+ * there is no chance of false cache line sharing.
+ */
csize = CACHELINE_CEILING(size);
- malloc_mutex_lock(&base_mtx);
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
- malloc_mutex_unlock(&base_mtx);
- return (NULL);
- }
+ key.addr = NULL;
+ key.size = csize;
+ node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+ if (node != NULL) {
+ /* Use existing space. */
+ extent_tree_szad_remove(&base_avail_szad, node);
+ } else {
+ /* Try to allocate more space. */
+ node = base_chunk_alloc(csize);
}
- /* Allocate. */
- ret = base_next_addr;
- base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+ if (node == NULL)
+ return (NULL);
+
+ ret = node->addr;
+ if (node->size > csize) {
+ node->addr = (void *)((uintptr_t)ret + csize);
+ node->size -= csize;
+ extent_tree_szad_insert(&base_avail_szad, node);
+ } else
+ base_node_dalloc_locked(node);
if (config_stats)
base_allocated += csize;
- malloc_mutex_unlock(&base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
-
return (ret);
}
+/*
+ * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
+ * sparse data structures such as radix tree nodes efficient with respect to
+ * physical memory usage.
+ */
void *
-base_calloc(size_t number, size_t size)
+base_alloc(size_t size)
{
- void *ret = base_alloc(number * size);
-
- if (ret != NULL)
- memset(ret, 0, number * size);
+ void *ret;
+ malloc_mutex_lock(&base_mtx);
+ ret = base_alloc_locked(size);
+ malloc_mutex_unlock(&base_mtx);
return (ret);
}
@@ -81,17 +125,9 @@ base_node_alloc(void)
extent_node_t *ret;
malloc_mutex_lock(&base_mtx);
- if (base_nodes != NULL) {
- ret = base_nodes;
- base_nodes = *(extent_node_t **)ret;
- malloc_mutex_unlock(&base_mtx);
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret,
- sizeof(extent_node_t));
- } else {
- malloc_mutex_unlock(&base_mtx);
- ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
- }
-
+ if ((ret = base_node_try_alloc_locked()) == NULL)
+ ret = (extent_node_t *)base_alloc_locked(sizeof(extent_node_t));
+ malloc_mutex_unlock(&base_mtx);
return (ret);
}
@@ -99,10 +135,8 @@ void
base_node_dalloc(extent_node_t *node)
{
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
+ base_node_dalloc_locked(node);
malloc_mutex_unlock(&base_mtx);
}
@@ -121,9 +155,10 @@ bool
base_boot(void)
{
- base_nodes = NULL;
if (malloc_mutex_init(&base_mtx))
return (true);
+ extent_tree_szad_new(&base_avail_szad);
+ base_nodes = NULL;
return (false);
}
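The comment the diff adds above base_alloc() promises demand-zeroed memory so that sparse, multi-page structures stay cheap in terms of physical memory. A hypothetical caller sketch, assuming only jemalloc's internal base_alloc() prototype and using illustrative names (rtree_level_t, RTREE_FANOUT) that are not jemalloc's real radix-tree interface:

#include <stddef.h>

/* Declared in jemalloc's internal headers; repeated here only for context. */
void	*base_alloc(size_t size);

#define RTREE_FANOUT	(1U << 12)	/* Illustrative fan-out spanning several pages. */

typedef struct rtree_level_s {
	void	*slots[RTREE_FANOUT];	/* Mostly NULL; untouched pages stay unmapped. */
} rtree_level_t;

static rtree_level_t *
rtree_level_new(void)
{

	/* No memset(): base_alloc() now guarantees demand-zeroed memory. */
	return ((rtree_level_t *)base_alloc(sizeof(rtree_level_t)));
}

The same zero guarantee is what allows this patch to delete base_calloc(): former callers can rely on base_alloc() directly without an explicit memset().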