Diffstat (limited to 'src/chunk.c')
-rw-r--r--  src/chunk.c  157
1 file changed, 105 insertions, 52 deletions
diff --git a/src/chunk.c b/src/chunk.c
index 246324a..38d0286 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -31,13 +31,12 @@ size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
-static void *chunk_recycle(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
- bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, void *chunk, size_t size);
+static void chunk_dalloc_core(void *chunk, size_t size);
/******************************************************************************/
@@ -104,7 +103,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
malloc_mutex_unlock(&chunks_mtx);
node = base_node_alloc();
if (node == NULL) {
- chunk_dealloc(ret, size, true);
+ chunk_dalloc_core(ret, size);
return (NULL);
}
malloc_mutex_lock(&chunks_mtx);
@@ -119,7 +118,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
malloc_mutex_unlock(&chunks_mtx);
if (node != NULL)
- base_node_dealloc(node);
+ base_node_dalloc(node);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
@@ -141,8 +140,8 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
* takes advantage of this to avoid demanding zeroed chunks, but taking
* advantage of them if they are returned.
*/
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+static void *
+chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
dss_prec_t dss_prec)
{
void *ret;
@@ -156,59 +155,105 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
if (have_dss && dss_prec == dss_prec_primary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
}
/* mmap. */
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
}
/* All strategies for allocation failed. */
- ret = NULL;
-label_return:
- if (ret != NULL) {
- if (config_ivsalloc && base == false) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
- }
- }
- if (config_stats || config_prof) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks =
- stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
- }
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ return (NULL);
+}
+
+static bool
+chunk_register(void *chunk, size_t size, bool base)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+
+ if (config_ivsalloc && base == false) {
+ if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
+ return (true);
+ }
+ if (config_stats || config_prof) {
+ bool gdump;
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ if (stats_chunks.curchunks > stats_chunks.highchunks) {
+ stats_chunks.highchunks =
+ stats_chunks.curchunks;
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
+ gdump = false;
+ malloc_mutex_unlock(&chunks_mtx);
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
+ }
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
+ return (false);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+ void *ret;
+ bool zero;
+
+ zero = false;
+ ret = chunk_alloc_core(size, chunksize, true, &zero,
+ chunk_dss_prec_get());
+ if (ret == NULL)
+ return (NULL);
+ if (chunk_register(ret, size, true)) {
+ chunk_dalloc_core(ret, size);
+ return (NULL);
}
- assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
+void *
+chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
+ unsigned arena_ind, size_t size, size_t alignment, bool *zero)
+{
+ void *ret;
+
+ ret = chunk_alloc(size, alignment, zero, arena_ind);
+ if (ret != NULL && chunk_register(ret, size, false)) {
+ chunk_dalloc(ret, size, arena_ind);
+ ret = NULL;
+ }
+
+ return (ret);
+}
+
+/* Default arena chunk allocation routine in the absence of user override. */
+void *
+chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+ unsigned arena_ind)
+{
+
+ return (chunk_alloc_core(size, alignment, false, zero,
+ arenas[arena_ind]->dss_prec));
+}
+
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
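
For reference, a user-supplied hook pair compatible with the calling convention that chunk_alloc_arena() relies on above could look like the sketch below. The mmap-based bodies, the my_chunk_alloc/my_chunk_dalloc names, and the reading that a dalloc hook returns false on success (mirroring chunk_dalloc_default() further down) are illustrative assumptions inferred from this diff, not part of the change itself.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Hypothetical alloc hook: (size, alignment, zero, arena_ind) -> chunk or NULL. */
static void *
my_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
{
	void *ret;

	(void)arena_ind;
	ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	if (((uintptr_t)ret & (alignment - 1)) != 0) {
		/* Not suitably aligned; a real hook would trim, this sketch bails. */
		munmap(ret, size);
		return (NULL);
	}
	*zero = true;	/* Fresh anonymous mappings are zero-filled. */
	return (ret);
}

/* Hypothetical dalloc hook: returns false on success, true on failure. */
static bool
my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{

	(void)arena_ind;
	return (munmap(chunk, size) != 0);
}

Such a pair would be passed to chunk_alloc_arena() as its chunk_alloc/chunk_dalloc arguments in place of the defaults.
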
@@ -292,9 +337,9 @@ label_return:
* avoid potential deadlock.
*/
if (xnode != NULL)
- base_node_dealloc(xnode);
+ base_node_dalloc(xnode);
if (xprev != NULL)
- base_node_dealloc(xprev);
+ base_node_dalloc(xprev);
}
void
@@ -307,12 +352,12 @@ chunk_unmap(void *chunk, size_t size)
if (have_dss && chunk_in_dss(chunk))
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
- else if (chunk_dealloc_mmap(chunk, size))
+ else if (chunk_dalloc_mmap(chunk, size))
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}
-void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+static void
+chunk_dalloc_core(void *chunk, size_t size)
{
assert(chunk != NULL);
@@ -329,8 +374,16 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
malloc_mutex_unlock(&chunks_mtx);
}
- if (unmap)
- chunk_unmap(chunk, size);
+ chunk_unmap(chunk, size);
+}
+
+/* Default arena chunk deallocation routine in the absence of user override. */
+bool
+chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+ chunk_dalloc_core(chunk, size);
+ return (false);
}
bool
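
Taken together, the hooks slot into the path above roughly as in the following sketch, which assumes arena code falls back to chunk_alloc_default/chunk_dalloc_default when no override is installed, that arena_ind is in scope, and that chunksize is used as both size and alignment:

	bool zero = false;
	void *chunk;

	/* Allocate one chunk through the hook pair; registration happens inside. */
	chunk = chunk_alloc_arena(chunk_alloc_default, chunk_dalloc_default,
	    arena_ind, chunksize, chunksize, &zero);
	if (chunk == NULL) {
		/* Either the hook failed or chunk_register() did; treat as OOM. */
	}

Note the asymmetry in return conventions: an alloc hook reports failure with NULL, while chunk_dalloc_default() returns false on success, which is consistent with how chunk_dalloc_mmap()'s return value is tested in chunk_unmap() above.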