Diffstat (limited to 'src')

-rw-r--r--  src/arena.c        10
-rw-r--r--  src/base.c         15
-rw-r--r--  src/ctl.c          30
-rw-r--r--  src/huge.c        113
-rw-r--r--  src/jemalloc.c     35
-rw-r--r--  src/prof.c         30
-rw-r--r--  src/quarantine.c   11
-rw-r--r--  src/stats.c        14
-rw-r--r--  src/tcache.c        4
-rw-r--r--  src/tsd.c           2

10 files changed, 146 insertions(+), 118 deletions(-)
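
The commit distinguishes three categories of allocator metadata: allocations served by the bootstrap (base) allocator, arena chunk headers, and internal allocations the allocator makes for its own bookkeeping (extent nodes, profiling structures, tcaches, quarantine state). The new top-level stats.metadata statistic combines all three; a rough sketch of the composition, following the ctl_refresh() change in src/ctl.c below:

	/* Sketch only; the merge over arenas is implicit in ctl_refresh(). */
	stats.metadata = base_allocated		/* src/base.c counter */
	    + metadata_mapped			/* chunk headers, per arena */
	    + metadata_allocated;		/* internal allocations, per arena */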
diff --git a/src/arena.c b/src/arena.c
index 1eb4000..984b8ad 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -405,8 +405,10 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
arena->ind, NULL, size, alignment, zero);
malloc_mutex_lock(&arena->lock);
- if (config_stats && chunk != NULL)
+ if (config_stats && chunk != NULL) {
arena->stats.mapped += chunksize;
+ arena->stats.metadata_mapped += (map_bias << LG_PAGE);
+ }
return (chunk);
}
@@ -514,8 +516,10 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
malloc_mutex_unlock(&arena->lock);
chunk_dalloc((void *)spare, chunksize, arena->ind);
malloc_mutex_lock(&arena->lock);
- if (config_stats)
+ if (config_stats) {
arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
} else
arena->spare = chunk;
}
@@ -2273,6 +2277,8 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
+ astats->metadata_mapped += arena->stats.metadata_mapped;
+ astats->metadata_allocated += arena_metadata_allocated_get(arena);
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
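
In both hunks above, map_bias is the number of pages at the head of each arena chunk that the chunk header occupies, so map_bias << LG_PAGE is the header footprint in bytes, the amount charged to (and released from) metadata_mapped. Purely illustrative, since map_bias depends on configuration:

	/* Hypothetical: 4 KiB pages (LG_PAGE == 12), 13 header pages. */
	size_t header_bytes = 13 << 12;	/* 53248 bytes per chunk header */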
diff --git a/src/base.c b/src/base.c
index 409c7bb..22f3613 100644
--- a/src/base.c
+++ b/src/base.c
@@ -16,6 +16,8 @@ static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
+static size_t base_allocated;
+
/******************************************************************************/
static bool
@@ -54,6 +56,8 @@ base_alloc(size_t size)
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+ if (config_stats)
+ base_allocated += csize;
malloc_mutex_unlock(&base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
@@ -102,6 +106,17 @@ base_node_dalloc(extent_node_t *node)
malloc_mutex_unlock(&base_mtx);
}
+size_t
+base_allocated_get(void)
+{
+ size_t ret;
+
+ malloc_mutex_lock(&base_mtx);
+ ret = base_allocated;
+ malloc_mutex_unlock(&base_mtx);
+ return (ret);
+}
+
bool
base_boot(void)
{
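
base_allocated is only mutated while base_mtx is held (see base_alloc() above), so base_allocated_get() takes the same mutex for a consistent read rather than relying on atomics. Its one consumer in this commit is ctl_refresh() in src/ctl.c; sketched:

	/* astats here stands for the merged totals element in ctl_stats. */
	ctl_stats.metadata = base_allocated_get()
	    + astats.metadata_mapped + astats.metadata_allocated;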
diff --git a/src/ctl.c b/src/ctl.c
index 6b95584..b65af52 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -183,10 +183,13 @@ CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_metadata_mapped)
+CTL_PROTO(stats_arenas_i_metadata_allocated)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
+CTL_PROTO(stats_metadata)
CTL_PROTO(stats_mapped)
/******************************************************************************/
@@ -355,6 +358,11 @@ static const ctl_named_node_t stats_chunks_node[] = {
{NAME("high"), CTL(stats_chunks_high)}
};
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+ {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
+ {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
+};
+
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -432,6 +440,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
+ {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
@@ -451,6 +460,7 @@ static const ctl_named_node_t stats_node[] = {
{NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
+ {NAME("metadata"), CTL(stats_metadata)},
{NAME("mapped"), CTL(stats_mapped)},
{NAME("chunks"), CHILD(named, stats_chunks)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
@@ -484,14 +494,14 @@ ctl_arena_init(ctl_arena_stats_t *astats)
if (astats->lstats == NULL) {
astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
- sizeof(malloc_large_stats_t), false);
+ sizeof(malloc_large_stats_t));
if (astats->lstats == NULL)
return (true);
}
if (astats->hstats == NULL) {
astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
- sizeof(malloc_huge_stats_t), false);
+ sizeof(malloc_huge_stats_t));
if (astats->hstats == NULL)
return (true);
}
@@ -551,6 +561,9 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
+ sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
+ sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
+
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
sstats->ndalloc_small += astats->ndalloc_small;
@@ -627,7 +640,7 @@ ctl_grow(void)
/* Allocate extended arena stats. */
astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
- sizeof(ctl_arena_stats_t), false);
+ sizeof(ctl_arena_stats_t));
if (astats == NULL)
return (true);
@@ -704,6 +717,10 @@ ctl_refresh(void)
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
ctl_stats.active =
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
+ ctl_stats.metadata = base_allocated_get()
+ + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped
+ + ctl_stats.arenas[ctl_stats.narenas].astats
+ .metadata_allocated;
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
}
@@ -723,7 +740,7 @@ ctl_init(void)
*/
ctl_stats.narenas = narenas_total_get();
ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
- (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t), false);
+ (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) {
ret = true;
goto label_return;
@@ -1806,6 +1823,7 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
@@ -1825,6 +1843,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
+ ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
+ ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
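
The new names are reachable through the public mallctl() interface; statistics are cached, so an "epoch" update refreshes them first. A minimal sketch (assumes <jemalloc/jemalloc.h>; error handling elided):

	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);	/* refresh cached stats */

	size_t metadata;
	sz = sizeof(metadata);
	mallctl("stats.metadata", &metadata, &sz, NULL, 0);

	size_t meta_mapped;
	sz = sizeof(meta_mapped);
	mallctl("stats.arenas.0.metadata.mapped", &meta_mapped, &sz, NULL, 0);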
diff --git a/src/huge.c b/src/huge.c
index 416cb17..c4d1ebc 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -37,8 +37,8 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
/* Allocate one or more contiguous chunks for this request. */
/* Allocate an extent node with which to track the chunk. */
- node = ipalloct(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
- CACHELINE, false, try_tcache, NULL);
+ node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, try_tcache, true, arena);
if (node == NULL)
return (NULL);
@@ -50,7 +50,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
usize, alignment, &is_zeroed)) == NULL) {
- idalloct(tsd, node, try_tcache);
+ idalloctm(tsd, node, try_tcache, true);
return (NULL);
}
@@ -73,6 +73,33 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
return (ret);
}
+static extent_node_t *
+huge_node_locked(const void *ptr)
+{
+ extent_node_t *node, key;
+
+ /* The caller must hold huge_mtx. */
+ /* Extract from tree of huge allocations. */
+ key.addr = __DECONST(void *, ptr);
+ node = extent_tree_ad_search(&huge, &key);
+ assert(node != NULL);
+ assert(node->addr == ptr);
+
+ return (node);
+}
+
+static extent_node_t *
+huge_node(const void *ptr)
+{
+ extent_node_t *node;
+
+ malloc_mutex_lock(&huge_mtx);
+ node = huge_node_locked(ptr);
+ malloc_mutex_unlock(&huge_mtx);
+
+ return (node);
+}
+
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
@@ -102,7 +129,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
{
size_t usize_next;
bool zeroed;
- extent_node_t *node, key;
+ extent_node_t *node;
arena_t *arena;
/* Increase usize to incorporate extra. */
@@ -126,10 +153,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
zeroed = true;
malloc_mutex_lock(&huge_mtx);
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
+ node = huge_node_locked(ptr);
arena = node->arena;
/* Update the size of the huge allocation. */
assert(node->size != usize);
@@ -159,7 +183,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
size_t sdiff;
bool zeroed;
- extent_node_t *node, key;
+ extent_node_t *node;
arena_t *arena;
sdiff = CHUNK_CEILING(usize) - usize;
@@ -172,10 +196,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
}
malloc_mutex_lock(&huge_mtx);
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
+ node = huge_node_locked(ptr);
arena = node->arena;
/* Update the size of the huge allocation. */
node->size = usize;
@@ -190,7 +211,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
size_t usize;
- extent_node_t *node, key;
+ extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
@@ -201,10 +222,7 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
}
malloc_mutex_lock(&huge_mtx);
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
+ node = huge_node_locked(ptr);
arena = node->arena;
is_zeroed_subchunk = node->zeroed;
malloc_mutex_unlock(&huge_mtx);
@@ -342,77 +360,44 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
void
huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache)
{
- extent_node_t *node, key;
+ extent_node_t *node;
malloc_mutex_lock(&huge_mtx);
- /* Extract from tree of huge allocations. */
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
+ node = huge_node_locked(ptr);
extent_tree_ad_remove(&huge, node);
malloc_mutex_unlock(&huge_mtx);
huge_dalloc_junk(node->addr, node->size);
arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
- idalloct(tsd, node, try_tcache);
+ idalloctm(tsd, node, try_tcache, true);
}
-size_t
-huge_salloc(const void *ptr)
+arena_t *
+huge_aalloc(const void *ptr)
{
- size_t ret;
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- ret = node->size;
+ return (huge_node(ptr)->arena);
+}
- malloc_mutex_unlock(&huge_mtx);
+size_t
+huge_salloc(const void *ptr)
+{
- return (ret);
+ return (huge_node(ptr)->size);
}
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
- prof_tctx_t *ret;
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
-
- ret = node->prof_tctx;
-
- malloc_mutex_unlock(&huge_mtx);
-
- return (ret);
+ return (huge_node(ptr)->prof_tctx);
}
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
- extent_node_t *node, key;
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
-
- node->prof_tctx = tctx;
-
- malloc_mutex_unlock(&huge_mtx);
+ huge_node(ptr)->prof_tctx = tctx;
}
bool
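
The refactor collapses several copies of the address-keyed tree search into two helpers: huge_node_locked() for callers that already hold huge_mtx and keep holding it across node accesses, and huge_node() for one-shot lookups. Sketches mirroring the call sites above (ptr and usize stand for a caller's values):

	/* One-shot read, as in huge_salloc() or huge_aalloc(): */
	size_t usize = huge_node(ptr)->size;

	/* Read-modify sequence, as in the huge_ralloc_no_move_*() paths: */
	extent_node_t *node;
	malloc_mutex_lock(&huge_mtx);
	node = huge_node_locked(ptr);
	node->size = usize;	/* mutate while huge_mtx is still held */
	malloc_mutex_unlock(&huge_mtx);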
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 632c8d3..d1fa674 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -289,45 +289,34 @@ a0get(void)
}
static void *
-a0imalloc(size_t size, bool zero)
+a0ialloc(size_t size, bool zero, bool is_metadata)
{
- void *ret;
if (unlikely(malloc_init_a0()))
return (NULL);
- if (likely(size <= arena_maxclass))
- ret = arena_malloc(NULL, a0get(), size, zero, false);
- else
- ret = huge_malloc(NULL, a0get(), size, zero, false);
-
- return (ret);
+ return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
}
static void
-a0idalloc(void *ptr)
+a0idalloc(void *ptr, bool is_metadata)
{
- arena_chunk_t *chunk;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr))
- arena_dalloc(NULL, chunk, ptr, false);
- else
- huge_dalloc(NULL, ptr, false);
+ idalloctm(NULL, ptr, false, is_metadata);
}
void *
-a0malloc(size_t size, bool zero)
+a0malloc(size_t size)
{
- return (a0imalloc(size, zero));
+ return (a0ialloc(size, false, true));
}
void
a0dalloc(void *ptr)
{
- a0idalloc(ptr);
+ a0idalloc(ptr, true);
}
/*
@@ -343,7 +332,7 @@ bootstrap_malloc(size_t size)
if (unlikely(size == 0))
size = 1;
- return (a0imalloc(size, false));
+ return (a0ialloc(size, false, false));
}
void *
@@ -357,7 +346,7 @@ bootstrap_calloc(size_t num, size_t size)
num_size = 1;
}
- return (a0imalloc(num_size, true));
+ return (a0ialloc(num_size, true, false));
}
void
@@ -367,7 +356,7 @@ bootstrap_free(void *ptr)
if (unlikely(ptr == NULL))
return;
- a0idalloc(ptr);
+ a0idalloc(ptr, false);
}
/* Create a new arena and insert it into the arenas array at index ind. */
@@ -382,7 +371,7 @@ arena_init_locked(unsigned ind)
unsigned narenas_new = narenas_total + 1;
arena_t **arenas_new =
(arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
- sizeof(arena_t *)), false);
+ sizeof(arena_t *)));
if (arenas_new == NULL)
return (NULL);
memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
@@ -519,7 +508,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
if (!*arenas_cache_bypassp) {
*arenas_cache_bypassp = true;
arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
- narenas_cache, false);
+ narenas_cache);
*arenas_cache_bypassp = false;
} else
arenas_cache = NULL;
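
Throughout the commit, internal allocations are tagged at both ends of their lifetime: iallocztm()/ipallocztm() carry an is_metadata flag (second-to-last argument at the call sites in this diff), and idalloctm() carries the matching flag, so the owning arena's metadata_allocated counter balances. A sketch of the pairing convention; foo_t is a placeholder type:

	foo_t *p = (foo_t *)iallocztm(tsd, sizeof(foo_t), false, true, true,
	    NULL);	/* zero, try_tcache, is_metadata, arena */
	if (p != NULL) {
		/* ... use p as allocator-internal state ... */
		idalloctm(tsd, p, true, true);	/* try_tcache, is_metadata */
	}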
diff --git a/src/prof.c b/src/prof.c
index 1103cc9..06f5499 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -532,8 +532,8 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
/*
* Create a single allocation that has space for vec of length bt->len.
*/
- prof_gctx_t *gctx = (prof_gctx_t *)imalloc(tsd, offsetof(prof_gctx_t,
- vec) + (bt->len * sizeof(void *)));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
+ vec) + (bt->len * sizeof(void *)), false, true, true, NULL);
if (gctx == NULL)
return (NULL);
gctx->lock = prof_gctx_mutex_choose();
@@ -574,7 +574,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
malloc_mutex_unlock(gctx->lock);
- idalloc(tsd, gctx);
+ idalloctm(tsd, gctx, true, true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
@@ -674,7 +674,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
prof_tdata_destroy(tsd, tdata, false);
if (destroy_tctx)
- idalloc(tsd, tctx);
+ idalloctm(tsd, tctx, true, true);
}
static bool
@@ -703,7 +703,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
prof_leave(tsd, tdata);
- idalloc(tsd, gctx.v);
+ idalloctm(tsd, gctx.v, true, true);
return (true);
}
new_gctx = true;
@@ -760,7 +760,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
return (NULL);
/* Link a prof_tctx_t into gctx for this thread. */
- ret.v = imalloc(tsd, sizeof(prof_tctx_t));
+ ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, true, true,
+ NULL);
if (ret.p == NULL) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
@@ -778,7 +779,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
if (error) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- idalloc(tsd, ret.v);
+ idalloctm(tsd, ret.v, true, true);
return (NULL);
}
malloc_mutex_lock(gctx->lock);
@@ -1158,7 +1159,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
to_destroy);
tctx_tree_remove(&gctx->tctxs,
to_destroy);
- idalloc(tsd, to_destroy);
+ idalloctm(tsd, to_destroy, true, true);
} else
next = NULL;
} while (next != NULL);
@@ -1640,7 +1641,8 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- tdata = (prof_tdata_t *)imalloc(tsd, sizeof(prof_tdata_t));
+ tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
+ true, true, NULL);
if (tdata == NULL)
return (NULL);
@@ -1653,7 +1655,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
- idalloc(tsd, tdata);
+ idalloctm(tsd, tdata, true, true);
return (NULL);
}
@@ -1706,9 +1708,9 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
tdata_tree_remove(&tdatas, tdata);
if (tdata->thread_name != NULL)
- idalloc(tsd, tdata->thread_name);
+ idalloctm(tsd, tdata->thread_name, true, true);
ckh_delete(tsd, &tdata->bt2tctx);
- idalloc(tsd, tdata);
+ idalloctm(tsd, tdata, true, true);
}
static void
@@ -1869,7 +1871,7 @@ prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
if (size == 1)
return ("");
- ret = imalloc(tsd, size);
+ ret = iallocztm(tsd, size, false, true, true, NULL);
if (ret == NULL)
return (NULL);
memcpy(ret, thread_name, size);
@@ -1901,7 +1903,7 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
return (EAGAIN);
if (tdata->thread_name != NULL) {
- idalloc(tsd, tdata->thread_name);
+ idalloctm(tsd, tdata->thread_name, true, true);
tdata->thread_name = NULL;
}
if (strlen(s) > 0)
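
The prof.c changes are mechanical: each imalloc()/idalloc() of profiling bookkeeping (gctx, tctx, tdata, thread-name strings) becomes the metadata-tagged equivalent, since those structures belong to the allocator rather than to the application. The same substitution recurs in quarantine.c and tcache.c below. The mapping, as read off the call sites:

	imalloc(tsd, size)  ->  iallocztm(tsd, size, false, true, true, NULL)
	idalloc(tsd, ptr)   ->  idalloctm(tsd, ptr, true, true)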
diff --git a/src/quarantine.c b/src/quarantine.c
index 12c37e0..094b44d 100644
--- a/src/quarantine.c
+++ b/src/quarantine.c
@@ -26,8 +26,9 @@ quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
assert(tsd_nominal(tsd));
- quarantine = (quarantine_t *)imalloc(tsd, offsetof(quarantine_t, objs) +
- ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
+ quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+ + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false, true,
+ true, NULL);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@@ -54,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
- idalloc(tsd, quarantine);
+ idalloctm(tsd, quarantine, true, true);
}
static quarantine_t *
@@ -86,7 +87,7 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
- idalloc(tsd, quarantine);
+ idalloctm(tsd, quarantine, true, true);
tsd_quarantine_set(tsd, ret);
return (ret);
@@ -176,7 +177,7 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) {
quarantine_drain(tsd, quarantine, 0);
- idalloc(tsd, quarantine);
+ idalloctm(tsd, quarantine, true, true);
tsd_quarantine_set(tsd, NULL);
}
}
diff --git a/src/stats.c b/src/stats.c
index 2b3da64..865f775 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -265,6 +265,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned nthreads;
const char *dss;
size_t page, pactive, pdirty, mapped;
+ size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
@@ -331,6 +332,12 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n",
mapped);
+ CTL_I_GET("stats.arenas.0.metadata.mapped", &metadata_mapped, size_t);
+ CTL_I_GET("stats.arenas.0.metadata.allocated", &metadata_allocated,
+ size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "metadata: mapped: %zu, allocated: %zu\n", metadata_mapped,
+ metadata_allocated);
if (bins)
stats_arena_bins_print(write_cb, cbopaque, i);
@@ -539,17 +546,18 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (config_stats) {
size_t *cactive;
- size_t allocated, active, mapped;
+ size_t allocated, active, metadata, mapped;
size_t chunks_current, chunks_high;
uint64_t chunks_total;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
- "Allocated: %zu, active: %zu, mapped: %zu\n",
- allocated, active, mapped);
+ "Allocated: %zu, active: %zu, metadata: %zu, mapped: %zu\n",
+ allocated, active, metadata, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", atomic_read_z(cactive));
diff --git a/src/tcache.c b/src/tcache.c
index 34224ec..d638015 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -298,7 +298,7 @@ tcache_create(tsd_t *tsd, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
- tcache = ipalloct(tsd, size, CACHELINE, true, false, arena);
+ tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, arena);
if (tcache == NULL)
return (NULL);
@@ -353,7 +353,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
prof_idump();
- idalloct(tsd, tcache, false);
+ idalloctm(tsd, tcache, false, true);
}
void
diff --git a/src/tsd.c b/src/tsd.c
index 00d8f95..3b59acf 100644
--- a/src/tsd.c
+++ b/src/tsd.c
@@ -15,7 +15,7 @@ void *
malloc_tsd_malloc(size_t size)
{
- return (a0malloc(CACHELINE_CEILING(size), false));
+ return (a0malloc(CACHELINE_CEILING(size)));
}
void