Diffstat (limited to 'src'):

 src/arena.c      | 113
 src/base.c       |  12
 src/chunk.c      | 157
 src/chunk_mmap.c |   2
 src/ctl.c        | 113
 src/huge.c       | 121
 src/jemalloc.c   |   4
 src/stats.c      |  29

 8 files changed, 331 insertions(+), 220 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index d956be3..f5d7d06 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -560,6 +560,65 @@ arena_chunk_init_spare(arena_t *arena)
}
static arena_chunk_t *
+arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
+ bool *zero)
+{
+ arena_chunk_t *chunk;
+ chunk_alloc_t *chunk_alloc;
+ chunk_dalloc_t *chunk_dalloc;
+
+ chunk_alloc = arena->chunk_alloc;
+ chunk_dalloc = arena->chunk_dalloc;
+ malloc_mutex_unlock(&arena->lock);
+ chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
+ arena->ind, size, alignment, zero);
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats && chunk != NULL)
+ arena->stats.mapped += chunksize;
+
+ return (chunk);
+}
+
+void *
+arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
+ bool *zero)
+{
+ void *ret;
+ chunk_alloc_t *chunk_alloc;
+ chunk_dalloc_t *chunk_dalloc;
+
+ malloc_mutex_lock(&arena->lock);
+ chunk_alloc = arena->chunk_alloc;
+ chunk_dalloc = arena->chunk_dalloc;
+ if (config_stats) {
+ /* Optimistically update stats prior to unlocking. */
+ arena->stats.mapped += size;
+ arena->stats.allocated_huge += size;
+ arena->stats.nmalloc_huge++;
+ arena->stats.nrequests_huge++;
+ }
+ arena->nactive += (size >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
+
+ ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
+ size, alignment, zero);
+ if (config_stats) {
+ if (ret != NULL)
+ stats_cactive_add(size);
+ else {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(&arena->lock);
+ arena->stats.mapped -= size;
+ arena->stats.allocated_huge -= size;
+ arena->stats.nmalloc_huge--;
+ malloc_mutex_unlock(&arena->lock);
+ }
+ }
+
+ return (ret);
+}
+
+static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
arena_chunk_t *chunk;
@@ -569,14 +628,9 @@ arena_chunk_init_hard(arena_t *arena)
assert(arena->spare == NULL);
zero = false;
- malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
- &zero, arena->dss_prec);
- malloc_mutex_lock(&arena->lock);
+ chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
if (chunk == NULL)
return (NULL);
- if (config_stats)
- arena->stats.mapped += chunksize;
chunk->arena = arena;
@@ -645,7 +699,38 @@ arena_chunk_alloc(arena_t *arena)
}
static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
+{
+ chunk_dalloc_t *chunk_dalloc;
+
+ chunk_dalloc = arena->chunk_dalloc;
+ malloc_mutex_unlock(&arena->lock);
+ chunk_dalloc((void *)chunk, chunksize, arena->ind);
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats)
+ arena->stats.mapped -= chunksize;
+}
+
+void
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
+{
+ chunk_dalloc_t *chunk_dalloc;
+
+ malloc_mutex_lock(&arena->lock);
+ chunk_dalloc = arena->chunk_dalloc;
+ if (config_stats) {
+ arena->stats.mapped -= size;
+ arena->stats.allocated_huge -= size;
+ arena->stats.ndalloc_huge++;
+ stats_cactive_sub(size);
+ }
+ arena->nactive -= (size >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
+ chunk_dalloc(chunk, size, arena->ind);
+}
+
+static void
+arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -667,11 +752,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
arena_chunk_t *spare = arena->spare;
arena->spare = chunk;
- malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize, true);
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
- arena->stats.mapped -= chunksize;
+ arena_chunk_dalloc_internal(arena, spare);
} else
arena->spare = chunk;
}
@@ -1231,7 +1312,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
if (size == arena_maxclass) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxclass >> LG_PAGE));
- arena_chunk_dealloc(arena, chunk);
+ arena_chunk_dalloc(arena, chunk);
}
/*
@@ -2283,6 +2364,10 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
+ astats->allocated_huge += arena->stats.allocated_huge;
+ astats->nmalloc_huge += arena->stats.nmalloc_huge;
+ astats->ndalloc_huge += arena->stats.ndalloc_huge;
+ astats->nrequests_huge += arena->stats.nrequests_huge;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
@@ -2319,6 +2404,8 @@ arena_new(arena_t *arena, unsigned ind)
arena->ind = ind;
arena->nthreads = 0;
+ arena->chunk_alloc = chunk_alloc_default;
+ arena->chunk_dalloc = chunk_dalloc_default;
if (malloc_mutex_init(&arena->lock))
return (true);
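
The new arena_chunk_alloc_huge() above bumps the arena counters while the lock is held, drops the lock across the potentially slow chunk allocation, and rolls the counters back only on failure. The following standalone sketch (hypothetical names, not jemalloc source) shows that optimistic-accounting pattern in isolation:

#include <pthread.h>
#include <stddef.h>

struct counters {
	pthread_mutex_t lock;
	size_t mapped;
	size_t allocated;
};

void *
alloc_accounted(struct counters *c, size_t size, void *(*slow_alloc)(size_t))
{
	void *ret;

	pthread_mutex_lock(&c->lock);
	/* Optimistically assume the allocation will succeed. */
	c->mapped += size;
	c->allocated += size;
	pthread_mutex_unlock(&c->lock);

	ret = slow_alloc(size);		/* May block; lock is not held. */
	if (ret == NULL) {
		/* Revert the optimistic updates. */
		pthread_mutex_lock(&c->lock);
		c->mapped -= size;
		c->allocated -= size;
		pthread_mutex_unlock(&c->lock);
	}
	return (ret);
}
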
diff --git a/src/base.c b/src/base.c
index 03dcf8f..409c7bb 100644
--- a/src/base.c
+++ b/src/base.c
@@ -17,23 +17,15 @@ static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool base_pages_alloc(size_t minsize);
-
-/******************************************************************************/
static bool
base_pages_alloc(size_t minsize)
{
size_t csize;
- bool zero;
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
- zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero,
- chunk_dss_prec_get());
+ base_pages = chunk_alloc_base(csize);
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
@@ -100,7 +92,7 @@ base_node_alloc(void)
}
void
-base_node_dealloc(extent_node_t *node)
+base_node_dalloc(extent_node_t *node)
{
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
diff --git a/src/chunk.c b/src/chunk.c
index 246324a..38d0286 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -31,13 +31,12 @@ size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
-static void *chunk_recycle(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
- bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, void *chunk, size_t size);
+static void chunk_dalloc_core(void *chunk, size_t size);
/******************************************************************************/
@@ -104,7 +103,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
malloc_mutex_unlock(&chunks_mtx);
node = base_node_alloc();
if (node == NULL) {
- chunk_dealloc(ret, size, true);
+ chunk_dalloc_core(ret, size);
return (NULL);
}
malloc_mutex_lock(&chunks_mtx);
@@ -119,7 +118,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
malloc_mutex_unlock(&chunks_mtx);
if (node != NULL)
- base_node_dealloc(node);
+ base_node_dalloc(node);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
@@ -141,8 +140,8 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
* takes advantage of this to avoid demanding zeroed chunks, but taking
* advantage of them if they are returned.
*/
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+static void *
+chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
dss_prec_t dss_prec)
{
void *ret;
@@ -156,59 +155,105 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
if (have_dss && dss_prec == dss_prec_primary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
}
/* mmap. */
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
- goto label_return;
+ return (ret);
if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
+ return (ret);
}
/* All strategies for allocation failed. */
- ret = NULL;
-label_return:
- if (ret != NULL) {
- if (config_ivsalloc && base == false) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
- }
- }
- if (config_stats || config_prof) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks =
- stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
- }
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ return (NULL);
+}
+
+static bool
+chunk_register(void *chunk, size_t size, bool base)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+
+ if (config_ivsalloc && base == false) {
+ if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
+ return (true);
+ }
+ if (config_stats || config_prof) {
+ bool gdump;
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ if (stats_chunks.curchunks > stats_chunks.highchunks) {
+ stats_chunks.highchunks =
+ stats_chunks.curchunks;
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
+ gdump = false;
+ malloc_mutex_unlock(&chunks_mtx);
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
+ }
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
+ return (false);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+ void *ret;
+ bool zero;
+
+ zero = false;
+ ret = chunk_alloc_core(size, chunksize, true, &zero,
+ chunk_dss_prec_get());
+ if (ret == NULL)
+ return (NULL);
+ if (chunk_register(ret, size, true)) {
+ chunk_dalloc_core(ret, size);
+ return (NULL);
}
- assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
+void *
+chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
+ unsigned arena_ind, size_t size, size_t alignment, bool *zero)
+{
+ void *ret;
+
+ ret = chunk_alloc(size, alignment, zero, arena_ind);
+ if (ret != NULL && chunk_register(ret, size, false)) {
+ chunk_dalloc(ret, size, arena_ind);
+ ret = NULL;
+ }
+
+ return (ret);
+}
+
+/* Default arena chunk allocation routine in the absence of user override. */
+void *
+chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+ unsigned arena_ind)
+{
+
+ return (chunk_alloc_core(size, alignment, false, zero,
+ arenas[arena_ind]->dss_prec));
+}
+
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
@@ -292,9 +337,9 @@ label_return:
* avoid potential deadlock.
*/
if (xnode != NULL)
- base_node_dealloc(xnode);
+ base_node_dalloc(xnode);
if (xprev != NULL)
- base_node_dealloc(xprev);
+ base_node_dalloc(xprev);
}
void
@@ -307,12 +352,12 @@ chunk_unmap(void *chunk, size_t size)
if (have_dss && chunk_in_dss(chunk))
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
- else if (chunk_dealloc_mmap(chunk, size))
+ else if (chunk_dalloc_mmap(chunk, size))
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}
-void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+static void
+chunk_dalloc_core(void *chunk, size_t size)
{
assert(chunk != NULL);
@@ -329,8 +374,16 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
malloc_mutex_unlock(&chunks_mtx);
}
- if (unmap)
- chunk_unmap(chunk, size);
+ chunk_unmap(chunk, size);
+}
+
+/* Default arena chunk deallocation routine in the absence of user override. */
+bool
+chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+ chunk_dalloc_core(chunk, size);
+ return (false);
}
bool
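
chunk_alloc_default() and chunk_dalloc_default() above define the signatures a per-arena override must match; chunk_alloc_arena() registers the returned chunk on the caller's behalf. The following is a minimal sketch of hypothetical user hooks with those signatures (not part of this commit); it is mmap-backed and Linux-specific, and a real hook would also have to honor the requested alignment:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static void *
my_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
{
	void *ret;

	/*
	 * A real hook must return memory aligned to "alignment" (e.g. by
	 * over-mapping and trimming); that is omitted here for brevity.
	 */
	(void)alignment;
	(void)arena_ind;
	ret = mmap(NULL, size, PROT_READ|PROT_WRITE,
	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	*zero = true;	/* Fresh anonymous mappings are zero-filled. */
	return (ret);
}

static bool
my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{

	(void)arena_ind;
	/* Like chunk_dalloc_default(), return false on success. */
	return (munmap(chunk, size) != 0);
}
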
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 2056d79..f960e06 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -200,7 +200,7 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
}
bool
-chunk_dealloc_mmap(void *chunk, size_t size)
+chunk_dalloc_mmap(void *chunk, size_t size)
{
if (config_munmap)
diff --git a/src/ctl.c b/src/ctl.c
index 9ee5de9..a193605 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -76,7 +76,6 @@ CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_mremap)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
@@ -113,6 +112,8 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_chunk_alloc)
+CTL_PROTO(arena_i_chunk_dalloc)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
@@ -135,9 +136,6 @@ CTL_PROTO(prof_interval)
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
-CTL_PROTO(stats_huge_allocated)
-CTL_PROTO(stats_huge_nmalloc)
-CTL_PROTO(stats_huge_ndalloc)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -146,6 +144,10 @@ CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
+CTL_PROTO(stats_arenas_i_huge_allocated)
+CTL_PROTO(stats_arenas_i_huge_nmalloc)
+CTL_PROTO(stats_arenas_i_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
@@ -212,7 +214,6 @@ static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("mremap"), CTL(config_mremap)},
{NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
@@ -251,9 +252,15 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_accum"), CTL(opt_prof_accum)}
};
+static const ctl_named_node_t chunk_node[] = {
+ {NAME("alloc"), CTL(arena_i_chunk_alloc)},
+ {NAME("dalloc"), CTL(arena_i_chunk_dalloc)}
+};
+
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
- {NAME("dss"), CTL(arena_i_dss)}
+ {NAME("dss"), CTL(arena_i_dss)},
+ {NAME("chunk"), CHILD(named, chunk)},
};
static const ctl_named_node_t super_arena_i_node[] = {
{NAME(""), CHILD(named, arena_i)}
@@ -313,12 +320,6 @@ static const ctl_named_node_t stats_chunks_node[] = {
{NAME("high"), CTL(stats_chunks_high)}
};
-static const ctl_named_node_t stats_huge_node[] = {
- {NAME("allocated"), CTL(stats_huge_allocated)},
- {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
- {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
-};
-
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -333,6 +334,13 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = {
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
+static const ctl_named_node_t stats_arenas_i_huge_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)},
+};
+
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
@@ -377,6 +385,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("purged"), CTL(stats_arenas_i_purged)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
};
@@ -394,7 +403,6 @@ static const ctl_named_node_t stats_node[] = {
{NAME("active"), CTL(stats_active)},
{NAME("mapped"), CTL(stats_mapped)},
{NAME("chunks"), CHILD(named, stats_chunks)},
- {NAME("huge"), CHILD(named, stats_huge)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
};
@@ -492,6 +500,11 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
+ sstats->astats.allocated_huge += astats->astats.allocated_huge;
+ sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+ sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+ sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
+
for (i = 0; i < nlclasses; i++) {
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
@@ -618,12 +631,6 @@ ctl_refresh(void)
ctl_stats.chunks.total = stats_chunks.nchunks;
ctl_stats.chunks.high = stats_chunks.highchunks;
malloc_mutex_unlock(&chunks_mtx);
-
- malloc_mutex_lock(&huge_mtx);
- ctl_stats.huge.allocated = huge_allocated;
- ctl_stats.huge.nmalloc = huge_nmalloc;
- ctl_stats.huge.ndalloc = huge_ndalloc;
- malloc_mutex_unlock(&huge_mtx);
}
/*
@@ -654,10 +661,9 @@ ctl_refresh(void)
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
- + ctl_stats.huge.allocated;
+ + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
ctl_stats.active =
- (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
- + ctl_stats.huge.allocated;
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
}
@@ -1132,7 +1138,6 @@ label_return:
CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
@@ -1368,6 +1373,57 @@ label_return:
return (ret);
}
+static int
+arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+ malloc_mutex_lock(&arena->lock);
+ READ(arena->chunk_alloc, chunk_alloc_t *);
+ WRITE(arena->chunk_alloc, chunk_alloc_t *);
+ } else {
+ ret = EFAULT;
+ goto label_outer_return;
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+arena_i_chunk_dalloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+
+ int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+ malloc_mutex_lock(&arena->lock);
+ READ(arena->chunk_dalloc, chunk_dalloc_t *);
+ WRITE(arena->chunk_dalloc, chunk_dalloc_t *);
+ } else {
+ ret = EFAULT;
+ goto label_outer_return;
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1552,9 +1608,6 @@ CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
@@ -1585,6 +1638,14 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nrequests_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
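
The new arena.<i>.chunk.alloc and arena.<i>.chunk.dalloc entries make those hooks writable at run time. A sketch of installing the hypothetical my_chunk_alloc/my_chunk_dalloc hooks from the previous sketch through mallctl(), assuming an unprefixed jemalloc build:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Hook signatures mirror chunk_alloc_default()/chunk_dalloc_default(). */
void	*my_chunk_alloc(size_t size, size_t alignment, bool *zero,
    unsigned arena_ind);
bool	my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind);

static int
install_chunk_hooks(unsigned arena_ind)
{
	char name[64];
	void *(*alloc_hook)(size_t, size_t, bool *, unsigned) = my_chunk_alloc;
	bool (*dalloc_hook)(void *, size_t, unsigned) = my_chunk_dalloc;

	snprintf(name, sizeof(name), "arena.%u.chunk.alloc", arena_ind);
	if (mallctl(name, NULL, NULL, &alloc_hook, sizeof(alloc_hook)) != 0)
		return (-1);
	snprintf(name, sizeof(name), "arena.%u.chunk.dalloc", arena_ind);
	if (mallctl(name, NULL, NULL, &dalloc_hook, sizeof(dalloc_hook)) != 0)
		return (-1);
	return (0);
}
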
diff --git a/src/huge.c b/src/huge.c
index e725fd9..d08ed4a 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -4,11 +4,8 @@
/******************************************************************************/
/* Data. */
-uint64_t huge_nmalloc;
-uint64_t huge_ndalloc;
-size_t huge_allocated;
-
-malloc_mutex_t huge_mtx;
+/* Protects chunk-related data structures. */
+static malloc_mutex_t huge_mtx;
/******************************************************************************/
@@ -16,14 +13,14 @@ malloc_mutex_t huge_mtx;
static extent_tree_t huge;
void *
-huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
+huge_malloc(arena_t *arena, size_t size, bool zero)
{
- return (huge_palloc(size, chunksize, zero, dss_prec));
+ return (huge_palloc(arena, size, chunksize, zero));
}
void *
-huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
+huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
void *ret;
size_t csize;
@@ -48,23 +45,20 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
+ arena = choose_arena(arena);
+ ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
if (ret == NULL) {
- base_node_dealloc(node);
+ base_node_dalloc(node);
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
node->size = csize;
+ node->arena = arena;
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
- if (config_stats) {
- stats_cactive_add(csize);
- huge_nmalloc++;
- huge_allocated += csize;
- }
malloc_mutex_unlock(&huge_mtx);
if (config_fill && zero == false) {
@@ -96,8 +90,8 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
}
void *
-huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
+huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
@@ -112,18 +106,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* space and copying.
*/
if (alignment > chunksize)
- ret = huge_palloc(size + extra, alignment, zero, dss_prec);
+ ret = huge_palloc(arena, size + extra, alignment, zero);
else
- ret = huge_malloc(size + extra, zero, dss_prec);
+ ret = huge_malloc(arena, size + extra, zero);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
- ret = huge_palloc(size, alignment, zero, dss_prec);
+ ret = huge_palloc(arena, size, alignment, zero);
else
- ret = huge_malloc(size, zero, dss_prec);
+ ret = huge_malloc(arena, size, zero);
if (ret == NULL)
return (NULL);
@@ -134,59 +128,8 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
-
-#ifdef JEMALLOC_MREMAP
- /*
- * Use mremap(2) if this is a huge-->huge reallocation, and neither the
- * source nor the destination are in dss.
- */
- if (oldsize >= chunksize && (have_dss == false || (chunk_in_dss(ptr)
- == false && chunk_in_dss(ret) == false))) {
- size_t newsize = huge_salloc(ret);
-
- /*
- * Remove ptr from the tree of huge allocations before
- * performing the remap operation, in order to avoid the
- * possibility of another thread acquiring that mapping before
- * this one removes it from the tree.
- */
- huge_dalloc(ptr, false);
- if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
- ret) == MAP_FAILED) {
- /*
- * Assuming no chunk management bugs in the allocator,
- * the only documented way an error can occur here is
- * if the application changed the map type for a
- * portion of the old allocation. This is firmly in
- * undefined behavior territory, so write a diagnostic
- * message, and optionally abort.
- */
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in mremap(): %s\n",
- buf);
- if (opt_abort)
- abort();
- memcpy(ret, ptr, copysize);
- chunk_dealloc_mmap(ptr, oldsize);
- } else if (config_fill && zero == false && opt_junk && oldsize
- < newsize) {
- /*
- * mremap(2) clobbers the original mapping, so
- * junk/zero filling is not preserved. There is no
- * need to zero fill here, since any trailing
- * uninititialized memory is demand-zeroed by the
- * kernel, but junk filling must be redone.
- */
- memset(ret + oldsize, 0xa5, newsize - oldsize);
- }
- } else
-#endif
- {
- memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
- }
+ memcpy(ret, ptr, copysize);
+ iqalloct(ptr, try_tcache_dalloc);
return (ret);
}
@@ -214,7 +157,7 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
void
-huge_dalloc(void *ptr, bool unmap)
+huge_dalloc(void *ptr)
{
extent_node_t *node, key;
@@ -227,20 +170,11 @@ huge_dalloc(void *ptr, bool unmap)
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
- if (config_stats) {
- stats_cactive_sub(node->size);
- huge_ndalloc++;
- huge_allocated -= node->size;
- }
-
malloc_mutex_unlock(&huge_mtx);
- if (unmap)
- huge_dalloc_junk(node->addr, node->size);
-
- chunk_dealloc(node->addr, node->size, unmap);
-
- base_node_dealloc(node);
+ huge_dalloc_junk(node->addr, node->size);
+ arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
+ base_node_dalloc(node);
}
size_t
@@ -263,13 +197,6 @@ huge_salloc(const void *ptr)
return (ret);
}
-dss_prec_t
-huge_dss_prec_get(arena_t *arena)
-{
-
- return (arena_dss_prec_get(choose_arena(arena)));
-}
-
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
@@ -316,12 +243,6 @@ huge_boot(void)
return (true);
extent_tree_ad_new(&huge);
- if (config_stats) {
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
- }
-
return (false);
}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 289d7f7..43a494e 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1983,7 +1983,7 @@ a0alloc(size_t size, bool zero)
if (size <= arena_maxclass)
return (arena_malloc(arenas[0], size, zero, false));
else
- return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
+ return (huge_malloc(NULL, size, zero));
}
void *
@@ -2012,7 +2012,7 @@ a0free(void *ptr)
if (chunk != ptr)
arena_dalloc(chunk, ptr, false);
else
- huge_dalloc(ptr, true);
+ huge_dalloc(ptr);
}
/******************************************************************************/
diff --git a/src/stats.c b/src/stats.c
index bef2ab3..a0eb297 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -213,6 +213,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+ size_t huge_allocated;
+ uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
CTL_GET("arenas.page", &page, size_t);
@@ -249,12 +251,19 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
+ CTL_I_GET("stats.arenas.0.huge.allocated", &huge_allocated, size_t);
+ CTL_I_GET("stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
+ CTL_I_GET("stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
+ CTL_I_GET("stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- small_allocated + large_allocated,
- small_nmalloc + large_nmalloc,
- small_ndalloc + large_ndalloc,
- small_nrequests + large_nrequests);
+ small_allocated + large_allocated + huge_allocated,
+ small_nmalloc + large_nmalloc + huge_nmalloc,
+ small_ndalloc + large_ndalloc + huge_ndalloc,
+ small_nrequests + large_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
@@ -458,8 +467,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
size_t allocated, active, mapped;
size_t chunks_current, chunks_high;
uint64_t chunks_total;
- size_t huge_allocated;
- uint64_t huge_nmalloc, huge_ndalloc;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
@@ -481,16 +488,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
" %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
- /* Print huge stats. */
- CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
- CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
- CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "huge: nmalloc ndalloc allocated\n");
- malloc_cprintf(write_cb, cbopaque,
- " %12"PRIu64" %12"PRIu64" %12zu\n",
- huge_nmalloc, huge_ndalloc, huge_allocated);
-
if (merged) {
unsigned narenas;