-rw-r--r--  ChangeLog                          |  6
-rw-r--r--  doc/jemalloc.xml.in                | 12
-rw-r--r--  include/jemalloc/internal/arena.h  |  5
-rw-r--r--  include/jemalloc/internal/ctl.h    |  1
-rw-r--r--  src/arena.c                        |  8
-rw-r--r--  src/ctl.c                          | 11
-rw-r--r--  src/stats.c                        | 11
7 files changed, 35 insertions(+), 19 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 2607576..8cc214a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -38,7 +38,8 @@ found in the git revision history:
"opt.prof_thread_active_init", "prof.thread_active_init", and
"thread.prof.active" mallctls.
- Add support for per arena application-specified chunk allocators, configured
- via the "arena<i>.chunk.alloc" and "arena<i>.chunk.dalloc" mallctls.
+ via the "arena<i>.chunk.alloc", "arena<i>.chunk.dalloc", and
+ "arena.<i>.chunk.purge" mallctls.
- Refactor huge allocation to be managed by arenas, so that arenas now
function as general purpose independent allocators. This is important in
the context of user-specified chunk allocators, aside from the scalability
@@ -65,6 +66,9 @@ found in the git revision history:
"stats.arenas.<i>.metadata.allocated" mallctls.
- Add the "stats.resident" mallctl, which reports the upper limit of
physically resident memory mapped by the allocator.
+ - Add per arena control over unused dirty page purging, via the
+ "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
+ "stats.arenas.<i>.lg_dirty_mult" mallctls.
- Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
feature on/off during program execution.
- Add sdallocx(), which implements sized deallocation. The primary
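
The three mallctls listed in the new entry are exercised through jemalloc's public mallctl() interface. The following is a minimal sketch, not part of this patch: error handling is omitted and arena index 0 is hard-coded purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	ssize_t lg_dirty_mult = 5;	/* Request a 2^5:1 active:dirty ratio. */
	ssize_t cur;
	uint64_t epoch = 1;
	size_t sz;

	/* Set the default ratio inherited by newly created arenas. */
	mallctl("arenas.lg_dirty_mult", NULL, NULL, &lg_dirty_mult,
	    sizeof(lg_dirty_mult));

	/* Override the ratio for arena 0 only. */
	lg_dirty_mult = 3;
	mallctl("arena.0.lg_dirty_mult", NULL, NULL, &lg_dirty_mult,
	    sizeof(lg_dirty_mult));

	/* Refresh the stats snapshot, then read the value back from it. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);
	sz = sizeof(cur);
	mallctl("stats.arenas.0.lg_dirty_mult", &cur, &sz, NULL, 0);
	printf("arena 0 lg_dirty_mult: %zd\n", cur);
	return (0);
}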
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index adff6a4..d3f3616 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -1983,6 +1983,18 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.arenas.i.lg_dirty_mult">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.lg_dirty_mult</mallctl>
+          (<type>ssize_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Minimum ratio (log base 2) of active to dirty pages.
+        See <link
+        linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+        for details.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="stats.arenas.i.nthreads">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
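
Because the arena index is a component of the mallctl name, a caller that queries this statistic for an arbitrary arena typically translates the name to a MIB once and then patches the index slot, which is exactly what the CTL_M2_GET macro in src/stats.c does further down. A sketch against the public API; the helper name is illustrative rather than anything defined by this patch, and the caller is assumed to have refreshed the "epoch" mallctl first:

#include <jemalloc/jemalloc.h>

static ssize_t
stats_arena_lg_dirty_mult(unsigned arena_ind)
{
	/* "stats.arenas.<i>.lg_dirty_mult" has four name components. */
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	ssize_t lg_dirty_mult;
	size_t sz = sizeof(lg_dirty_mult);

	mallctlnametomib("stats.arenas.0.lg_dirty_mult", mib, &miblen);
	mib[2] = (size_t)arena_ind;	/* Substitute the arena index. */
	mallctlbymib(mib, miblen, &lg_dirty_mult, &sz, NULL, 0);
	return (lg_dirty_mult);
}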
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 56ee74a..dff99fb 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -470,8 +470,9 @@ dss_prec_t arena_dss_prec_get(arena_t *arena);
 bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
 ssize_t	arena_lg_dirty_mult_default_get(void);
 bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
-void	arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+void	arena_stats_merge(arena_t *arena, const char **dss,
+    ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
+    arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
 arena_t	*arena_new(unsigned ind);
 void	arena_boot(void);
diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index 7c2a4be..751c14b 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -34,6 +34,7 @@ struct ctl_arena_stats_s {
 	bool			initialized;
 	unsigned		nthreads;
 	const char		*dss;
+	ssize_t			lg_dirty_mult;
 	size_t			pactive;
 	size_t			pdirty;
 	arena_stats_t		astats;
diff --git a/src/arena.c b/src/arena.c
index d38ffc6..bc13d20 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2657,14 +2657,16 @@ arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
 }
 
 void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
+arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
+    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+    malloc_huge_stats_t *hstats)
 {
 	unsigned i;
 
 	malloc_mutex_lock(&arena->lock);
 	*dss = dss_prec_names[arena->dss_prec];
+	*lg_dirty_mult = arena->lg_dirty_mult;
 	*nactive += arena->nactive;
 	*ndirty += arena->ndirty;
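
Note that the new out parameter is assigned (*lg_dirty_mult = ...) while the page counters accumulate (+=): as the src/ctl.c hunk below shows, ctl_arena_stats_amerge() calls this function once per arena with that arena's own ctl_arena_stats_t, and a per-arena ratio, unlike page counts, cannot meaningfully be summed across arenas.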
diff --git a/src/ctl.c b/src/ctl.c
index 4493546..d215b19 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -181,6 +181,7 @@ CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
 INDEX_PROTO(stats_arenas_i_hchunks_j)
 CTL_PROTO(stats_arenas_i_nthreads)
 CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_lg_dirty_mult)
 CTL_PROTO(stats_arenas_i_pactive)
 CTL_PROTO(stats_arenas_i_pdirty)
 CTL_PROTO(stats_arenas_i_mapped)
@@ -443,6 +444,7 @@ static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
 static const ctl_named_node_t stats_arenas_i_node[] = {
 	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
 	{NAME("dss"),		CTL(stats_arenas_i_dss)},
+	{NAME("lg_dirty_mult"),	CTL(stats_arenas_i_lg_dirty_mult)},
 	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
 	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
 	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
@@ -524,6 +526,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
 {
 
 	astats->dss = dss_prec_names[dss_prec_limit];
+	astats->lg_dirty_mult = -1;
 	astats->pactive = 0;
 	astats->pdirty = 0;
 	if (config_stats) {
@@ -545,9 +548,9 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
 {
 	unsigned i;
 
-	arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
-	    &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats,
-	    cstats->hstats);
+	arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
+	    &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
+	    cstats->lstats, cstats->hstats);
 
 	for (i = 0; i < NBINS; i++) {
 		cstats->allocated_small += cstats->bstats[i].curregs *
@@ -2000,6 +2003,8 @@ CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
 
 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+    ssize_t)
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
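
The CTL_PROTO() declaration and node-table entry above bind the new name to a handler generated by the existing CTL_RO_GEN() macro. In rough paraphrase (the authoritative expansion is the macro definition earlier in src/ctl.c, which also locks ctl_mtx and funnels through the READONLY()/READ() helpers), the generated stats_arenas_i_lg_dirty_mult_ctl behaves like the following, as it would appear inside src/ctl.c:

static int
stats_arenas_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	ssize_t oldval;

	/* The node is read-only ("r-"), so reject any write. */
	if (newp != NULL || newlen != 0)
		return (EPERM);
	/* Copy out the value merged during the last stats refresh. */
	oldval = ctl_stats.arenas[mib[2]].lg_dirty_mult;
	if (oldp != NULL && oldlenp != NULL) {
		if (*oldlenp < sizeof(oldval))
			return (EINVAL);
		memcpy(oldp, &oldval, sizeof(oldval));
		*oldlenp = sizeof(oldval);
	}
	return (0);
}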
diff --git a/src/stats.c b/src/stats.c
index c5cea5e..6e1752e 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -6,15 +6,6 @@
 	xmallctl(n, v, &sz, NULL, 0);					\
 } while (0)
 
-#define	CTL_M1_GET(n, i, v, t) do {					\
-	size_t mib[6];							\
-	size_t miblen = sizeof(mib) / sizeof(size_t);			\
-	size_t sz = sizeof(t);						\
-	xmallctlnametomib(n, mib, &miblen);				\
-	mib[1] = (i);							\
-	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
-} while (0)
-
 #define	CTL_M2_GET(n, i, v, t) do {					\
 	size_t mib[6];							\
 	size_t miblen = sizeof(mib) / sizeof(size_t);			\
@@ -285,7 +276,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
dss);
- CTL_M1_GET("arena.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+ CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
if (lg_dirty_mult >= 0) {
malloc_cprintf(write_cb, cbopaque,
"min active:dirty page ratio: %u:1\n",