author	David Goldblatt <davidgoldblatt@fb.com>	2017-03-08 23:56:31 (GMT)
committer	David Goldblatt <davidtgoldblatt@gmail.com>	2017-03-14 01:22:33 (GMT)
commit	4fc2acf5aef9ea8fe7e2dd39ee8b6a5050c5ff7f (patch)
tree	a36fa8713ab2e251b83ca787f30daeeb30454f66 /src
parent	26d23da6cd91e4d7d6210c89de5194dedf0f0f60 (diff)
Switch atomic uint64_ts in arena_stats_t to C11 atomics
I expect this to be the trickiest conversion we will see, since we want atomics on 64-bit platforms, but can always piggyback on some sort of external synchronization on non-64-bit platforms.
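As a standalone illustration of the pattern this commit relies on, here is a minimal sketch in standard C11: on platforms with lock-free 64-bit atomics the counters become relaxed C11 atomics, while elsewhere they stay plain uint64_t values protected by whatever mutex the caller already holds. The flag HAVE_ATOMIC_U64 and the names stats_u64_t, stats_read_u64, and stats_add_u64 are hypothetical stand-ins, not jemalloc identifiers; the real wrappers (arena_stats_u64_t, atomic_load_u64, atomic_fetch_add_u64) appear in the diff below.

/*
 * Illustrative sketch only, not jemalloc source.  HAVE_ATOMIC_U64 stands in
 * for JEMALLOC_ATOMIC_U64; on 64-bit platforms the counter is a relaxed C11
 * atomic, elsewhere it is a plain uint64_t relying on external locking.
 */
#include <stdint.h>
#ifdef HAVE_ATOMIC_U64
#include <stdatomic.h>
typedef _Atomic uint64_t stats_u64_t;
#else
typedef uint64_t stats_u64_t;	/* Caller must hold the stats mutex. */
#endif

static uint64_t
stats_read_u64(stats_u64_t *p) {
#ifdef HAVE_ATOMIC_U64
	/* Relaxed is enough: stats only need untorn 64-bit values. */
	return atomic_load_explicit(p, memory_order_relaxed);
#else
	return *p;	/* Externally synchronized. */
#endif
}

static void
stats_add_u64(stats_u64_t *p, uint64_t x) {
#ifdef HAVE_ATOMIC_U64
	atomic_fetch_add_explicit(p, x, memory_order_relaxed);
#else
	*p += x;	/* Externally synchronized. */
#endif
}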
Diffstat (limited to 'src')
-rw-r--r--	src/arena.c	60
-rw-r--r--	src/ctl.c	80
2 files changed, 99 insertions, 41 deletions
diff --git a/src/arena.c b/src/arena.c
index cb0194a..1fbf87d 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -78,9 +78,10 @@ arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
}
static uint64_t
-arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, uint64_t *p) {
+arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
- return atomic_read_u64(p);
+ return atomic_load_u64(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return *p;
@@ -88,10 +89,10 @@ arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, uint64_t *p) {
}
static void
-arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, uint64_t *p,
- uint64_t x) {
+arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
- atomic_add_u64(p, x);
+ atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p += x;
@@ -99,11 +100,11 @@ arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, uint64_t *p,
}
UNUSED static void
-arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, uint64_t *p,
- uint64_t x) {
+arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
- UNUSED uint64_t r = atomic_sub_u64(p, x);
- assert(r + x >= r);
+ UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p -= x;
@@ -111,6 +112,21 @@ arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, uint64_t *p,
#endif
}
+/*
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
+ * the types here are atomic).
+ */
+static void
+arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
+#else
+ *dst += src;
+#endif
+}
+
static size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
@@ -191,12 +207,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
&arena->stats.mapped);
astats->retained += (extents_npages_get(&arena->extents_retained) <<
LG_PAGE);
- astats->npurge += arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.npurge);
- astats->nmadvise += arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.nmadvise);
- astats->purged += arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.purged);
+ arena_stats_accum_u64(&astats->npurge, arena_stats_read_u64(tsdn,
+ &arena->stats, &arena->stats.npurge));
+ arena_stats_accum_u64(&astats->nmadvise, arena_stats_read_u64(tsdn,
+ &arena->stats, &arena->stats.nmadvise));
+ arena_stats_accum_u64(&astats->purged, arena_stats_read_u64(tsdn,
+ &arena->stats, &arena->stats.purged));
astats->base += base_allocated;
astats->internal += arena_internal_get(arena);
astats->resident += base_resident + (((atomic_read_zu(&arena->nactive) +
@@ -205,18 +221,20 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
for (szind_t i = 0; i < NSIZES - NBINS; i++) {
uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.lstats[i].nmalloc);
- lstats[i].nmalloc += nmalloc;
- astats->nmalloc_large += nmalloc;
+ arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
+ arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.lstats[i].ndalloc);
- lstats[i].ndalloc += ndalloc;
- astats->ndalloc_large += ndalloc;
+ arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
+ arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.lstats[i].nrequests);
- lstats[i].nrequests += nmalloc + nrequests;
- astats->nrequests_large += nmalloc + nrequests;
+ arena_stats_accum_u64(&lstats[i].nrequests,
+ nmalloc + nrequests);
+ arena_stats_accum_u64(&astats->nrequests_large,
+ nmalloc + nrequests);
assert(nmalloc >= ndalloc);
assert(nmalloc - ndalloc <= SIZE_T_MAX);
diff --git a/src/ctl.c b/src/ctl.c
index d4ab699..bb83583 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -431,6 +431,33 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
+/*
+ * Sets *dst += *src non-atomically. This is safe, since everything is
+ * synchronized by the ctl mutex.
+ */
+static void
+accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
+ atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
+#else
+ *dst += *src;
+#endif
+}
+
+/* Likewise: with ctl mutex synchronization, reading is simple. */
+static uint64_t
+arena_stats_read_u64(arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ return *p;
+#endif
+}
+
+/******************************************************************************/
+
static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate) {
unsigned a;
@@ -589,9 +616,12 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
sdstats->astats.mapped += astats->astats.mapped;
sdstats->astats.retained += astats->astats.retained;
}
- sdstats->astats.npurge += astats->astats.npurge;
- sdstats->astats.nmadvise += astats->astats.nmadvise;
- sdstats->astats.purged += astats->astats.purged;
+ accum_arena_stats_u64(&sdstats->astats.npurge,
+ &astats->astats.npurge);
+ accum_arena_stats_u64(&sdstats->astats.nmadvise,
+ &astats->astats.nmadvise);
+ accum_arena_stats_u64(&sdstats->astats.purged,
+ &astats->astats.purged);
if (!destroyed) {
sdstats->astats.base += astats->astats.base;
@@ -616,10 +646,12 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
} else {
assert(astats->astats.allocated_large == 0);
}
- sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
- sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
- sdstats->astats.nrequests_large +=
- astats->astats.nrequests_large;
+ accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
+ &astats->astats.nmalloc_large);
+ accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
+ &astats->astats.ndalloc_large);
+ accum_arena_stats_u64(&sdstats->astats.nrequests_large,
+ &astats->astats.nrequests_large);
if (config_tcache) {
sdstats->astats.tcache_bytes +=
@@ -654,10 +686,12 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
}
for (i = 0; i < NSIZES - NBINS; i++) {
- sdstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sdstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sdstats->lstats[i].nrequests +=
- astats->lstats[i].nrequests;
+ accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+ &astats->lstats[i].nmalloc);
+ accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+ &astats->lstats[i].ndalloc);
+ accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+ &astats->lstats[i].nrequests);
if (!destroyed) {
sdstats->lstats[i].curlextents +=
astats->lstats[i].curlextents;
@@ -2139,11 +2173,11 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
arenas_i(mib[2])->astats->astats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
- arenas_i(mib[2])->astats->astats.npurge, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.npurge), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
- arenas_i(mib[2])->astats->astats.nmadvise, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmadvise), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
- arenas_i(mib[2])->astats->astats.purged, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.purged), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
arenas_i(mib[2])->astats->astats.base, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
@@ -2164,11 +2198,14 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t) /* Intentional. */
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
+ uint64_t) /* Intentional. */
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
@@ -2199,11 +2236,14 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
- arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
- arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
- arenas_i(mib[2])->astats->lstats[mib[4]].nrequests, uint64_t)
+ arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
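A note on the accumulation helpers added above (arena_stats_accum_u64 and accum_arena_stats_u64): because the merge paths are serialized, either by the ctl mutex or because the destination is only written by the merging thread, a relaxed load followed by a relaxed store is sufficient, and the more expensive atomic fetch_add can be skipped while concurrent relaxed readers still never observe a torn 64-bit value. A minimal sketch of that reasoning in standard C11 follows (accum_u64_sketch is an illustrative name, not a jemalloc identifier).

/*
 * Sketch: fold src into *dst without an atomic read-modify-write.  Correct
 * only when all writers of *dst are serialized externally; the atomic store
 * still guarantees concurrent readers never see a torn 64-bit value.
 */
#include <stdatomic.h>
#include <stdint.h>

static void
accum_u64_sketch(_Atomic uint64_t *dst, uint64_t src) {
	uint64_t cur = atomic_load_explicit(dst, memory_order_relaxed);
	atomic_store_explicit(dst, cur + src, memory_order_relaxed);
}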