diff options
author | Jason Evans <jasone@canonware.com> | 2016-10-11 22:30:01 (GMT) |
---|---|---|
committer | Jason Evans <jasone@canonware.com> | 2016-10-11 22:30:01 (GMT) |
commit | b4b4a77848f1c726134ace82509b6adb9f8e7055 (patch) | |
tree | 7b9427d829826de0d0404baa694e229e00a73f7c /src/arena.c | |
parent | 48993ed5368506013fa1dcbc72b299409b7f5716 (diff) | |
download | jemalloc-b4b4a77848f1c726134ace82509b6adb9f8e7055.zip jemalloc-b4b4a77848f1c726134ace82509b6adb9f8e7055.tar.gz jemalloc-b4b4a77848f1c726134ace82509b6adb9f8e7055.tar.bz2 |
Fix and simplify decay-based purging.
Simplify decay-based purging attempts to only be triggered when the
epoch is advanced, rather than every time purgeable memory increases.
In a correctly functioning system (not previously the case; see below),
this only causes a behavior difference if during subsequent purge
attempts the least recently used (LRU) purgeable memory extent is
initially too large to be purged, but that memory is reused between
attempts and one or more of the next LRU purgeable memory extents are
small enough to be purged. In practice this is an arbitrary behavior
change that is within the set of acceptable behaviors.
As for the purging fix, ensure that arena->decay.ndirty is recorded
*after* the epoch advance and associated purging occurs. Prior to this
fix, it was possible for purging during epoch advance to cause a
substantially underrepresentative (arena->ndirty - arena->decay.ndirty),
i.e. the number of dirty pages attributed to the current epoch was too
low, and a series of unintended purges could result. This fix is also
relevant in the context of the simplification described above, but the
bug's impact would be limited to over-purging at epoch advances.
Diffstat (limited to 'src/arena.c')
-rw-r--r-- | src/arena.c | 109 |
1 files changed, 58 insertions, 51 deletions
diff --git a/src/arena.c b/src/arena.c index f53a464..2f0291e 100644 --- a/src/arena.c +++ b/src/arena.c @@ -523,11 +523,41 @@ arena_decay_backlog_npages_limit(const arena_t *arena) } static void -arena_decay_epoch_advance(arena_t *arena, const nstime_t *time) +arena_decay_backlog_update_last(arena_t *arena) +{ + size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? + arena->ndirty - arena->decay.ndirty : 0; + arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; +} + +static void +arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) +{ + + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; + + assert((uint64_t)nadvance_z == nadvance_u64); + + memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } + + arena_decay_backlog_update_last(arena); +} + +static void +arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) { uint64_t nadvance_u64; nstime_t delta; - size_t ndirty_delta; assert(opt_purge == purge_mode_decay); assert(arena_decay_deadline_reached(arena, time)); @@ -546,43 +576,25 @@ arena_decay_epoch_advance(arena_t *arena, const nstime_t *time) arena_decay_deadline_init(arena); /* Update the backlog. 
*/ - if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { - memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) * - sizeof(size_t)); - } else { - size_t nadvance_z = (size_t)nadvance_u64; + arena_decay_backlog_update(arena, nadvance_u64); +} - assert((uint64_t)nadvance_z == nadvance_u64); +static void +arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) +{ + size_t ndirty_limit = arena_decay_backlog_npages_limit(arena); - memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z], - (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); - if (nadvance_z > 1) { - memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS - - nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); - } - } - ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? arena->ndirty - - arena->decay.ndirty : 0; + if (arena->ndirty > ndirty_limit) + arena_purge_to_limit(tsdn, arena, ndirty_limit); arena->decay.ndirty = arena->ndirty; - arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; - arena->decay.backlog_npages_limit = - arena_decay_backlog_npages_limit(arena); } -static size_t -arena_decay_npages_limit(arena_t *arena) +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) { - size_t npages_limit; - - assert(opt_purge == purge_mode_decay); - - npages_limit = arena->decay.backlog_npages_limit; - /* Add in any dirty pages created during the current epoch. 
*/ - if (arena->ndirty > arena->decay.ndirty) - npages_limit += arena->ndirty - arena->decay.ndirty; - - return (npages_limit); + arena_decay_epoch_advance_helper(arena, time); + arena_decay_epoch_advance_purge(tsdn, arena); } static void @@ -600,7 +612,6 @@ arena_decay_init(arena_t *arena, ssize_t decay_time) arena->decay.jitter_state = (uint64_t)(uintptr_t)arena; arena_decay_deadline_init(arena); arena->decay.ndirty = arena->ndirty; - arena->decay.backlog_npages_limit = 0; memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); } @@ -682,7 +693,6 @@ static void arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) { nstime_t time; - size_t ndirty_limit; assert(opt_purge == purge_mode_decay); @@ -698,32 +708,29 @@ arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch, &time) > 0)) { /* - * Time went backwards. Move the epoch back in time, with the - * expectation that time typically flows forward for long enough - * periods of time that epochs complete. Unfortunately, - * this strategy is susceptible to clock jitter triggering - * premature epoch advances, but clock jitter estimation and - * compensation isn't feasible here because calls into this code - * are event-driven. + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. */ nstime_copy(&arena->decay.epoch, &time); + arena_decay_deadline_init(arena); } else { /* Verify that time does not go backwards. 
*/ assert(nstime_compare(&arena->decay.epoch, &time) <= 0); } - if (arena_decay_deadline_reached(arena, &time)) - arena_decay_epoch_advance(arena, &time); - - ndirty_limit = arena_decay_npages_limit(arena); - /* - * Don't try to purge unless the number of purgeable pages exceeds the - * current limit. + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances. */ - if (arena->ndirty <= ndirty_limit) - return; - arena_purge_to_limit(tsdn, arena, ndirty_limit); + if (arena_decay_deadline_reached(arena, &time)) + arena_decay_epoch_advance(tsdn, arena, &time); } void |