author    Qi Wang <interwq@gwu.edu>      2017-04-28 20:31:09 (GMT)
committer Qi Wang <interwq@gmail.com>    2017-05-01 21:49:42 (GMT)
commit    fc1aaf13fed6f9344c0681440e5a5782c889d0dc (patch)
tree      4dadbd1ce3d59dd0d45e61ecd840405601f76412
parent    209f2926b8e734317942231332f24b4bfd94587e (diff)
Revert "Use trylock in tcache_bin_flush when possible."

This reverts commit 8584adc451f31adfc4ab8693d9189cf3a7e5d858. Production
results were not favorable; will investigate separately.
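
For context, the reverted optimization replaced the blocking lock acquisition
in the flush paths with a best-effort trylock that skipped contended bins and
reported how many objects were actually flushed. Below is a minimal standalone
sketch of the two patterns, using pthreads and invented simplified types
(bin_t, cache_bin_t, flush_one) rather than jemalloc's actual mutex wrappers
and tcache structures:

#include <assert.h>
#include <pthread.h>

/* Hypothetical simplified stand-ins for jemalloc's bin/tcache types. */
typedef struct {
	pthread_mutex_t lock;
	/* ... bin state ... */
} bin_t;

typedef struct {
	void **items;     /* cached objects, newest last */
	unsigned ncached; /* number of cached objects */
} cache_bin_t;

/* Owning-bin lookup is elided; assume a single bin here for brevity. */
static void
flush_one(bin_t *bin, void *item) {
	(void)bin;
	(void)item; /* return the object to the bin */
}

/*
 * Best-effort flush, as in the reverted change: trylock and skip a
 * contended bin instead of blocking. Returns the number flushed so the
 * caller can tell whether any progress was made.
 */
static unsigned
cache_bin_try_flush(cache_bin_t *tbin, bin_t *bin, unsigned rem) {
	assert(rem <= tbin->ncached);
	unsigned nflush = tbin->ncached - rem;
	if (pthread_mutex_trylock(&bin->lock) != 0) {
		return 0; /* Contended; keep the objects for a later pass. */
	}
	for (unsigned i = 0; i < nflush; i++) {
		flush_one(bin, tbin->items[tbin->ncached - 1 - i]);
	}
	pthread_mutex_unlock(&bin->lock);
	tbin->ncached = rem;
	return nflush;
}

/* The behavior this revert restores: block until the flush completes. */
static void
cache_bin_flush(cache_bin_t *tbin, bin_t *bin, unsigned rem) {
	assert(rem <= tbin->ncached);
	unsigned nflush = tbin->ncached - rem;
	pthread_mutex_lock(&bin->lock);
	for (unsigned i = 0; i < nflush; i++) {
		flush_one(bin, tbin->items[tbin->ncached - 1 - i]);
	}
	pthread_mutex_unlock(&bin->lock);
	tbin->ncached = rem;
}

The trylock variant never blocks, but under sustained contention it can
repeatedly skip flushes and leave the cache over-full, one plausible mechanism
behind the unfavorable production results mentioned above.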
-rw-r--r--  include/jemalloc/internal/tcache_externs.h  |  30
-rw-r--r--  include/jemalloc/internal/tcache_inlines.h  |   4
-rw-r--r--  src/tcache.c                                 | 171
3 files changed, 63 insertions(+), 142 deletions(-)
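
The src/tcache.c hunks below also restore the unconditional GC flush in
tcache_event_hard(). Two details are easy to misread in the diff: the final
count passed to the flush functions is the number of objects to keep, not to
flush, and the fill count is derived as ncached_max >> lg_fill_div, so bumping
lg_fill_div halves it. A small worked example with invented helper names (not
jemalloc's actual code):

#include <stdio.h>

/*
 * keep_after_gc() mirrors the expression restored below:
 * ncached - low_water + (low_water >> 2) objects are kept, i.e.
 * (ceiling) 3/4 of the objects below the low-water mark are flushed.
 */
static unsigned
keep_after_gc(unsigned ncached, unsigned low_water) {
	/* Keep everything above low water, plus 1/4 of the unused part. */
	return ncached - low_water + (low_water >> 2);
}

/* Fill count halves each time lg_fill_div is incremented; bounded >= 1. */
static unsigned
fill_count(unsigned ncached_max, unsigned lg_fill_div) {
	return ncached_max >> lg_fill_div;
}

int
main(void) {
	/* ncached = 16, low_water = 8: keep 16 - 8 + 2 = 10, flush 6 = ceil(3/4 * 8). */
	printf("keep %u of 16\n", keep_after_gc(16, 8));
	/* ncached_max = 32: fill count 32 -> 16 -> 8 as lg_fill_div grows. */
	printf("fill %u %u %u\n", fill_count(32, 0), fill_count(32, 1),
	    fill_count(32, 2));
	return 0;
}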
diff --git a/include/jemalloc/internal/tcache_externs.h b/include/jemalloc/internal/tcache_externs.h
index 95dfe44..abe133f 100644
--- a/include/jemalloc/internal/tcache_externs.h
+++ b/include/jemalloc/internal/tcache_externs.h
@@ -27,27 +27,23 @@ extern size_t tcache_maxclass;
*/
extern tcaches_t *tcaches;
-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
+void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
+void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
-void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
-unsigned tcache_bin_try_flush_small(tsd_t *tsd, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem);
-unsigned tcache_bin_try_flush_large(tsd_t *tsd, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, unsigned rem);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
+void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
-void tcache_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
-void tcaches_flush(tsd_t *tsd, unsigned ind);
-void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn);
+void tcache_cleanup(tsd_t *tsd);
+void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void tcaches_flush(tsd_t *tsd, unsigned ind);
+void tcaches_destroy(tsd_t *tsd, unsigned ind);
+bool tcache_boot(tsdn_t *tsdn);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h
index 5e9a7a0..8a65ba2 100644
--- a/include/jemalloc/internal/tcache_inlines.h
+++ b/include/jemalloc/internal/tcache_inlines.h
@@ -227,8 +227,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
tbin = tcache_large_bin_get(tcache, binind);
tbin_info = &tcache_bin_info[binind];
if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
- tcache_bin_flush_large(tsd, tcache, tbin, binind,
- (tbin_info->ncached_max >> 1));
+ tcache_bin_flush_large(tsd, tbin, binind,
+ (tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->ncached++;
diff --git a/src/tcache.c b/src/tcache.c
index afb1faa..ee5e816 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -45,16 +45,14 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
} else {
tbin = tcache_large_bin_get(tcache, binind);
}
- bool repeat_bin;
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
- unsigned nflushed;
if (binind < NBINS) {
- nflushed = tcache_bin_try_flush_small(tsd, tcache, tbin,
- binind, tbin->ncached - tbin->low_water +
- (tbin->low_water >> 2));
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ tbin->ncached - tbin->low_water + (tbin->low_water
+ >> 2));
/*
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
@@ -65,29 +63,23 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
tcache->lg_fill_div[binind]++;
}
} else {
- nflushed = tcache_bin_try_flush_large(tsd, tcache, tbin,
- binind, tbin->ncached - tbin->low_water +
- (tbin->low_water >> 2));
+ tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ - tbin->low_water + (tbin->low_water >> 2), tcache);
}
- repeat_bin = (nflushed == 0);
- } else {
- if (tbin->low_water < 0) {
- /*
- * Increase fill count by 2X for small bins. Make sure
- * lg_fill_div stays greater than 0.
- */
- if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
- tcache->lg_fill_div[binind]--;
- }
+ } else if (tbin->low_water < 0) {
+ /*
+ * Increase fill count by 2X for small bins. Make sure
+ * lg_fill_div stays greater than 0.
+ */
+ if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
+ tcache->lg_fill_div[binind]--;
}
- repeat_bin = false;
}
- if (!repeat_bin) {
- tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins) {
- tcache->next_gc_bin = 0;
- }
- tbin->low_water = tbin->ncached;
+ tbin->low_water = tbin->ncached;
+
+ tcache->next_gc_bin++;
+ if (tcache->next_gc_bin == nhbins) {
+ tcache->next_gc_bin = 0;
}
}
@@ -107,9 +99,11 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
return ret;
}
-static inline unsigned
-tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem, bool must_flush) {
+void
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem) {
+ bool merged_stats = false;
+
assert(binind < NBINS);
assert(rem <= tbin->ncached);
@@ -122,12 +116,9 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
}
- bool merged_stats = false;
- unsigned nflushed = 0;
- unsigned nskipped = 0;
while (nflush > 0) {
/* Lock the arena bin associated with the first object. */
- extent_t *extent = item_extent[nskipped];
+ extent_t *extent = item_extent[0];
arena_t *bin_arena = extent_arena_get(extent);
arena_bin_t *bin = &bin_arena->bins[binind];
@@ -139,16 +130,7 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
tcache->prof_accumbytes = 0;
}
- if (must_flush) {
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- } else {
- /* Make best effort to flush w/o blocking. */
- if (malloc_mutex_trylock(tsd_tsdn(tsd), &bin->lock)) {
- nskipped++;
- nflush--;
- continue;
- }
- }
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
@@ -157,7 +139,7 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
tbin->tstats.nrequests = 0;
}
unsigned ndeferred = 0;
- for (unsigned i = nskipped; i < nflush; i++) {
+ for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(tbin->avail - 1 - i);
extent = item_extent[i];
assert(ptr != NULL && extent != NULL);
@@ -172,14 +154,13 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
- *(tbin->avail - 1 - ndeferred - nskipped) = ptr;
- item_extent[ndeferred + nskipped] = extent;
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
- nflushed += nflush - ndeferred;
nflush = ndeferred;
}
if (config_stats && !merged_stats) {
@@ -188,49 +169,26 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
- if (must_flush) {
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- }
- if (must_flush ||
- !malloc_mutex_trylock(tsd_tsdn(tsd), &bin->lock)) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &bin->lock);
- bin->stats.nflushes++;
- bin->stats.nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- }
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
- assert(nflushed == tbin->ncached - rem - nskipped);
- assert(nskipped == 0 || !must_flush);
- if (nflushed > 0) {
- memmove(tbin->avail - (rem + nskipped), tbin->avail -
- tbin->ncached, rem * sizeof(void *));
- }
- tbin->ncached = rem + nskipped;
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
+ tbin->ncached = rem;
if ((low_water_t)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
}
-
- return nflushed;
}
void
-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem) {
- tcache_bin_flush_small_impl(tsd, tcache, tbin, binind, rem, true);
-}
-
-unsigned
-tcache_bin_try_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem) {
- return tcache_bin_flush_small_impl(tsd, tcache, tbin, binind, rem,
- false);
-}
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache) {
+ bool merged_stats = false;
-static inline unsigned
-tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem, bool must_flush) {
assert(binind < nhbins);
assert(rem <= tbin->ncached);
@@ -243,31 +201,18 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
}
- bool merged_stats = false;
- unsigned nflushed = 0;
- unsigned nskipped = 0;
while (nflush > 0) {
/* Lock the arena associated with the first object. */
- extent_t *extent = item_extent[nskipped];
+ extent_t *extent = item_extent[0];
arena_t *locked_arena = extent_arena_get(extent);
UNUSED bool idump;
if (config_prof) {
idump = false;
}
- if (must_flush) {
- malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
- } else {
- /* Make best effort to flush w/o blocking. */
- if (malloc_mutex_trylock(tsd_tsdn(tsd),
- &locked_arena->large_mtx)) {
- nskipped++;
- nflush--;
- continue;
- }
- }
- for (unsigned i = nskipped; i < nflush; i++) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
extent = item_extent[i];
@@ -293,7 +238,7 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
unsigned ndeferred = 0;
- for (unsigned i = nskipped; i < nflush; i++) {
+ for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(tbin->avail - 1 - i);
extent = item_extent[i];
assert(ptr != NULL && extent != NULL);
@@ -307,8 +252,8 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* Stash the object, so that it can be handled
* in a future pass.
*/
- *(tbin->avail - 1 - ndeferred - nskipped) = ptr;
- item_extent[ndeferred + nskipped] = extent;
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
@@ -317,7 +262,6 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
- nflushed += nflush - ndeferred;
nflush = ndeferred;
}
if (config_stats && !merged_stats) {
@@ -330,31 +274,12 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
tbin->tstats.nrequests = 0;
}
- assert(nflushed == tbin->ncached - rem - nskipped);
- assert(nskipped == 0 || !must_flush);
-
- if (nflushed > 0) {
- memmove(tbin->avail - (rem + nskipped), tbin->avail -
- tbin->ncached, rem * sizeof(void *));
- }
- tbin->ncached = rem + nskipped;
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
+ tbin->ncached = rem;
if ((low_water_t)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
}
- return nflushed;
-}
-
-void
-tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem) {
- tcache_bin_flush_large_impl(tsd, tcache, tbin, binind, rem, true);
-}
-
-unsigned
-tcache_bin_try_flush_large(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem) {
- return tcache_bin_flush_large_impl(tsd, tcache, tbin, binind, rem,
- false);
}
void
@@ -533,7 +458,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
}
for (unsigned i = NBINS; i < nhbins; i++) {
tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
- tcache_bin_flush_large(tsd, tcache, tbin, i, 0);
+ tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats) {
assert(tbin->tstats.nrequests == 0);