author	Qi Wang <interwq@gwu.edu>	2021-11-29 23:45:24 (GMT)
committer	Qi Wang <interwq@gmail.com>	2021-12-29 22:44:43 (GMT)
commit	06aac61c4b261e5d1c8dcf3c7dd7921e9e395d62 (patch)
tree	2beb15329a26f4fb46bccf5f6a5fd5e0e20ec1cd
parent	d038160f3b76ac1e5203e11008169366629c81cd (diff)
Split the core logic of tcache flush into a separate function.
The core function takes a ptr array (containing the items to be flushed) as input, so that it can be reused to flush sanitizer-stashed items.
-rw-r--r--	src/tcache.c	40
1 file changed, 24 insertions(+), 16 deletions(-)
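
For orientation, here is a condensed sketch of the call structure after this change: the core flush logic now takes a caller-provided pointer array, and a thin wrapper gathers the bin's items into such an array before calling it. This paraphrases the diff that follows, with the core body elided; the hunks below are the authoritative version.

/*
 * Condensed sketch of the post-refactor structure (core body elided);
 * see the diff below for the real code.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
    szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush, bool small);
	/* Core logic: flushes exactly the nflush items in *ptrs; it no
	 * longer reads the cache bin to decide what to flush. */

JEMALLOC_ALWAYS_INLINE void
tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
    szind_t binind, unsigned rem, bool small) {
	/* Compute how many items to flush so that rem remain cached. */
	cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
	    &tcache_bin_info[binind]);
	unsigned nflush = ncached - rem;

	/* Gather the items into a ptr array and hand it to the core. */
	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
	cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind],
	    &ptrs, nflush);
	tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
	    small);
	cache_bin_finish_flush(cache_bin, &tcache_bin_info[binind], &ptrs,
	    nflush);
}
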
diff --git a/src/tcache.c b/src/tcache.c
index 39a4ea6..5c3d5b1 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -300,7 +300,7 @@ tcache_bin_flush_match(edata_t *edata, unsigned cur_arena_ind,
JEMALLOC_ALWAYS_INLINE void
tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
- szind_t binind, unsigned rem, bool small) {
+ szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush, bool small) {
tcache_slow_t *tcache_slow = tcache->tcache_slow;
/*
* A couple lookup calls take tsdn; declare it once for convenience
@@ -313,24 +313,15 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
} else {
assert(binind < nhbins);
}
- cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
- &tcache_bin_info[binind]);
- assert((cache_bin_sz_t)rem <= ncached);
arena_t *tcache_arena = tcache_slow->arena;
assert(tcache_arena != NULL);
- unsigned nflush = ncached - rem;
/*
* Variable length array must have > 0 length; the last element is never
* touched (it's just included to satisfy the no-zero-length rule).
*/
VARIABLE_ARRAY(emap_batch_lookup_result_t, item_edata, nflush + 1);
- CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
-
- cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind],
- &ptrs, nflush);
-
- tcache_bin_flush_edatas_lookup(tsd, &ptrs, binind, nflush, item_edata);
+ tcache_bin_flush_edatas_lookup(tsd, ptrs, binind, nflush, item_edata);
/*
* The slabs where we freed the last remaining object in the slab (and
@@ -407,7 +398,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
*/
if (!small) {
for (unsigned i = 0; i < nflush; i++) {
- void *ptr = ptrs.ptr[i];
+ void *ptr = ptrs->ptr[i];
edata = item_edata[i].edata;
assert(ptr != NULL && edata != NULL);
@@ -429,7 +420,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind);
}
for (unsigned i = 0; i < nflush; i++) {
- void *ptr = ptrs.ptr[i];
+ void *ptr = ptrs->ptr[i];
edata = item_edata[i].edata;
assert(ptr != NULL && edata != NULL);
if (!tcache_bin_flush_match(edata, cur_arena_ind,
@@ -440,7 +431,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
* arena. Either way, stash the object so that
* it can be handled in a future pass.
*/
- ptrs.ptr[ndeferred] = ptr;
+ ptrs->ptr[ndeferred] = ptr;
item_edata[ndeferred].edata = edata;
ndeferred++;
continue;
@@ -501,6 +492,23 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
}
}
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, unsigned rem, bool small) {
+ cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
+ &tcache_bin_info[binind]);
+ assert((cache_bin_sz_t)rem <= ncached);
+ unsigned nflush = ncached - rem;
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
+ cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind],
+ &ptrs, nflush);
+
+ tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
+ small);
+
cache_bin_finish_flush(cache_bin, &tcache_bin_info[binind], &ptrs,
ncached - rem);
}
@@ -508,13 +516,13 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
szind_t binind, unsigned rem) {
- tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, rem, true);
+ tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, true);
}
void
tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
szind_t binind, unsigned rem) {
- tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, rem, false);
+ tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, false);
}
void
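
As the commit message notes, the point of the split is that the core flush logic can later be driven by a different source of pointers, namely sanitizer-stashed items. A hypothetical second caller might look like the sketch below; tcache_bin_flush_stashed, cache_bin_nstashed_get_local, and cache_bin_init_ptr_array_for_stashed are illustrative names assumed here, not part of this commit.

/*
 * Hypothetical sketch, not part of this commit: a second caller that
 * feeds sanitizer-stashed items through tcache_bin_flush_impl. The
 * helpers for counting and gathering stashed items are assumed.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
    szind_t binind, bool small) {
	/* Assumed helper: number of stashed items in this bin. */
	cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin,
	    &tcache_bin_info[binind]);
	if (nstashed == 0) {
		return;
	}
	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed);
	/* Assumed helper: move the stashed items into the ptr array. */
	cache_bin_init_ptr_array_for_stashed(tsd, binind, cache_bin, &ptrs,
	    nstashed);
	/* Reuse the core logic split out by this commit. */
	tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed,
	    small);
}
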