summaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
author      Jason Evans <jasone@canonware.com>  2014-10-11 06:01:03 (GMT)
committer   Jason Evans <jasone@canonware.com>  2014-10-11 06:01:03 (GMT)
commit      381c23dd9d3bf019cc4c7523a900be1e888802a7 (patch)
tree        476e6da4c0dfd837779f7cc4979962e5e55a6d03 /include
parent      81e547566e9bd55db7c317c5848ab9dc189047cb (diff)
download    jemalloc-381c23dd9d3bf019cc4c7523a900be1e888802a7.zip
            jemalloc-381c23dd9d3bf019cc4c7523a900be1e888802a7.tar.gz
            jemalloc-381c23dd9d3bf019cc4c7523a900be1e888802a7.tar.bz2
Remove arena_dalloc_bin_run() clean page preservation.
Remove code in arena_dalloc_bin_run() that preserved the "clean" state of trailing clean pages by splitting them into a separate run during deallocation. This was a useful mechanism for reducing dirty page churn when bin runs comprised many pages, but bin runs are now quite small. Remove the nextind field from arena_run_t now that it is no longer needed, and change arena_run_t's bin field (arena_bin_t *) to binind (index_t). These two changes remove 8 bytes of chunk header overhead per page, which saves 1/512 of all arena chunk memory.
Diffstat (limited to 'include')
-rw-r--r--  include/jemalloc/internal/arena.h  |  14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index f5b9fc6..28ff727 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -36,11 +36,8 @@ typedef struct arena_s arena_t;
#ifdef JEMALLOC_H_STRUCTS
struct arena_run_s {
- /* Bin this run is associated with. */
- arena_bin_t *bin;
-
- /* Index of next region that has never been allocated, or nregs. */
- uint32_t nextind;
+ /* Index of bin this run is associated with. */
+ index_t binind;
/* Number of free regions in run. */
unsigned nfree;
@@ -756,7 +753,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
- index_t actual_binind;
+ index_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
@@ -774,9 +771,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
pageind);
miscelm = arena_miscelm_get(chunk, rpages_ind);
run = &miscelm->run;
- bin = run->bin;
+ run_binind = run->binind;
+ bin = &arena->bins[run_binind];
actual_binind = bin - arena->bins;
- assert(binind == actual_binind);
+ assert(run_binind == actual_binind);
bin_info = &arena_bin_info[actual_binind];
rpages = arena_miscelm_to_rpages(miscelm);
assert(((uintptr_t)ptr - ((uintptr_t)rpages +