author     Qi Wang <interwq@gwu.edu>    2017-10-31 21:17:40 (GMT)
committer  Qi Wang <interwq@gmail.com>  2017-11-01 20:52:06 (GMT)
commit     58eba024c0fbda463eaf8b42772407894dba6eff
tree       e3d16aae1d58c22094a031c76f1e0d4abe31a198 /src/base.c
parent     47203d5f422452def4cb29c0b7128cc068031100
metadata_thp: auto mode adjustment for a0.
We observed that arena 0 can have much more metadata allocated compared to other arenas. Tune the auto mode so that a0 only switches to huge pages on the 5th block (instead of the 3rd previously).
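For readers skimming the page, here is a minimal standalone sketch of the tuned trigger logic. The two macro values are assumptions inferred from this commit's message ("5th block" for a0) and from the removed comment below ("switch on the 2nd block" for other arenas); the authoritative definitions live elsewhere in the jemalloc tree. The helper auto_thp_triggered() is hypothetical and only mirrors the shape of base_auto_thp_triggered() in the diff.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed values, inferred from the commit message and removed comments. */
#define BASE_AUTO_THP_THRESHOLD    2 /* non-a0 arenas: switch on the 2nd block */
#define BASE_AUTO_THP_THRESHOLD_A0 5 /* arena 0: switch on the 5th block */

/* Hypothetical stand-in for base_auto_thp_triggered(); n_blocks plays the
 * role of base_get_num_blocks(). */
static bool
auto_thp_triggered(unsigned arena_ind, size_t n_blocks) {
	size_t threshold = (arena_ind == 0) ?
	    BASE_AUTO_THP_THRESHOLD_A0 : BASE_AUTO_THP_THRESHOLD;
	return n_blocks >= threshold;
}

int
main(void) {
	/* a0 stays on regular pages through its 4th block... */
	assert(!auto_thp_triggered(0, 4));
	/* ...and switches once the 5th block is counted. */
	assert(auto_thp_triggered(0, 5));
	/* Any other arena switches on its 2nd block. */
	assert(auto_thp_triggered(1, 2));
	printf("thresholds behave as expected\n");
	return 0;
}

The larger a0 threshold exists because a0's base carries allocator-internal metadata (e.g. rtree nodes, per the removed comment in the diff), so its early blocks are not representative of steady-state metadata pressure.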
Diffstat (limited to 'src/base.c')
-rw-r--r--  src/base.c  41
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/src/base.c b/src/base.c
index c6db425..1036936 100644
--- a/src/base.c
+++ b/src/base.c
@@ -125,42 +125,45 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
extent_binit(extent, addr, size, sn);
}
+static size_t
+base_get_num_blocks(base_t *base, bool with_new_block) {
+ base_block_t *b = base->blocks;
+ assert(b != NULL);
+
+ size_t n_blocks = with_new_block ? 2 : 1;
+ while (b->next != NULL) {
+ n_blocks++;
+ b = b->next;
+ }
+
+ return n_blocks;
+}
+
static bool
base_auto_thp_triggered(base_t *base, bool with_new_block) {
assert(opt_metadata_thp == metadata_thp_auto);
- base_block_t *b1 = base->blocks;
- assert(b1 != NULL);
- base_block_t *b2 = b1->next;
if (base_ind_get(base) != 0) {
- return with_new_block ? true: b2 != NULL;
+ return base_get_num_blocks(base, with_new_block) >=
+ BASE_AUTO_THP_THRESHOLD;
}
- base_block_t *b3 = (b2 != NULL) ? b2->next : NULL;
- return with_new_block ? b2 != NULL : b3 != NULL;
+ return base_get_num_blocks(base, with_new_block) >=
+ BASE_AUTO_THP_THRESHOLD_A0;
}
static void
base_auto_thp_switch(base_t *base) {
assert(opt_metadata_thp == metadata_thp_auto);
- base_block_t *b1 = base->blocks;
- assert(b1 != NULL);
- base_block_t *b2 = b1->next;
-
/* Called when adding a new block. */
bool should_switch;
if (base_ind_get(base) != 0) {
- /* Makes the switch on the 2nd block. */
- should_switch = (b2 == NULL);
+ should_switch = (base_get_num_blocks(base, true) ==
+ BASE_AUTO_THP_THRESHOLD);
} else {
- /*
- * a0 switches to thp on the 3rd block, since rtree nodes are
- * allocated from a0 base, which takes an entire block on init.
- */
- base_block_t *b3 = (b2 != NULL) ? b2->next :
- NULL;
- should_switch = (b2 != NULL) && (b3 == NULL);
+ should_switch = (base_get_num_blocks(base, true) ==
+ BASE_AUTO_THP_THRESHOLD_A0);
}
if (!should_switch) {
return;