author    Joerg Koenig <jck@techsat.com>  2018-01-19 10:53:15 (GMT)
committer Joerg Koenig <jck@techsat.com>  2018-01-19 10:53:15 (GMT)
commit    aa502047980c13cdbe5a1adb7f024199c367254d (patch)
tree      8cc6a214bde51a42beb77ac92fe0d00dd44c0c90 /src/base.c
parent    3f5049340e66c6929c3270f7359617f62e053b11 (diff)
parent    f78d4ca3fbff6cab0c704c787706a53ddafcbe13 (diff)
Merge https://github.com/jemalloc/jemalloc
Change-Id: I44881ca21f4710f7ad5154a45c4a7204ae71c84c
Diffstat (limited to 'src/base.c')
-rw-r--r--  src/base.c  151
1 file changed, 131 insertions(+), 20 deletions(-)
diff --git a/src/base.c b/src/base.c
index 97078b1..cc3d978 100644
--- a/src/base.c
+++ b/src/base.c
@@ -10,25 +10,39 @@
/******************************************************************************/
/* Data. */
-static base_t *b0;
+static base_t *b0;
+
+metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
+
+
+const char *metadata_thp_mode_names[] = {
+ "disabled",
+ "auto",
+ "always"
+};
/******************************************************************************/
+static inline bool
+metadata_thp_madvise(void) {
+ return (metadata_thp_enabled() && thp_state_madvise);
+}
+
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
+ /* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert(size == HUGEPAGE_CEILING(size));
-
+	size_t alignment = HUGEPAGE;
if (extent_hooks == &extent_hooks_default) {
- addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
+ addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
} else {
/* No arena context as we are creating new arenas. */
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
- addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
+ addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
&zero, &commit, ind);
post_reentrancy(tsd);
}
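
Note that base_map() now requests HUGEPAGE alignment unconditionally (it previously passed PAGE), so every base block satisfies the address/size invariant that the asserts later in this patch depend on. A minimal sketch of that invariant, assuming 2 MiB huge pages; the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Assuming 2 MiB transparent huge pages (the x86-64 default). */
#define LG_HUGEPAGE	21
#define HUGEPAGE	((size_t)1 << LG_HUGEPAGE)
#define HUGEPAGE_MASK	(HUGEPAGE - 1)

/*
 * Hypothetical helper mirroring the asserts later in this patch: a
 * block can be madvise()d to THP as a whole iff its address and size
 * are both hugepage multiples, which base_map() now guarantees.
 */
static bool
block_thp_eligible(void *addr, size_t size) {
	return ((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
	    (size & HUGEPAGE_MASK) == 0;
}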
@@ -51,16 +65,16 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
*/
if (extent_hooks == &extent_hooks_default) {
if (!extent_dalloc_mmap(addr, size)) {
- return;
+ goto label_done;
}
if (!pages_decommit(addr, size)) {
- return;
+ goto label_done;
}
if (!pages_purge_forced(addr, size)) {
- return;
+ goto label_done;
}
if (!pages_purge_lazy(addr, size)) {
- return;
+ goto label_done;
}
/* Nothing worked. This should never happen. */
not_reached();
@@ -70,27 +84,33 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
if (extent_hooks->dalloc != NULL &&
!extent_hooks->dalloc(extent_hooks, addr, size, true,
ind)) {
- goto label_done;
+ goto label_post_reentrancy;
}
if (extent_hooks->decommit != NULL &&
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
ind)) {
- goto label_done;
+ goto label_post_reentrancy;
}
if (extent_hooks->purge_forced != NULL &&
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
size, ind)) {
- goto label_done;
+ goto label_post_reentrancy;
}
if (extent_hooks->purge_lazy != NULL &&
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
ind)) {
- goto label_done;
+ goto label_post_reentrancy;
}
/* Nothing worked. That's the application's problem. */
- label_done:
+ label_post_reentrancy:
post_reentrancy(tsd);
- return;
+ }
+label_done:
+ if (metadata_thp_madvise()) {
+ /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
+ assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
+ (size & HUGEPAGE_MASK) == 0);
+ pages_nohuge(addr, size);
}
}
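
All unmap paths now fall through to label_done so the freed range can be marked NOHUGEPAGE. pages_huge() and pages_nohuge() are jemalloc's wrappers in src/pages.c; on Linux with THP they come down to madvise() calls, roughly as sketched here (the real wrappers also handle platforms without these flags; returning true signals failure, per jemalloc convention):

#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>

/* Rough model of pages_huge(): ask the kernel to back the range with
 * transparent huge pages. */
static bool
pages_huge_sketch(void *addr, size_t size) {
#ifdef MADV_HUGEPAGE
	return madvise(addr, size, MADV_HUGEPAGE) != 0;
#else
	return true;	/* Unsupported platform: report failure. */
#endif
}

/* Rough model of pages_nohuge(): opt the range out of THP so the
 * kernel does not waste defrag effort on an unmapped block. */
static bool
pages_nohuge_sketch(void *addr, size_t size) {
#ifdef MADV_NOHUGEPAGE
	return madvise(addr, size, MADV_NOHUGEPAGE) != 0;
#else
	return true;	/* Unsupported platform: report failure. */
#endif
}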
@@ -105,6 +125,56 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
extent_binit(extent, addr, size, sn);
}
+static size_t
+base_get_num_blocks(base_t *base, bool with_new_block) {
+ base_block_t *b = base->blocks;
+ assert(b != NULL);
+
+ size_t n_blocks = with_new_block ? 2 : 1;
+ while (b->next != NULL) {
+ n_blocks++;
+ b = b->next;
+ }
+
+ return n_blocks;
+}
+
+static void
+base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
+ assert(opt_metadata_thp == metadata_thp_auto);
+ malloc_mutex_assert_owner(tsdn, &base->mtx);
+ if (base->auto_thp_switched) {
+ return;
+ }
+ /* Called when adding a new block. */
+ bool should_switch;
+ if (base_ind_get(base) != 0) {
+ should_switch = (base_get_num_blocks(base, true) ==
+ BASE_AUTO_THP_THRESHOLD);
+ } else {
+ should_switch = (base_get_num_blocks(base, true) ==
+ BASE_AUTO_THP_THRESHOLD_A0);
+ }
+ if (!should_switch) {
+ return;
+ }
+
+ base->auto_thp_switched = true;
+ assert(!config_stats || base->n_thp == 0);
+ /* Make the initial blocks THP lazily. */
+ base_block_t *block = base->blocks;
+ while (block != NULL) {
+ assert((block->size & HUGEPAGE_MASK) == 0);
+ pages_huge(block, block->size);
+ if (config_stats) {
+ base->n_thp += HUGEPAGE_CEILING(block->size -
+ extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
+ }
+ block = block->next;
+ assert(block == NULL || (base_ind_get(base) == 0));
+ }
+}
+
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
size_t alignment) {
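
In auto mode a base starts on normal pages and switches to THP only once its block count, including the block about to be added, reaches a threshold (BASE_AUTO_THP_THRESHOLD, with a separate value for arena 0's base); the blocks already allocated are then madvised lazily. A self-contained model of the counting logic:

#include <assert.h>
#include <stddef.h>

/* Stand-in for jemalloc's base_block_t; only the link matters here. */
typedef struct block_s { struct block_s *next; } block_t;

/* Mirrors base_get_num_blocks(): count the list headed at `head`,
 * optionally including a block about to be added. */
static size_t
num_blocks(const block_t *head, int with_new_block) {
	size_t n = with_new_block ? 2 : 1;
	for (const block_t *b = head; b->next != NULL; b = b->next) {
		n++;
	}
	return n;
}

int
main(void) {
	block_t b2 = {NULL}, b1 = {&b2};	/* two existing blocks */
	assert(num_blocks(&b1, 0) == 2);
	assert(num_blocks(&b1, 1) == 3);	/* incoming block included */
	return 0;
}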
@@ -140,12 +210,20 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
base->allocated += size;
/*
* Add one PAGE to base_resident for every page boundary that is
- * crossed by the new allocation.
+ * crossed by the new allocation. Adjust n_thp similarly when
+ * metadata_thp is enabled.
*/
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
PAGE_CEILING((uintptr_t)addr - gap_size);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
+ if (metadata_thp_madvise() && (opt_metadata_thp ==
+ metadata_thp_always || base->auto_thp_switched)) {
+ base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
+ - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
+ LG_HUGEPAGE;
+ assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
+ }
}
}
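
The n_thp adjustment mirrors the PAGE accounting above it, at hugepage granularity: it adds one for every hugepage boundary newly spanned by the allocation plus its leading gap. A worked example, assuming 2 MiB huge pages:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define LG_HUGEPAGE	21	/* assumed: 2 MiB THP */
#define HUGEPAGE	((uintptr_t)1 << LG_HUGEPAGE)
#define HUGEPAGE_CEILING(s)	(((s) + HUGEPAGE - 1) & ~(HUGEPAGE - 1))

int
main(void) {
	/* An 8 KiB allocation straddling a hugepage boundary... */
	uintptr_t addr = 3 * HUGEPAGE - 4096;
	size_t size = 8192, gap_size = 0;
	size_t delta = (HUGEPAGE_CEILING(addr + size)
	    - HUGEPAGE_CEILING(addr - gap_size)) >> LG_HUGEPAGE;
	assert(delta == 1);	/* ...accounts exactly one more huge page. */
	return 0;
}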
@@ -166,8 +244,8 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
-base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
- pszind_t *pind_last, size_t *extent_sn_next, size_t size,
+base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
+ unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
@@ -193,6 +271,25 @@ base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
if (block == NULL) {
return NULL;
}
+
+ if (metadata_thp_madvise()) {
+ void *addr = (void *)block;
+ assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
+ (block_size & HUGEPAGE_MASK) == 0);
+ if (opt_metadata_thp == metadata_thp_always) {
+ pages_huge(addr, block_size);
+ } else if (opt_metadata_thp == metadata_thp_auto &&
+ base != NULL) {
+ /* base != NULL indicates this is not a new base. */
+ malloc_mutex_lock(tsdn, &base->mtx);
+ base_auto_thp_switch(tsdn, base);
+ if (base->auto_thp_switched) {
+ pages_huge(addr, block_size);
+ }
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ }
+ }
+
*pind_last = sz_psz2ind(block_size);
block->size = block_size;
block->next = NULL;
@@ -216,7 +313,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
- base_block_t *block = base_block_alloc(tsdn, extent_hooks,
+ base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
@@ -229,8 +326,16 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
base->allocated += sizeof(base_block_t);
base->resident += PAGE_CEILING(sizeof(base_block_t));
base->mapped += block->size;
+ if (metadata_thp_madvise() &&
+ !(opt_metadata_thp == metadata_thp_auto
+ && !base->auto_thp_switched)) {
+ assert(base->n_thp > 0);
+ base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
+ LG_HUGEPAGE;
+ }
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
+ assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->extent;
}
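
The doubly negated guard reads awkwardly; given the metadata_thp_madvise() precondition it is equivalent to "THP is active for this base". A small model verifying the equivalence (enum names are illustrative):

#include <assert.h>
#include <stdbool.h>

enum thp_mode { THP_DISABLED, THP_AUTO, THP_ALWAYS };	/* model */

/* The guard above, restated positively: account n_thp iff THP is
 * active, i.e. mode "always", or mode "auto" after the switch. */
static bool
thp_active(enum thp_mode mode, bool auto_switched) {
	return mode == THP_ALWAYS || (mode == THP_AUTO && auto_switched);
}

int
main(void) {
	/* Matches !(mode == auto && !switched) for the enabled modes. */
	for (int m = THP_AUTO; m <= THP_ALWAYS; m++) {
		for (int s = 0; s <= 1; s++) {
			assert(thp_active((enum thp_mode)m, s) ==
			    !(m == THP_AUTO && !s));
		}
	}
	return 0;
}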
@@ -244,7 +349,7 @@ base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
- base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
+ base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
@@ -265,6 +370,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->pind_last = pind_last;
base->extent_sn_next = extent_sn_next;
base->blocks = block;
+ base->auto_thp_switched = false;
for (szind_t i = 0; i < NSIZES; i++) {
extent_heap_new(&base->avail[i]);
}
@@ -272,8 +378,12 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
base->mapped = block->size;
+ base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
+ metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
+ >> LG_HUGEPAGE : 0;
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
+ assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
base_size);
@@ -368,7 +478,7 @@ base_alloc_extent(tsdn_t *tsdn, base_t *base) {
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
- size_t *mapped) {
+ size_t *mapped, size_t *n_thp) {
cassert(config_stats);
malloc_mutex_lock(tsdn, &base->mtx);
@@ -377,6 +487,7 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
*allocated = base->allocated;
*resident = base->resident;
*mapped = base->mapped;
+ *n_thp = base->n_thp;
malloc_mutex_unlock(tsdn, &base->mtx);
}
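
base_stats_get() gains an n_thp out parameter, so its callers (the arena stats merging code) must pass one extra argument. An illustrative consumer; the helper name is hypothetical and malloc_printf() is jemalloc's internal formatter:

/* Sketch of a caller adapted to the extended signature. */
static void
base_stats_print(tsdn_t *tsdn, base_t *base) {
	size_t allocated, resident, mapped, n_thp;
	base_stats_get(tsdn, base, &allocated, &resident, &mapped, &n_thp);
	malloc_printf("base: allocated=%zu resident=%zu mapped=%zu "
	    "n_thp=%zu\n", allocated, resident, mapped, n_thp);
}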