author    Jason Evans <je@fb.com>  2015-03-19 01:55:33 (GMT)
committer Jason Evans <je@fb.com>  2015-03-19 01:55:33 (GMT)
commit    8d6a3e8321a7767cb2ca0930b85d5d488a8cc659 (patch)
tree      7b7f9628079ffcf68d1d3cea4c797ad394d40339 /include/jemalloc
parent    c9db461ffb608ad32aed0e34663ae58a992e1003 (diff)
Implement dynamic per arena control over dirty page purging.
Add mallctls:
- arenas.lg_dirty_mult is initialized via opt.lg_dirty_mult, and can be
  modified to change the initial lg_dirty_mult setting for newly created
  arenas.
- arena.<i>.lg_dirty_mult controls an individual arena's dirty page purging
  threshold, and synchronously triggers any purging that may be necessary
  to maintain the constraint.
- arena.<i>.chunk.purge allows the per arena dirty page purging function
  to be replaced.

This resolves #93.
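A minimal sketch of how application code might exercise the new mallctls,
using jemalloc's standard mallctl() interface; arena index 0 and the value
5 are illustrative choices, not part of this commit:

    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
            ssize_t lg_dirty_mult;
            size_t sz = sizeof(lg_dirty_mult);

            /* Read the default that newly created arenas will inherit. */
            if (mallctl("arenas.lg_dirty_mult", &lg_dirty_mult, &sz, NULL, 0))
                    return (1);
            printf("initial lg_dirty_mult: %zd\n", lg_dirty_mult);

            /*
             * Tighten arena 0's threshold; per the commit message, this may
             * synchronously purge dirty pages to restore the constraint.
             */
            lg_dirty_mult = 5;
            if (mallctl("arena.0.lg_dirty_mult", NULL, NULL, &lg_dirty_mult,
                sizeof(lg_dirty_mult)))
                    return (1);
            return (0);
    }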
Diffstat (limited to 'include/jemalloc')
-rw-r--r--include/jemalloc/internal/arena.h16
-rw-r--r--include/jemalloc/internal/chunk.h6
-rw-r--r--include/jemalloc/internal/private_symbols.txt7
-rw-r--r--include/jemalloc/jemalloc_typedefs.h.in1
4 files changed, 26 insertions, 4 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 9cbc591..56ee74a 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -16,10 +16,10 @@
/*
* The minimum ratio of active:dirty pages per arena is computed as:
*
- * (nactive >> opt_lg_dirty_mult) >= ndirty
+ * (nactive >> lg_dirty_mult) >= ndirty
*
- * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
- * as many active pages as dirty pages.
+ * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
+ * many active pages as dirty pages.
*/
#define LG_DIRTY_MULT_DEFAULT 3
@@ -304,6 +304,9 @@ struct arena_s {
*/
arena_chunk_t *spare;
+ /* Minimum ratio (log base 2) of nactive:ndirty. */
+ ssize_t lg_dirty_mult;
+
/* Number of pages in active runs and huge regions. */
size_t nactive;
@@ -376,10 +379,11 @@ struct arena_s {
malloc_mutex_t node_cache_mtx;
/*
- * User-configurable chunk allocation and deallocation functions.
+ * User-configurable chunk allocation/deallocation/purge functions.
*/
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
+ chunk_purge_t *chunk_purge;
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
@@ -416,6 +420,8 @@ void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(arena_t *arena);
+bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
@@ -462,6 +468,8 @@ void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+ssize_t arena_lg_dirty_mult_default_get(void);
+bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
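The purging constraint edited in the hunk above reads more concretely with
numbers plugged in; a standalone illustration (the page count is
hypothetical, not drawn from this commit):

    #include <stdio.h>
    #include <sys/types.h>

    int
    main(void)
    {
            size_t nactive = 1024;          /* hypothetical active pages */
            ssize_t lg_dirty_mult = 3;      /* LG_DIRTY_MULT_DEFAULT */

            /*
             * (nactive >> lg_dirty_mult) >= ndirty must hold, so purging
             * begins once ndirty exceeds 1024 / 8 = 128 pages here.
             */
            printf("purge above %zu dirty pages\n",
                nactive >> lg_dirty_mult);
            return (0);
    }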
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 1af5b24..8093814 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -54,6 +54,12 @@ void chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size,
bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
void chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc,
void *chunk, size_t size);
+bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+ size_t length);
+bool chunk_purge_default(void *chunk, size_t offset, size_t length,
+ unsigned arena_ind);
+bool chunk_purge_wrapper(arena_t *arena, chunk_purge_t *chunk_purge,
+ void *chunk, size_t offset, size_t length);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
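The wrapper's body lives in src/chunk.c rather than this header, so it is
not part of this diff; what follows is only a plausible sketch of the
dispatch, assuming jemalloc's internal headers are in scope and that
arena_t exposes its index as arena->ind:

    /* Sketch only: forward to the supplied hook with the arena's index. */
    bool
    chunk_purge_wrapper(arena_t *arena, chunk_purge_t *chunk_purge,
        void *chunk, size_t offset, size_t length)
    {

            return (chunk_purge(chunk, offset, length, arena->ind));
    }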
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index d086db1..bc0f2a6 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -30,6 +30,10 @@ arena_dalloc_small
arena_dss_prec_get
arena_dss_prec_set
arena_init
+arena_lg_dirty_mult_default_get
+arena_lg_dirty_mult_default_set
+arena_lg_dirty_mult_get
+arena_lg_dirty_mult_set
arena_malloc
arena_malloc_large
arena_malloc_small
@@ -151,6 +155,9 @@ chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
+chunk_purge_arena
+chunk_purge_default
+chunk_purge_wrapper
chunk_record
chunk_register
chunks_rtree
diff --git a/include/jemalloc/jemalloc_typedefs.h.in b/include/jemalloc/jemalloc_typedefs.h.in
index 8092f1b..d4b4690 100644
--- a/include/jemalloc/jemalloc_typedefs.h.in
+++ b/include/jemalloc/jemalloc_typedefs.h.in
@@ -1,2 +1,3 @@
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned);
typedef bool (chunk_dalloc_t)(void *, size_t, unsigned);
+typedef bool (chunk_purge_t)(void *, size_t, size_t, unsigned);
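Given the new chunk_purge_t shape, a user-supplied hook might look as
follows. This is a hedged sketch, not code from this commit: it assumes
Linux madvise(2) with MADV_DONTNEED, the jemalloc convention that returning
true means the pages were not purged and must remain dirty, and that
arena.<i>.chunk.purge accepts a chunk_purge_t * the same way
arena.<i>.chunk.alloc accepts its hook; my_chunk_purge, install_purge_hook,
and arena index 0 are illustrative names/values:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <jemalloc/jemalloc.h>

    /* Matches chunk_purge_t: (chunk, offset, length, arena_ind). */
    static bool
    my_chunk_purge(void *chunk, size_t offset, size_t length,
        unsigned arena_ind)
    {

            (void)arena_ind;        /* unused in this sketch */
            /* Discard the range; on failure report the pages as retained. */
            return (madvise((char *)chunk + offset, length,
                MADV_DONTNEED) != 0);
    }

    /* Install the hook on arena 0 via the mallctl added by this commit. */
    static bool
    install_purge_hook(void)
    {
            chunk_purge_t *purge = my_chunk_purge;

            return (mallctl("arena.0.chunk.purge", NULL, NULL, &purge,
                sizeof(purge)) != 0);
    }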