author    | Jason Evans <je@fb.com> | 2015-07-28 15:28:19 (GMT)
committer | Jason Evans <je@fb.com> | 2015-08-04 04:49:02 (GMT)
commit    | b49a334a645b854dbb1649f15c38d646fee66738 (patch)
tree      | 221f5a09618cadbfd0b2570d802ca68971dec440 /include/jemalloc
parent    | d059b9d6a1ac3e7f834260ba001bf0d1599fb0bf (diff)
download  | jemalloc-b49a334a645b854dbb1649f15c38d646fee66738.zip
          | jemalloc-b49a334a645b854dbb1649f15c38d646fee66738.tar.gz
          | jemalloc-b49a334a645b854dbb1649f15c38d646fee66738.tar.bz2
Generalize chunk management hooks.
Add the "arena.<i>.chunk_hooks" mallctl, which replaces and expands on
the "arena.<i>.chunk.{alloc,dalloc,purge}" mallctls. The chunk hooks
allow control over chunk allocation/deallocation, decommit/commit,
purging, and splitting/merging, such that the application can rely on
jemalloc's internal chunk caching and retaining functionality, yet
implement a variety of chunk management mechanisms and policies.
Merge the chunks_[sz]ad_{mmap,dss} red-black trees into
chunks_[sz]ad_retained. This slightly reduces how hard jemalloc tries
to honor the dss precedence setting; prior to this change the precedence
setting was also consulted when recycling chunks.
Fix chunk purging. Don't purge chunks in arena_purge_stashed(); instead
deallocate them in arena_unstash_purged(), so that the dirty memory
linkage remains valid until after the last time it is used.
This resolves #176 and #201.
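
For illustration, the new hooks are driven entirely through the public API: read an arena's current chunk_hooks_t via the "arena.<i>.chunk_hooks" mallctl, overwrite the members of interest, and write the struct back. The sketch below is not part of this commit; it assumes arena index 0, backs chunks with plain mmap()/munmap(), and glosses over the chunk-size and alignment guarantees a production hook must provide. The names my_chunk_alloc, my_chunk_dalloc, and install_custom_hooks are hypothetical; the hook signatures follow the typedefs added to jemalloc_typedefs.h.in in the diff below.

```c
#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

/* Matches chunk_alloc_t: (void *, size_t, size_t, bool *, unsigned). */
static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	void *ret;

	(void)arena_ind;
	ret = mmap(new_addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	if ((new_addr != NULL && ret != new_addr) ||
	    ((uintptr_t)ret & (alignment - 1)) != 0) {
		/* A real hook would retry or over-allocate and trim instead. */
		munmap(ret, size);
		return (NULL);
	}
	*zero = true;	/* Fresh anonymous pages read back as zeros. */
	return (ret);
}

/* Matches chunk_dalloc_t: (void *, size_t, unsigned); false means success. */
static bool
my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{

	(void)arena_ind;
	return (munmap(chunk, size) != 0);
}

static int
install_custom_hooks(void)
{
	chunk_hooks_t hooks;
	size_t len = sizeof(hooks);

	/* Fetch the current hooks so untouched slots keep their defaults. */
	if (mallctl("arena.0.chunk_hooks", &hooks, &len, NULL, 0) != 0)
		return (1);
	hooks.alloc = my_chunk_alloc;
	hooks.dalloc = my_chunk_dalloc;
	/* Install the modified hook table for arena 0. */
	return (mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, len));
}
```

Because the unmodified members are taken from the struct that was just read, they continue to point at jemalloc's default hooks, which is what lets an application layer its own policy on top of the internal chunk caching and retention described above.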
Diffstat (limited to 'include/jemalloc')
-rw-r--r-- | include/jemalloc/internal/arena.h                | 19
-rw-r--r-- | include/jemalloc/internal/chunk.h                | 44
-rw-r--r-- | include/jemalloc/internal/chunk_mmap.h           | 2
-rw-r--r-- | include/jemalloc/internal/extent.h               | 28
-rw-r--r-- | include/jemalloc/internal/jemalloc_internal.h.in | 4
-rw-r--r-- | include/jemalloc/internal/pages.h                | 26
-rw-r--r-- | include/jemalloc/internal/private_symbols.txt    | 12
-rw-r--r-- | include/jemalloc/jemalloc_typedefs.h.in          | 54
8 files changed, 151 insertions, 38 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 8811f2e..29f73e7 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -379,23 +379,18 @@ struct arena_s {
 	 * orderings are needed, which is why there are two trees with the same
 	 * contents.
 	 */
-	extent_tree_t		chunks_szad_cache;
-	extent_tree_t		chunks_ad_cache;
-	extent_tree_t		chunks_szad_mmap;
-	extent_tree_t		chunks_ad_mmap;
-	extent_tree_t		chunks_szad_dss;
-	extent_tree_t		chunks_ad_dss;
+	extent_tree_t		chunks_szad_cached;
+	extent_tree_t		chunks_ad_cached;
+	extent_tree_t		chunks_szad_retained;
+	extent_tree_t		chunks_ad_retained;
+
 	malloc_mutex_t		chunks_mtx;
 	/* Cache of nodes that were allocated via base_alloc(). */
 	ql_head(extent_node_t)	node_cache;
 	malloc_mutex_t		node_cache_mtx;
 
-	/*
-	 * User-configurable chunk allocation/deallocation/purge functions.
-	 */
-	chunk_alloc_t		*chunk_alloc;
-	chunk_dalloc_t		*chunk_dalloc;
-	chunk_purge_t		*chunk_purge;
+	/* User-configurable chunk hook functions. */
+	chunk_hooks_t		chunk_hooks;
 
 	/* bins is used to store trees of free regions. */
 	arena_bin_t		bins[NBINS];
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 91aefad..8e51134 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -19,6 +19,16 @@
 #define	CHUNK_CEILING(s)					\
 	(((s) + chunksize_mask) & ~chunksize_mask)
 
+#define	CHUNK_HOOKS_INITIALIZER {				\
+    NULL,							\
+    NULL,							\
+    NULL,							\
+    NULL,							\
+    NULL,							\
+    NULL,							\
+    NULL							\
+}
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
@@ -36,30 +46,30 @@
 extern size_t		chunksize;
 extern size_t		chunksize_mask; /* (chunksize - 1). */
 extern size_t		chunk_npages;
+extern const chunk_hooks_t	chunk_hooks_default;
+
+chunk_hooks_t	chunk_hooks_get(arena_t *arena);
+chunk_hooks_t	chunk_hooks_set(arena_t *arena,
+    const chunk_hooks_t *chunk_hooks);
+
 bool	chunk_register(const void *chunk, const extent_node_t *node);
 void	chunk_deregister(const void *chunk, const extent_node_t *node);
 void	*chunk_alloc_base(size_t size);
-void	*chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool dalloc_node);
-void	*chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
-    bool *zero, unsigned arena_ind);
-void	*chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc,
+void	*chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_node);
+void	*chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero);
-void	chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
-    bool zeroed);
-void	chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size);
-void	chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size,
-    bool zeroed);
-bool	chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void	chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc,
+void	chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size);
+void	chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed);
+void	chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size);
 bool	chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
     size_t length);
-bool	chunk_purge_default(void *chunk, size_t offset, size_t length,
-    unsigned arena_ind);
-bool	chunk_purge_wrapper(arena_t *arena, chunk_purge_t *chunk_purge,
-    void *chunk, size_t offset, size_t length);
+bool	chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length);
 bool	chunk_boot(void);
 void	chunk_prefork(void);
 void	chunk_postfork_parent(void);
diff --git a/include/jemalloc/internal/chunk_mmap.h b/include/jemalloc/internal/chunk_mmap.h
index c5d5c6c..e81dc3a 100644
--- a/include/jemalloc/internal/chunk_mmap.h
+++ b/include/jemalloc/internal/chunk_mmap.h
@@ -9,8 +9,6 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-bool	pages_purge(void *addr, size_t length);
-
 void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool	chunk_dalloc_mmap(void *chunk, size_t size);
 
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 3751adc..b2ac2b6 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -19,6 +19,13 @@ struct extent_node_s {
 	size_t			en_size;
 
 	/*
+	 * True if physical memory is committed to the extent, whether
+	 * explicitly or implicitly as on a system that overcommits and
+	 * satisfies physical memory needs on demand via soft page faults.
+	 */
+	bool			en_committed;
+
+	/*
 	 * The zeroed flag is used by chunk recycling code to track whether
 	 * memory is zero-filled.
 	 */
@@ -66,17 +73,19 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 arena_t	*extent_node_arena_get(const extent_node_t *node);
 void	*extent_node_addr_get(const extent_node_t *node);
 size_t	extent_node_size_get(const extent_node_t *node);
+bool	extent_node_committed_get(const extent_node_t *node);
 bool	extent_node_zeroed_get(const extent_node_t *node);
 bool	extent_node_achunk_get(const extent_node_t *node);
 prof_tctx_t	*extent_node_prof_tctx_get(const extent_node_t *node);
 void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
 void	extent_node_addr_set(extent_node_t *node, void *addr);
 void	extent_node_size_set(extent_node_t *node, size_t size);
+void	extent_node_committed_set(extent_node_t *node, bool committed);
 void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
 void	extent_node_achunk_set(extent_node_t *node, bool achunk);
 void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-    size_t size, bool zeroed);
+    size_t size, bool committed, bool zeroed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
 void	extent_node_dirty_insert(extent_node_t *node,
     arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -106,6 +115,13 @@ extent_node_size_get(const extent_node_t *node)
 }
 
 JEMALLOC_INLINE bool
+extent_node_committed_get(const extent_node_t *node)
+{
+
+	return (node->en_committed);
+}
+
+JEMALLOC_INLINE bool
 extent_node_zeroed_get(const extent_node_t *node)
 {
 
@@ -148,6 +164,13 @@ extent_node_size_set(extent_node_t *node, size_t size)
 }
 
 JEMALLOC_INLINE void
+extent_node_committed_set(extent_node_t *node, bool committed)
+{
+
+	node->en_committed = committed;
+}
+
+JEMALLOC_INLINE void
 extent_node_zeroed_set(extent_node_t *node, bool zeroed)
 {
 
@@ -170,12 +193,13 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
 
 JEMALLOC_INLINE void
 extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-    bool zeroed)
+    bool committed, bool zeroed)
 {
 
 	extent_node_arena_set(node, arena);
 	extent_node_addr_set(node, addr);
 	extent_node_size_set(node, size);
+	extent_node_committed_set(node, committed);
 	extent_node_zeroed_set(node, zeroed);
 	extent_node_achunk_set(node, false);
 	if (config_prof)
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 496997d..7a137b6 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -367,6 +367,7 @@ typedef unsigned index_t;
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 #include "jemalloc/internal/tcache.h"
@@ -398,6 +399,7 @@ typedef unsigned index_t;
 #undef JEMALLOC_ARENA_STRUCTS_B
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 #include "jemalloc/internal/tcache.h"
@@ -477,6 +479,7 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 #include "jemalloc/internal/tcache.h"
@@ -503,6 +506,7 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
diff --git a/include/jemalloc/internal/pages.h b/include/jemalloc/internal/pages.h
new file mode 100644
index 0000000..da7eb96
--- /dev/null
+++ b/include/jemalloc/internal/pages.h
@@ -0,0 +1,26 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	*pages_map(void *addr, size_t size);
+void	pages_unmap(void *addr, size_t size);
+void	*pages_trim(void *addr, size_t alloc_size, size_t leadsize,
+    size_t size);
+bool	pages_commit(void *addr, size_t size);
+bool	pages_decommit(void *addr, size_t size);
+bool	pages_purge(void *addr, size_t size);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index aaf6978..0e6216f 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -132,14 +132,12 @@ bt_init
 buferror
 chunk_alloc_cache
 chunk_alloc_base
-chunk_alloc_default
 chunk_alloc_dss
 chunk_alloc_mmap
 chunk_alloc_wrapper
 chunk_boot
 chunk_dalloc_arena
 chunk_dalloc_cache
-chunk_dalloc_default
 chunk_dalloc_mmap
 chunk_dalloc_wrapper
 chunk_deregister
@@ -149,6 +147,9 @@ chunk_dss_postfork_parent
 chunk_dss_prec_get
 chunk_dss_prec_set
 chunk_dss_prefork
+chunk_hooks_default
+chunk_hooks_get
+chunk_hooks_set
 chunk_in_dss
 chunk_lookup
 chunk_npages
@@ -156,9 +157,7 @@ chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
 chunk_purge_arena
-chunk_purge_default
 chunk_purge_wrapper
-chunk_record
 chunk_register
 chunks_rtree
 chunksize
@@ -347,7 +346,12 @@ opt_utrace
 opt_xmalloc
 opt_zero
 p2rz
+pages_commit
+pages_decommit
+pages_map
 pages_purge
+pages_trim
+pages_unmap
 pow2_ceil
 prof_active_get
 prof_active_get_unlocked
diff --git a/include/jemalloc/jemalloc_typedefs.h.in b/include/jemalloc/jemalloc_typedefs.h.in
index d4b4690..26eb9ad 100644
--- a/include/jemalloc/jemalloc_typedefs.h.in
+++ b/include/jemalloc/jemalloc_typedefs.h.in
@@ -1,3 +1,55 @@
+/*
+ * void *
+ * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ *     unsigned arena_ind);
+ */
 typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned);
+
+/*
+ * bool
+ * chunk_dalloc(void *chunk, size_t size, unsigned arena_ind);
+ */
 typedef bool (chunk_dalloc_t)(void *, size_t, unsigned);
-typedef bool (chunk_purge_t)(void *, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_commit(void *chunk, size_t size, unsigned arena_ind);
+ */
+typedef bool (chunk_commit_t)(void *, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_decommit(void *chunk, size_t size, unsigned arena_ind);
+ */
+typedef bool (chunk_decommit_t)(void *, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ *     unsigned arena_ind);
+ */
+typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ *     bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ *     bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
+
+typedef struct {
+	chunk_alloc_t		*alloc;
+	chunk_dalloc_t		*dalloc;
+	chunk_commit_t		*commit;
+	chunk_decommit_t	*decommit;
+	chunk_purge_t		*purge;
+	chunk_split_t		*split;
+	chunk_merge_t		*merge;
+} chunk_hooks_t;
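
The commit/decommit and purge typedefs above are the genuinely new hook surface. As a rough sketch only (not part of this commit, and assuming the mmap-backed scheme from the earlier example), overcommit-style hooks might look like the code below. The my_chunk_* names are hypothetical, MAP_FIXED re-mapping is just one way to model decommit on systems that overcommit, and the return values follow jemalloc's usual false-on-success convention.

```c
#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* chunk_commit_t: make [chunk, chunk+size) accessible again; false on success. */
static bool
my_chunk_commit(void *chunk, size_t size, unsigned arena_ind)
{
	void *result;

	(void)arena_ind;
	/* Re-mapping with MAP_FIXED restores access; contents read as zeros. */
	result = mmap(chunk, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	return (result == MAP_FAILED);
}

/* chunk_decommit_t: give physical pages back while keeping the mapping. */
static bool
my_chunk_decommit(void *chunk, size_t size, unsigned arena_ind)
{
	void *result;

	(void)arena_ind;
	/* Overwrite the range with PROT_NONE so the kernel can drop the pages. */
	result = mmap(chunk, size, PROT_NONE,
	    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	return (result == MAP_FAILED);
}

/* chunk_purge_t: discard a page-aligned [offset, offset+length) range. */
static bool
my_chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	(void)size; (void)arena_ind;
	/*
	 * For private anonymous memory, MADV_DONTNEED makes the purged pages
	 * read back as zeros, so returning false advertises that guarantee.
	 */
	return (madvise((char *)chunk + offset, length, MADV_DONTNEED) != 0);
}
```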