summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2017-03-01 03:09:23 (GMT)
committerJason Evans <jasone@canonware.com>2017-03-01 03:09:23 (GMT)
commit04380e79f1e2428bd0ad000bbc6e3d2dfc6b66a5 (patch)
tree85f88607465908a7b66ad70d4caca69a0836189f /include
parentf1f76357313e7dcad7262f17a48ff0a2e005fcdc (diff)
parent700253e1f2f40d4a74e361fa1e688986c361dba4 (diff)
downloadjemalloc-4.5.0.zip
jemalloc-4.5.0.tar.gz
jemalloc-4.5.0.tar.bz2
Merge branch 'rc-4.5.0' (tag: 4.5.0)
Diffstat (limited to 'include')
-rw-r--r--include/jemalloc/internal/arena.h1
-rw-r--r--include/jemalloc/internal/chunk.h4
-rw-r--r--include/jemalloc/internal/extent.h5
-rw-r--r--include/jemalloc/internal/jemalloc_internal.h.in8
-rw-r--r--include/jemalloc/internal/jemalloc_internal_defs.h.in12
-rw-r--r--include/jemalloc/internal/mb.h2
-rw-r--r--include/jemalloc/internal/mutex.h12
-rw-r--r--include/jemalloc/internal/private_symbols.txt10
-rw-r--r--include/jemalloc/internal/tcache.h3
-rw-r--r--include/jemalloc/internal/tsd.h7
-rw-r--r--include/jemalloc/internal/witness.h72
11 files changed, 99 insertions, 37 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index ce4e602..119e3a5 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -506,6 +506,7 @@ static const size_t large_pad =
#endif
;
+extern bool opt_thp;
extern purge_mode_t opt_purge;
extern const char *purge_mode_names[];
extern ssize_t opt_lg_dirty_mult;
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 50b9904..55df9ac 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -52,8 +52,8 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
-bool chunk_register(tsdn_t *tsdn, const void *chunk,
- const extent_node_t *node);
+bool chunk_register(const void *chunk, const extent_node_t *node,
+ bool *gdump);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 168ffe6..fc77f9f 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -75,6 +75,11 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
+#ifdef JEMALLOC_JET
+size_t extent_size_quantize_floor(size_t size);
+#endif
+size_t extent_size_quantize_ceil(size_t size);
+
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index e7ace7d..e3b499a 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -99,6 +99,13 @@ static const bool config_tcache =
false
#endif
;
+static const bool config_thp =
+#ifdef JEMALLOC_THP
+ true
+#else
+ false
+#endif
+ ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
@@ -158,7 +165,6 @@ static const bool config_cache_oblivious =
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
-#include <malloc/malloc.h>
#endif
#include "jemalloc/internal/ph.h"
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index def4ba5..7c88b0d 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -239,7 +239,6 @@
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
-#undef JEMALLOC_ZONE_VERSION
/*
* Methods for determining whether the OS overcommits.
@@ -254,6 +253,12 @@
#undef JEMALLOC_HAVE_MADVISE
/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+#undef JEMALLOC_HAVE_MADVISE_HUGE
+
+/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
@@ -265,10 +270,7 @@
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
+/* Defined if transparent huge page support is enabled. */
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */
diff --git a/include/jemalloc/internal/mb.h b/include/jemalloc/internal/mb.h
index 5384728..e58da5c 100644
--- a/include/jemalloc/internal/mb.h
+++ b/include/jemalloc/internal/mb.h
@@ -76,7 +76,7 @@ mb_write(void)
: "memory" /* Clobbers. */
);
}
-#elif defined(__sparc64__)
+#elif defined(__sparc__) && defined(__arch64__)
JEMALLOC_INLINE void
mb_write(void)
{
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
index b442d2d..2b4b1c3 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
@@ -85,8 +85,8 @@ JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
+ witness_assert_not_owner(tsdn, &mutex->witness);
if (isthreaded) {
- witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
@@ -100,16 +100,16 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
#else
pthread_mutex_lock(&mutex->lock);
#endif
- witness_lock(tsdn, &mutex->witness);
}
+ witness_lock(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
+ witness_unlock(tsdn, &mutex->witness);
if (isthreaded) {
- witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
@@ -130,16 +130,14 @@ JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
- if (isthreaded)
- witness_assert_owner(tsdn, &mutex->witness);
+ witness_assert_owner(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
- if (isthreaded)
- witness_assert_not_owner(tsdn, &mutex->witness);
+ witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index c1c6c40..60b57e5 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -223,6 +223,8 @@ extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
+extent_size_quantize_ceil
+extent_size_quantize_floor
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
@@ -390,6 +392,7 @@ opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
+opt_thp
opt_utrace
opt_xmalloc
opt_zero
@@ -529,6 +532,9 @@ tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
+tcache_postfork_child
+tcache_postfork_parent
+tcache_prefork
tcache_salloc
tcache_stats_merge
tcaches
@@ -612,14 +618,16 @@ valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
+witness_assert_depth
+witness_assert_depth_to_rank
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
+witness_depth_error
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
-witness_lockless_error
witness_not_owner_error
witness_owner
witness_owner_error
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 01ba062..5fe5ebf 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -149,6 +149,9 @@ bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
+void tcache_prefork(tsdn_t *tsdn);
+void tcache_postfork_parent(tsdn_t *tsdn);
+void tcache_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
index 9055aca..9f37433 100644
--- a/include/jemalloc/internal/tsd.h
+++ b/include/jemalloc/internal/tsd.h
@@ -479,13 +479,14 @@ a_name##tsd_wrapper_get(bool init) \
\
if (init && unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
- wrapper = tsd_init_check_recursion( \
- &a_name##tsd_init_head, &block); \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ tsd_init_check_recursion(&a_name##tsd_init_head, \
+ &block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
- block.data = wrapper; \
+ block.data = (void *)wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h
index cdf15d7..30d8c7e 100644
--- a/include/jemalloc/internal/witness.h
+++ b/include/jemalloc/internal/witness.h
@@ -12,21 +12,32 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
*/
#define WITNESS_RANK_OMIT 0U
+#define WITNESS_RANK_MIN 1U
+
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
-#define WITNESS_RANK_ARENAS 2U
+#define WITNESS_RANK_TCACHES 2U
+#define WITNESS_RANK_ARENAS 3U
+
+#define WITNESS_RANK_PROF_DUMP 4U
+#define WITNESS_RANK_PROF_BT2GCTX 5U
+#define WITNESS_RANK_PROF_TDATAS 6U
+#define WITNESS_RANK_PROF_TDATA 7U
+#define WITNESS_RANK_PROF_GCTX 8U
-#define WITNESS_RANK_PROF_DUMP 3U
-#define WITNESS_RANK_PROF_BT2GCTX 4U
-#define WITNESS_RANK_PROF_TDATAS 5U
-#define WITNESS_RANK_PROF_TDATA 6U
-#define WITNESS_RANK_PROF_GCTX 7U
+/*
+ * Used as an argument to witness_assert_depth_to_rank() in order to validate
+ * depth excluding non-core locks with lower ranks. Since the rank argument to
+ * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
+ * definition can have the same value as the minimally ranked core lock.
+ */
+#define WITNESS_RANK_CORE 9U
-#define WITNESS_RANK_ARENA 8U
-#define WITNESS_RANK_ARENA_CHUNKS 9U
-#define WITNESS_RANK_ARENA_NODE_CACHE 10
+#define WITNESS_RANK_ARENA 9U
+#define WITNESS_RANK_ARENA_CHUNKS 10U
+#define WITNESS_RANK_ARENA_NODE_CACHE 11U
-#define WITNESS_RANK_BASE 11U
+#define WITNESS_RANK_BASE 12U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
@@ -91,10 +102,12 @@ extern witness_not_owner_error_t *witness_not_owner_error;
void witness_not_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
-typedef void (witness_lockless_error_t)(const witness_list_t *);
-extern witness_lockless_error_t *witness_lockless_error;
+typedef void (witness_depth_error_t)(const witness_list_t *,
+ witness_rank_t rank_inclusive, unsigned depth);
+extern witness_depth_error_t *witness_depth_error;
#else
-void witness_lockless_error(const witness_list_t *witnesses);
+void witness_depth_error(const witness_list_t *witnesses,
+ witness_rank_t rank_inclusive, unsigned depth);
#endif
void witnesses_cleanup(tsd_t *tsd);
@@ -111,6 +124,9 @@ void witness_postfork_child(tsd_t *tsd);
bool witness_owner(tsd_t *tsd, const witness_t *witness);
void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
+ unsigned depth);
+void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
void witness_assert_lockless(tsdn_t *tsdn);
void witness_lock(tsdn_t *tsdn, witness_t *witness);
void witness_unlock(tsdn_t *tsdn, witness_t *witness);
@@ -123,6 +139,8 @@ witness_owner(tsd_t *tsd, const witness_t *witness)
witness_list_t *witnesses;
witness_t *w;
+ cassert(config_debug);
+
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
@@ -175,9 +193,10 @@ witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
}
JEMALLOC_INLINE void
-witness_assert_lockless(tsdn_t *tsdn)
-{
+witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
+ unsigned depth) {
tsd_t *tsd;
+ unsigned d;
witness_list_t *witnesses;
witness_t *w;
@@ -188,10 +207,29 @@ witness_assert_lockless(tsdn_t *tsdn)
return;
tsd = tsdn_tsd(tsdn);
+ d = 0;
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
- if (w != NULL)
- witness_lockless_error(witnesses);
+ if (w != NULL) {
+ ql_reverse_foreach(w, witnesses, link) {
+ if (w->rank < rank_inclusive) {
+ break;
+ }
+ d++;
+ }
+ }
+ if (d != depth)
+ witness_depth_error(witnesses, rank_inclusive, depth);
+}
+
+JEMALLOC_INLINE void
+witness_assert_depth(tsdn_t *tsdn, unsigned depth) {
+ witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth);
+}
+
+JEMALLOC_INLINE void
+witness_assert_lockless(tsdn_t *tsdn) {
+ witness_assert_depth(tsdn, 0);
}
JEMALLOC_INLINE void