author    Qi Wang <interwq@gwu.edu>    2017-03-28 04:50:38 (GMT)
committer Qi Wang <interwq@gmail.com>  2017-04-07 16:55:14 (GMT)
commit    fde3e20cc04db459f3c76134bc6dfb0ee5c422bb
tree      c5dd961498e734f52f02c40159ce0d8a52bb0af6
parent    eeabdd246693fbf7c54e03ff8957889e63dc9a0c
Integrate auto tcache into TSD.
The embedded tcache is initialized upon TSD initialization. The avail arrays for the tbins are allocated and deallocated accordingly during init and cleanup. With this change, the pointer to the auto tcache is always available, as long as we have access to the TSD. tcache_available() (called from tcache_get()) checks whether the tcache should be used.
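
A minimal sketch of the resulting call pattern (condensed from the inlines added to jemalloc_internal.h.in below; not additional code in the patch):

	/* The auto tcache is embedded in tsd_t, so there is no separate
	 * allocation and no NULL-pointer state to lazily fill in. */
	static void *
	sketch_alloc(tsd_t *tsd, size_t size, szind_t ind) {
		tcache_t *tcache = tcache_get(tsd);	/* NULL if unavailable. */
		return iallocztm(tsd_tsdn(tsd), size, ind, false, tcache,
		    false, NULL, true);
	}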
-rw-r--r--  include/jemalloc/internal/arena_inlines_a.h        10
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in    96
-rw-r--r--  include/jemalloc/internal/private_symbols.txt        8
-rw-r--r--  include/jemalloc/internal/rtree_inlines.h            7
-rw-r--r--  include/jemalloc/internal/tcache_externs.h           7
-rw-r--r--  include/jemalloc/internal/tcache_inlines.h          65
-rw-r--r--  include/jemalloc/internal/tcache_structs.h           6
-rw-r--r--  include/jemalloc/internal/tcache_types.h             3
-rw-r--r--  include/jemalloc/internal/tsd_externs.h              5
-rw-r--r--  include/jemalloc/internal/tsd_structs.h              7
-rw-r--r--  include/jemalloc/internal/tsd_types.h               12
-rw-r--r--  src/ctl.c                                            6
-rw-r--r--  src/jemalloc.c                                      16
-rw-r--r--  src/tcache.c                                       207
-rw-r--r--  src/tsd.c                                            5
-rw-r--r--  test/unit/tsd.c                                      6
16 files changed, 294 insertions, 172 deletions
diff --git a/include/jemalloc/internal/arena_inlines_a.h b/include/jemalloc/internal/arena_inlines_a.h
index e1c4765..cf92342 100644
--- a/include/jemalloc/internal/arena_inlines_a.h
+++ b/include/jemalloc/internal/arena_inlines_a.h
@@ -57,12 +57,10 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
- if (config_tcache) {
- tcache_t *tcache = tsd_tcache_get(tsd);
- if (tcache) {
- tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
- newarena);
- }
+ tcache_t *tcache = tcache_get(tsd);
+ if (config_tcache && tcache) {
+ tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+ newarena);
}
}
}
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 04f91c0..449a4ab 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -488,23 +488,24 @@ extern size_t const index2size_tab[NSIZES];
*/
extern uint8_t const size2index_tab[];
-void *a0malloc(size_t size);
-void a0dalloc(void *ptr);
-void *bootstrap_malloc(size_t size);
-void *bootstrap_calloc(size_t num, size_t size);
-void bootstrap_free(void *ptr);
-void arena_set(unsigned ind, arena_t *arena);
-unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
-void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-void iarena_cleanup(tsd_t *tsd);
-void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
-void jemalloc_prefork(void);
-void jemalloc_postfork_parent(void);
-void jemalloc_postfork_child(void);
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
+void arena_set(unsigned ind, arena_t *arena);
+unsigned narenas_total_get(void);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void iarena_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_tdata_cleanup(tsd_t *tsd);
+void jemalloc_prefork(void);
+void jemalloc_postfork_parent(void);
+void jemalloc_postfork_child(void);
+bool malloc_initialized(void);
#include "jemalloc/internal/nstime_externs.h"
#include "jemalloc/internal/ckh_externs.h"
@@ -559,6 +560,8 @@ arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
+bool tcache_available(tsd_t *tsd);
+tcache_t *tcache_get(tsd_t *tsd);
malloc_cpuid_t malloc_getcpu(void);
unsigned percpu_arena_choose(void);
unsigned percpu_arena_ind_limit(void);
@@ -929,6 +932,38 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
}
return &tdata->decay_ticker;
}
+
+JEMALLOC_ALWAYS_INLINE bool
+tcache_available(tsd_t *tsd) {
+ cassert(config_tcache);
+
+ /*
+ * Thread specific auto tcache might be unavailable if: 1) during tcache
+ * initialization, or 2) disabled through thread.tcache.enabled mallctl
+ * or config options. This check covers all cases.
+ */
+ if (likely(tsd_tcache_enabled_get(tsd) == tcache_enabled_true)) {
+ /* Associated arena == null implies tcache init in progress. */
+ if (tsd_tcachep_get(tsd)->arena != NULL) {
+ assert(tsd_tcachep_get(tsd)->tbins[0].avail != NULL);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcache_get(tsd_t *tsd) {
+ if (!config_tcache) {
+ return NULL;
+ }
+ if (!tcache_available(tsd)) {
+ return NULL;
+ }
+
+ return tsd_tcachep_get(tsd);
+}
#endif
#include "jemalloc/internal/rtree_inlines.h"
@@ -959,9 +994,24 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL)) {
ret = arena_choose_hard(tsd, internal);
+ assert(ret);
+ if (config_tcache && tcache_available(tsd)) {
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache->arena != NULL) {
+ /* See comments in tcache_data_init().*/
+ assert(tcache->arena ==
+ arena_get(tsd_tsdn(tsd), 0, false));
+ if (tcache->arena != ret) {
+ tcache_arena_reassociate(tsd_tsdn(tsd),
+ tcache, ret);
+ }
+ } else {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache,
+ ret);
+ }
+ }
}
- assert(ret != NULL);
/*
* Note that for percpu arena, if the current arena is outside of the
* auto percpu arena range, (i.e. thread is assigned to a manually
@@ -1069,8 +1119,8 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
- return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
- false, NULL, slow_path);
+ return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
+ NULL, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
@@ -1102,7 +1152,7 @@ ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
- tcache_get(tsd, true), false, NULL);
+ tcache_get(tsd), false, NULL);
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -1127,7 +1177,7 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
- idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
+ idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), false, true);
}
JEMALLOC_ALWAYS_INLINE void
@@ -1199,7 +1249,7 @@ JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
bool zero) {
return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
- tcache_get(tsd, true), NULL);
+ tcache_get(tsd), NULL);
}
JEMALLOC_ALWAYS_INLINE bool
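
The arena_choose_impl() hunk above pairs the tcache with an arena the first time a thread chooses one; the logic, condensed as a sketch:

	/* After arena_choose_hard() picks `ret` for this thread: */
	if (config_tcache && tcache_available(tsd)) {
		tcache_t *tcache = tcache_get(tsd);
		if (tcache->arena == NULL) {
			/* tcache enabled before any arena was chosen. */
			tcache_arena_associate(tsd_tsdn(tsd), tcache, ret);
		} else if (tcache->arena != ret) {
			/* Bootstrapped against a0; move to the real arena. */
			tcache_arena_reassociate(tsd_tsdn(tsd), tcache, ret);
		}
	}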
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 1cced60..e2bb059 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -274,6 +274,7 @@ lg_floor
lg_prof_sample
malloc_cprintf
malloc_getcpu
+malloc_initialized
malloc_mutex_prof_data_reset
malloc_mutex_assert_not_owner
malloc_mutex_assert_owner
@@ -293,7 +294,6 @@ malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
-malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
@@ -475,22 +475,23 @@ tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
+tcache_arena_associate
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
tcache_boot
tcache_cleanup
-tcache_create
+tcache_create_explicit
tcache_dalloc_large
tcache_dalloc_small
+tcache_data_init
tcache_enabled_get
tcache_enabled_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
-tcache_get_hard
tcache_maxclass
tcache_prefork
tcache_postfork_child
@@ -521,7 +522,6 @@ tsd_booted
tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
-tsd_data_init
tsd_fetch
tsd_fetch_impl
tsd_get
diff --git a/include/jemalloc/internal/rtree_inlines.h b/include/jemalloc/internal/rtree_inlines.h
index bebe49e..ce03c57 100644
--- a/include/jemalloc/internal/rtree_inlines.h
+++ b/include/jemalloc/internal/rtree_inlines.h
@@ -321,13 +321,18 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
assert(!dependent || !init_missing);
uintptr_t leafkey = rtree_leafkey(key);
+ assert(leafkey != RTREE_LEAFKEY_INVALID);
+
#define RTREE_CACHE_CHECK(i) do { \
if (likely(rtree_ctx->cache[i].leafkey == leafkey)) { \
rtree_leaf_elm_t *leaf = rtree_ctx->cache[i].leaf; \
assert(leaf != NULL); \
if (i > 0) { \
/* Bubble up by one. */ \
- rtree_ctx->cache[i] = rtree_ctx->cache[i - 1]; \
+ rtree_ctx->cache[i].leafkey = \
+ rtree_ctx->cache[i - 1].leafkey; \
+ rtree_ctx->cache[i].leaf = \
+ rtree_ctx->cache[i - 1].leaf; \
rtree_ctx->cache[i - 1].leafkey = leafkey; \
rtree_ctx->cache[i - 1].leaf = leaf; \
} \
diff --git a/include/jemalloc/internal/tcache_externs.h b/include/jemalloc/internal/tcache_externs.h
index 8364303..75ff321 100644
--- a/include/jemalloc/internal/tcache_externs.h
+++ b/include/jemalloc/internal/tcache_externs.h
@@ -35,16 +35,19 @@ void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *arena);
-tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
+tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
+void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
+void tcache_flush(void);
+bool tsd_tcache_data_init(tsd_t *tsd);
+bool tsd_tcache_enabled_data_init(tsd_t *tsd);
#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h
index fd7e176..c366096 100644
--- a/include/jemalloc/internal/tcache_inlines.h
+++ b/include/jemalloc/internal/tcache_inlines.h
@@ -4,9 +4,9 @@
#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
-bool tcache_enabled_get(void);
-tcache_t *tcache_get(tsd_t *tsd, bool create);
-void tcache_enabled_set(bool enabled);
+bool tcache_enabled_get(tsd_t *tsd);
+tcache_t *tcache_get(tsd_t *tsd);
+void tcache_enabled_set(tsd_t *tsd, bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
@@ -20,68 +20,32 @@ tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-JEMALLOC_INLINE void
-tcache_flush(void) {
- tsd_t *tsd;
-
- cassert(config_tcache);
-
- tsd = tsd_fetch();
- tcache_cleanup(tsd);
-}
-
JEMALLOC_INLINE bool
-tcache_enabled_get(void) {
- tsd_t *tsd;
+tcache_enabled_get(tsd_t *tsd) {
tcache_enabled_t tcache_enabled;
cassert(config_tcache);
- tsd = tsd_fetch();
tcache_enabled = tsd_tcache_enabled_get(tsd);
- if (tcache_enabled == tcache_enabled_default) {
- tcache_enabled = (tcache_enabled_t)opt_tcache;
- tsd_tcache_enabled_set(tsd, tcache_enabled);
- }
+ assert(tcache_enabled != tcache_enabled_default);
return (bool)tcache_enabled;
}
JEMALLOC_INLINE void
-tcache_enabled_set(bool enabled) {
- tsd_t *tsd;
- tcache_enabled_t tcache_enabled;
-
+tcache_enabled_set(tsd_t *tsd, bool enabled) {
cassert(config_tcache);
- tsd = tsd_fetch();
-
- tcache_enabled = (tcache_enabled_t)enabled;
- tsd_tcache_enabled_set(tsd, tcache_enabled);
+ tcache_enabled_t old = tsd_tcache_enabled_get(tsd);
- if (!enabled) {
+ if ((old != tcache_enabled_true) && enabled) {
+ tsd_tcache_data_init(tsd);
+ } else if ((old == tcache_enabled_true) && !enabled) {
tcache_cleanup(tsd);
}
-}
-
-JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(tsd_t *tsd, bool create) {
- tcache_t *tcache;
-
- if (!config_tcache) {
- return NULL;
- }
-
- tcache = tsd_tcache_get(tsd);
- if (!create) {
- return tcache;
- }
- if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
- tcache = tcache_get_hard(tsd);
- tsd_tcache_set(tsd, tcache);
- }
-
- return tcache;
+ /* Commit the state last. Above calls check current state. */
+ tcache_enabled_t tcache_enabled = (tcache_enabled_t)enabled;
+ tsd_tcache_enabled_set(tsd, tcache_enabled);
}
JEMALLOC_ALWAYS_INLINE void
@@ -300,8 +264,7 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
tcaches_t *elm = &tcaches[ind];
if (unlikely(elm->tcache == NULL)) {
- elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
- NULL));
+ elm->tcache = tcache_create_explicit(tsd);
}
return elm->tcache;
}
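
tcache_enabled_set() is driven by the thread.tcache.enabled mallctl (see the src/ctl.c hunk below). A minimal usage sketch from application code:

	#include <jemalloc/jemalloc.h>

	static void
	toggle_thread_tcache(void) {
		bool enabled = false;
		/* Disabling flushes the cache and frees the avail array via
		 * tcache_cleanup(). */
		mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
		    sizeof(enabled));
		/* Re-enabling reallocates it via tsd_tcache_data_init(). */
		enabled = true;
		mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
		    sizeof(enabled));
	}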
diff --git a/include/jemalloc/internal/tcache_structs.h b/include/jemalloc/internal/tcache_structs.h
index a9b7031..c9c05cd 100644
--- a/include/jemalloc/internal/tcache_structs.h
+++ b/include/jemalloc/internal/tcache_structs.h
@@ -36,13 +36,17 @@ struct tcache_s {
ticker_t gc_ticker; /* Drives incremental GC. */
szind_t next_gc_bin; /* Next bin to GC. */
arena_t *arena; /* Associated arena. */
- tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
* array. During tcache initialization, the avail pointer in each
* element of tbins is initialized to point to the proper offset within
* this array.
*/
+#ifdef JEMALLOC_TCACHE
+ tcache_bin_t tbins[NSIZES];
+#else
+ tcache_bin_t tbins[0];
+#endif
};
/* Linkage for list of available (previously used) explicit tcache IDs. */
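
Two layouts now coexist: an explicit tcache remains a single allocation with the avail stacks trailing the struct, while the TSD-embedded auto tcache has a fixed tbins array whose avail stacks live in a separate cacheline-aligned allocation. The explicit-layout size math, restated as a sketch from the tcache_create_explicit() hunk below (the stack_nelms accumulation line is unchanged context elided from the hunk):

	size_t size = sizeof(tcache_t);
	size = PTR_CEILING(size);		/* Naturally align the stacks. */
	size_t stack_offset = size;		/* Stacks start here. */
	size += stack_nelms * sizeof(void *);	/* Every bin's slots. */
	size = sa2u(size, CACHELINE);		/* Avoid false sharing. */
	/* tcache_init() then points tbins[i].avail into this region. */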
diff --git a/include/jemalloc/internal/tcache_types.h b/include/jemalloc/internal/tcache_types.h
index 2d396bf..8624ac2 100644
--- a/include/jemalloc/internal/tcache_types.h
+++ b/include/jemalloc/internal/tcache_types.h
@@ -47,4 +47,7 @@ typedef struct tcaches_s tcaches_t;
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
+/* Used in TSD static initializer only. Real init in tcache_data_init(). */
+#define TCACHE_ZERO_INITIALIZER {{NULL}}
+
#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
diff --git a/include/jemalloc/internal/tsd_externs.h b/include/jemalloc/internal/tsd_externs.h
index 9b88a56..d15fd59 100644
--- a/include/jemalloc/internal/tsd_externs.h
+++ b/include/jemalloc/internal/tsd_externs.h
@@ -3,7 +3,6 @@
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
-void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
@@ -13,7 +12,7 @@ void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
-void tsd_cleanup(void *arg);
-bool tsd_data_init(void *arg);
+bool tsd_data_init(void *arg);
+void tsd_cleanup(void *arg);
#endif /* JEMALLOC_INTERNAL_TSD_EXTERNS_H */
diff --git a/include/jemalloc/internal/tsd_structs.h b/include/jemalloc/internal/tsd_structs.h
index b4ac09f..d399563 100644
--- a/include/jemalloc/internal/tsd_structs.h
+++ b/include/jemalloc/internal/tsd_structs.h
@@ -16,7 +16,7 @@ struct tsd_init_head_s {
#define MALLOC_TSD \
/* O(name, type, [gs]et, init, cleanup) */ \
- O(tcache, tcache_t *, yes, no, yes) \
+ O(tcache, tcache_t, yes, no, yes) \
O(thread_allocated, uint64_t, yes, no, no) \
O(thread_deallocated, uint64_t, yes, no, no) \
O(prof_tdata, prof_tdata_t *, yes, no, yes) \
@@ -26,7 +26,7 @@ struct tsd_init_head_s {
O(narenas_tdata, unsigned, yes, no, no) \
O(arenas_tdata_bypass, bool, no, no, no) \
O(tcache_enabled, tcache_enabled_t, \
- yes, no, no) \
+ yes, yes, no) \
O(rtree_ctx, rtree_ctx_t, no, yes, no) \
O(witnesses, witness_list_t, no, no, yes) \
O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t, \
@@ -35,7 +35,7 @@ struct tsd_init_head_s {
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
- NULL, \
+ TCACHE_ZERO_INITIALIZER, \
0, \
0, \
NULL, \
@@ -69,6 +69,7 @@ struct tsdn_s {
};
static const tsd_t tsd_initializer = TSD_INITIALIZER;
+UNUSED static const void *malloc_tsd_no_cleanup = (void *)0;
malloc_tsd_types(, tsd_t)
diff --git a/include/jemalloc/internal/tsd_types.h b/include/jemalloc/internal/tsd_types.h
index 195b649..29c6378 100644
--- a/include/jemalloc/internal/tsd_types.h
+++ b/include/jemalloc/internal/tsd_types.h
@@ -357,8 +357,10 @@ a_name##tsd_boot1(void) { \
" TSD for "#a_name"\n"); \
abort(); \
} \
- memcpy(wrapper, &a_name##tsd_boot_wrapper, \
- sizeof(a_name##tsd_wrapper_t)); \
+ a_name##tsd_boot_wrapper.initialized = false; \
+ a_cleanup(&a_name##tsd_boot_wrapper.val); \
+ wrapper->initialized = false; \
+ wrapper->val = a_initializer; \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
@@ -487,8 +489,10 @@ a_name##tsd_boot1(void) { \
" TSD for "#a_name"\n"); \
abort(); \
} \
- memcpy(wrapper, &a_name##tsd_boot_wrapper, \
- sizeof(a_name##tsd_wrapper_t)); \
+ a_name##tsd_boot_wrapper.initialized = false; \
+ a_cleanup(&a_name##tsd_boot_wrapper.val); \
+ wrapper->initialized = false; \
+ wrapper->val = a_initializer; \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
diff --git a/src/ctl.c b/src/ctl.c
index 36f5634..a59a741 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1532,7 +1532,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Set new arena/tcache associations. */
arena_migrate(tsd, oldind, newind);
if (config_tcache) {
- tcache_t *tcache = tsd_tcache_get(tsd);
+ tcache_t *tcache = tsd_tcachep_get(tsd);
if (tcache != NULL) {
tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
newarena);
@@ -1564,13 +1564,13 @@ thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
return ENOENT;
}
- oldval = tcache_enabled_get();
+ oldval = tcache_enabled_get(tsd);
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
- tcache_enabled_set(*(bool *)newp);
+ tcache_enabled_set(tsd, *(bool *)newp);
}
READ(oldval, bool);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 94ae030..9d66f7f 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -275,7 +275,7 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
-JEMALLOC_ALWAYS_INLINE_C bool
+bool
malloc_initialized(void) {
return (malloc_init_state == malloc_init_initialized);
}
@@ -1536,7 +1536,7 @@ imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
/* Fill in the tcache. */
if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
- tcache = tcache_get(tsd, true);
+ tcache = tcache_get(tsd);
} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
tcache = NULL;
} else {
@@ -2056,7 +2056,7 @@ je_realloc(void *ptr, size_t size) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
- ifree(tsd, ptr, tcache_get(tsd, false), true);
+ ifree(tsd, ptr, tcache_get(tsd), true);
return NULL;
}
size = 1;
@@ -2113,9 +2113,9 @@ je_free(void *ptr) {
tsd_t *tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (likely(!malloc_slow)) {
- ifree(tsd, ptr, tcache_get(tsd, false), false);
+ ifree(tsd, ptr, tcache_get(tsd), false);
} else {
- ifree(tsd, ptr, tcache_get(tsd, false), true);
+ ifree(tsd, ptr, tcache_get(tsd), true);
}
witness_assert_lockless(tsd_tsdn(tsd));
}
@@ -2393,7 +2393,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
}
} else {
- tcache = tcache_get(tsd, true);
+ tcache = tcache_get(tsd);
}
old_usize = isalloc(tsd_tsdn(tsd), ptr);
@@ -2605,7 +2605,7 @@ je_dallocx(void *ptr, int flags) {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
}
} else {
- tcache = tcache_get(tsd, false);
+ tcache = tcache_get(tsd);
}
UTRACE(ptr, 0, 0);
@@ -2652,7 +2652,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
}
} else {
- tcache = tcache_get(tsd, false);
+ tcache = tcache_get(tsd);
}
UTRACE(ptr, 0, 0);
diff --git a/src/tcache.c b/src/tcache.c
index 6057c89..aa2917b 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -4,7 +4,13 @@
/******************************************************************************/
/* Data. */
-bool opt_tcache = true;
+bool opt_tcache =
+#ifdef JEMALLOC_TCACHE
+ true
+#else
+ false
+#endif
+ ;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
@@ -78,6 +84,7 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
void *ret;
+ assert(tcache->arena);
arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof) {
@@ -271,9 +278,11 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
}
-static void
+void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+ assert(tcache->arena == NULL);
tcache->arena = arena;
+
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
@@ -286,6 +295,7 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
arena_t *arena = tcache->arena;
+ assert(arena);
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
@@ -304,6 +314,7 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
tcache_stats_merge(tsdn, tcache, arena);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
+ tcache->arena = NULL;
}
void
@@ -312,30 +323,101 @@ tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
tcache_arena_associate(tsdn, tcache, arena);
}
-tcache_t *
-tcache_get_hard(tsd_t *tsd) {
- arena_t *arena;
+bool
+tsd_tcache_enabled_data_init(tsd_t *tsd) {
+ /* Called upon tsd initialization. */
+ tsd_tcache_enabled_set(tsd, (tcache_enabled_t)opt_tcache);
+ if (opt_tcache) {
+ /* Trigger tcache init. */
+ tsd_tcache_data_init(tsd);
+ }
- if (!tcache_enabled_get()) {
- if (tsd_nominal(tsd)) {
- tcache_enabled_set(false); /* Memoize. */
- }
- return NULL;
+ return false;
+}
+
+/* Initialize auto tcache (embedded in TSD). */
+static void
+tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
+ memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
+ tcache->prof_accumbytes = 0;
+ tcache->next_gc_bin = 0;
+ tcache->arena = NULL;
+
+ ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+
+ size_t stack_offset = 0;
+ assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+ memset(tcache->tbins, 0, sizeof(tcache_bin_t) * nhbins);
+ for (unsigned i = 0; i < nhbins; i++) {
+ tcache->tbins[i].lg_fill_div = 1;
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ /*
+ * avail points past the available space. Allocations will
+ * access the slots toward higher addresses (for the benefit of
+ * prefetch).
+ */
+ tcache->tbins[i].avail = (void **)((uintptr_t)avail_stack +
+ (uintptr_t)stack_offset);
}
- arena = arena_choose(tsd, NULL);
- if (unlikely(arena == NULL)) {
- return NULL;
+ assert(stack_offset == stack_nelms * sizeof(void *));
+}
+
+/* Initialize auto tcache (embedded in TSD). */
+bool
+tsd_tcache_data_init(tsd_t *tsd) {
+ if (!config_tcache) {
+ return false;
+ }
+
+ tcache_t *tcache = &tsd->tcache;
+ assert(tcache->tbins[0].avail == NULL);
+ size_t size = stack_nelms * sizeof(void *);
+ /* Avoid false cacheline sharing. */
+ size = sa2u(size, CACHELINE);
+
+ /* Manually initialize rcache as we may need it for allocation. */
+ tsd_rtree_ctx_data_init(tsd);
+
+ void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
+ NULL, true, arena_get(TSDN_NULL, 0, true));
+ if (avail_array == NULL) {
+ return true;
}
- return tcache_create(tsd_tsdn(tsd), arena);
+
+ tcache_init(tsd, tcache, avail_array);
+ /*
+ * Initialization is a bit tricky here. After malloc init is done, all
+ * threads can rely on arena_choose and associate tcache accordingly.
+ * However, the thread that does actual malloc bootstrapping relies on
+ * functional tsd, and it can only rely on a0. In that case, we
+ * associate its tcache to a0 temporarily, and later on
+ * arena_choose_hard() will re-associate properly.
+ */
+ tcache->arena = NULL;
+ arena_t *arena;
+ if (!malloc_initialized()) {
+ /* If in initialization, assign to a0. */
+ arena = arena_get(tsd_tsdn(tsd), 0, false);
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ } else {
+ arena = arena_choose(tsd, NULL);
+ /* This may happen if thread.tcache.enabled is used. */
+ if (tcache->arena == NULL) {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ }
+ }
+ assert(arena == tcache->arena);
+
+ return false;
}
+/* Created manual tcache for tcache.create mallctl. */
tcache_t *
-tcache_create(tsdn_t *tsdn, arena_t *arena) {
+tcache_create_explicit(tsd_t *tsd) {
tcache_t *tcache;
size_t size, stack_offset;
- unsigned i;
- size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ size = sizeof(tcache_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
@@ -343,34 +425,21 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) {
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
- tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
+ tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
if (tcache == NULL) {
return NULL;
}
- tcache_arena_associate(tsdn, tcache, arena);
-
- ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
-
- assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- for (i = 0; i < nhbins; i++) {
- tcache->tbins[i].lg_fill_div = 1;
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- /*
- * avail points past the available space. Allocations will
- * access the slots toward higher addresses (for the benefit of
- * prefetch).
- */
- tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
- (uintptr_t)stack_offset);
- }
+ tcache_init(tsd, tcache,
+ (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
return tcache;
}
static void
-tcache_destroy(tsd_t *tsd, tcache_t *tcache) {
+tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
unsigned i;
for (i = 0; i < NBINS; i++) {
@@ -381,7 +450,6 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) {
assert(tbin->tstats.nrequests == 0);
}
}
-
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
@@ -391,32 +459,60 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) {
}
}
- /*
- * Get arena after flushing -- when using percpu arena, the associated
- * arena could change during flush.
- */
- arena_t *arena = arena_choose(tsd, NULL);
- tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
-
- if (config_prof && tcache->prof_accumbytes > 0 &&
+ arena_t *arena = tcache->arena;
+ if (config_prof && arena && tcache->prof_accumbytes > 0 &&
arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) {
prof_idump(tsd_tsdn(tsd));
}
+}
+
+void
+tcache_flush(void) {
+ tsd_t *tsd;
- idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+ cassert(config_tcache);
+
+ tsd = tsd_fetch();
+ tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
+}
+
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
+ tcache_flush_cache(tsd, tcache);
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
+
+ if (tsd_tcache) {
+ /* Release the avail array for the TSD embedded auto tcache. */
+ void *avail_array = (void *)((uintptr_t)tcache->tbins[0].avail -
+ (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
+ idalloctm(tsd_tsdn(tsd), avail_array, NULL, true, true);
+ } else {
+ /* Release both the tcache struct and avail array. */
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+ }
}
+/* For auto tcache (embedded in TSD) only. */
void
tcache_cleanup(tsd_t *tsd) {
- tcache_t *tcache;
-
if (!config_tcache) {
return;
}
- if ((tcache = tsd_tcache_get(tsd)) != NULL) {
- tcache_destroy(tsd, tcache);
- tsd_tcache_set(tsd, NULL);
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+ if (!tcache_available(tsd)) {
+ assert(tsd_tcache_enabled_get(tsd) == tcache_enabled_false);
+ if (config_debug) {
+ assert(tcache->tbins[0].avail == NULL);
+ }
+ return;
+ }
+ assert(tsd_tcache_enabled_get(tsd) == tcache_enabled_true);
+ assert(tcache->tbins[0].avail != NULL);
+
+ tcache_destroy(tsd, tcache, true);
+ if (config_debug) {
+ tcache->tbins[0].avail = NULL;
}
}
@@ -481,12 +577,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
goto label_return;
}
- arena_t *arena = arena_ichoose(tsd, NULL);
- if (unlikely(arena == NULL)) {
- err = true;
- goto label_return;
- }
- tcache_t *tcache = tcache_create(tsd_tsdn(tsd), arena);
+ tcache_t *tcache = tcache_create_explicit(tsd);
if (tcache == NULL) {
err = true;
goto label_return;
@@ -531,7 +622,7 @@ tcaches_flush(tsd_t *tsd, unsigned ind) {
tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcache != NULL) {
- tcache_destroy(tsd, tcache);
+ tcache_destroy(tsd, tcache, false);
}
}
@@ -544,7 +635,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) {
tcaches_avail = elm;
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcache != NULL) {
- tcache_destroy(tsd, tcache);
+ tcache_destroy(tsd, tcache, false);
}
}
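
One subtlety in tcache_destroy() above: for the TSD-embedded tcache, the base of the avail array is recovered from tbins[0].avail. Each bin's avail pointer points one past its own slot range, and bin 0's range starts at the base of the allocation, so:

	/* base == the pointer originally returned by ipallocztm(). */
	void **avail_array = (void **)((uintptr_t)tcache->tbins[0].avail -
	    (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));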
diff --git a/src/tsd.c b/src/tsd.c
index 8650211..8b54770 100644
--- a/src/tsd.c
+++ b/src/tsd.c
@@ -21,11 +21,6 @@ malloc_tsd_dalloc(void *wrapper) {
a0dalloc(wrapper);
}
-void
-malloc_tsd_no_cleanup(void *arg) {
- not_reached();
-}
-
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
#ifndef _WIN32
JEMALLOC_EXPORT
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
index e033bb7..5bfcdf4 100644
--- a/test/unit/tsd.c
+++ b/test/unit/tsd.c
@@ -5,6 +5,7 @@
typedef unsigned int data_t;
static bool data_cleanup_executed;
+static bool data_test_started;
malloc_tsd_types(data_, data_t)
malloc_tsd_protos(, data_, data_t)
@@ -13,6 +14,9 @@ void
data_cleanup(void *arg) {
data_t *data = (data_t *)arg;
+ if (!data_test_started) {
+ return;
+ }
if (!data_cleanup_executed) {
assert_x_eq(*data, THREAD_DATA,
"Argument passed into cleanup function should match tsd "
@@ -135,7 +139,9 @@ main(void) {
malloc_printf("Initialization error");
return test_status_fail;
}
+ data_test_started = false;
data_tsd_boot();
+ data_test_started = true;
return test(
test_tsd_main_thread,
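
(The data_test_started guard above is needed because malloc_tsd_boot1() now runs the cleanup function on the boot wrapper's value, per the tsd_types.h change; without the guard, data_cleanup() would fire once during data_tsd_boot(), before the test has stored anything.)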