author     Qi Wang <interwq@gwu.edu>    2017-06-07 23:12:50 (GMT)
committer  Qi Wang <interwq@gmail.com>  2017-06-08 17:02:18 (GMT)
commit     5642f03cae54eb8798dc4fa5ea28d9569572c1af (patch)
tree       1aba5f85db4c5cfc18205b551f7031fdcc3a11f0
parent     73713fbb27cd1cf6754259b19a960e91a16c3638 (diff)
Add internal tsd for background_thread.
-rw-r--r--  include/jemalloc/internal/tsd.h  15
-rw-r--r--  src/background_thread.c           9
-rw-r--r--  src/tsd.c                        11
3 files changed, 24 insertions, 11 deletions
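
In short, tsd_fetch_impl() gains an internal flag, a new tsd_internal_fetch() entry point passes internal = true, and tsd_fetch_slow() uses the flag to give jemalloc's own background threads a minimal, side-effect-free initialization. A standalone sketch of that dispatch follows; the names mirror the identifiers in the diff below, but every type and body here is a stub invented for illustration (the real tsd machinery is far richer):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for jemalloc's per-thread state. */
typedef struct {
        bool initialized;
        bool internal;  /* set once, on the first (slow-path) fetch */
} tsd_t;

static __thread tsd_t tsd_tls;

/* Slow path: runs on a thread's first fetch; the internal flag picks
 * minimal, side-effect-free setup over full application-thread setup. */
static tsd_t *
tsd_fetch_slow(tsd_t *tsd, bool internal) {
        tsd->internal = internal;
        tsd->initialized = true;
        return tsd;
}

static tsd_t *
tsd_fetch_impl(bool init, bool internal) {
        (void)init;
        tsd_t *tsd = &tsd_tls;
        if (!tsd->initialized) {        /* slow path: first touch only */
                return tsd_fetch_slow(tsd, internal);
        }
        return tsd;     /* fast path never looks at the flag */
}

/* The two entry points the patch ends up with. */
static tsd_t *tsd_internal_fetch(void) { return tsd_fetch_impl(true, true); }
static tsd_t *tsd_fetch(void) { return tsd_fetch_impl(true, false); }

int
main(void) {
        printf("first fetch, internal: %d\n", (int)tsd_internal_fetch()->internal);
        printf("later fetch, internal: %d\n", (int)tsd_fetch()->internal);
        return 0;
}

Note that the flag only matters on a thread's first fetch; the fast path ignores it, matching the diff below, where internal is consulted only inside tsd_fetch_slow().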
diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
index 4efaf4e..cab0b2f 100644
--- a/include/jemalloc/internal/tsd.h
+++ b/include/jemalloc/internal/tsd.h
@@ -155,7 +155,7 @@ void malloc_tsd_cleanup_register(bool (*f)(void));
 tsd_t *malloc_tsd_boot0(void);
 void malloc_tsd_boot1(void);
 void tsd_cleanup(void *arg);
-tsd_t *tsd_fetch_slow(tsd_t *tsd);
+tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
 void tsd_slow_update(tsd_t *tsd);
 
 /*
@@ -250,7 +250,7 @@ tsd_fast(tsd_t *tsd) {
 }
 
 JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init) {
+tsd_fetch_impl(bool init, bool internal) {
         tsd_t *tsd = tsd_get(init);
 
         if (!init && tsd_get_allocates() && tsd == NULL) {
@@ -259,7 +259,7 @@ tsd_fetch_impl(bool init) {
         assert(tsd != NULL);
 
         if (unlikely(tsd->state != tsd_state_nominal)) {
-                return tsd_fetch_slow(tsd);
+                return tsd_fetch_slow(tsd, internal);
         }
         assert(tsd_fast(tsd));
         tsd_assert_fast(tsd);
@@ -268,8 +268,13 @@ tsd_fetch_impl(bool init) {
 }
 
 JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_internal_fetch(void) {
+        return tsd_fetch_impl(true, true);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
 tsd_fetch(void) {
-        return tsd_fetch_impl(true);
+        return tsd_fetch_impl(true, false);
 }
 
 static inline bool
@@ -283,7 +288,7 @@ tsdn_fetch(void) {
                 return NULL;
         }
 
-        return tsd_tsdn(tsd_fetch_impl(false));
+        return tsd_tsdn(tsd_fetch_impl(false, false));
 }
 
 JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
diff --git a/src/background_thread.c b/src/background_thread.c
index 50812c3..190fa2f 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -316,12 +316,11 @@ background_thread_entry(void *ind_arg) {
                 set_current_thread_affinity((int)thread_ind);
         }
         /*
-         * Start periodic background work. We avoid fetching tsd to keep the
-         * background thread "outside", since there may be side effects, for
-         * example triggering new arena creation (which in turn triggers
-         * background thread creation).
+         * Start periodic background work. We use internal tsd which avoids
+         * side effects, for example triggering new arena creation (which in
+         * turn triggers another background thread creation).
         */
-        background_work(TSDN_NULL, thread_ind);
+        background_work(tsd_tsdn(tsd_internal_fetch()), thread_ind);
 
         assert(pthread_equal(pthread_self(),
             background_thread_info[thread_ind].thread));
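
Previously the thread ran its work loop with TSDN_NULL, staying "outside" tsd entirely so that fetching tsd could not recursively trigger arena creation and, in turn, more background thread creation. The internal fetch preserves that guarantee while giving the thread a real tsdn to pass around. A self-contained model of the pattern, with all names invented for illustration (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-thread state for a hypothetical worker; zero-initialized. */
typedef struct {
        bool internal;  /* minimal setup, no cleanup ever registered */
        long work_done;
} worker_tsd_t;

static __thread worker_tsd_t worker_tsd;

/* Analogue of tsd_internal_fetch(): claim lightweight, cleanup-free
 * thread-specific state; nothing in here can spawn another worker. */
static worker_tsd_t *
worker_tsd_internal_fetch(void) {
        worker_tsd.internal = true;
        return &worker_tsd;
}

static void *
worker_entry(void *arg) {
        (void)arg;
        worker_tsd_t *tsd = worker_tsd_internal_fetch();
        tsd->work_done++;       /* periodic background work would go here */
        printf("internal=%d work_done=%ld\n", tsd->internal, tsd->work_done);
        return NULL;
}

int
main(void) {
        pthread_t th;
        if (pthread_create(&th, NULL, worker_entry, NULL) != 0) {
                return 1;
        }
        pthread_join(th, NULL);
        return 0;
}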
diff --git a/src/tsd.c b/src/tsd.c
index 6eb3b88..9733033 100644
--- a/src/tsd.c
+++ b/src/tsd.c
@@ -103,7 +103,16 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
 }
 
 tsd_t *
-tsd_fetch_slow(tsd_t *tsd) {
+tsd_fetch_slow(tsd_t *tsd, bool internal) {
+        if (internal) {
+                /* For internal background threads use only. */
+                assert(tsd->state == tsd_state_uninitialized);
+                tsd->state = tsd_state_reincarnated;
+                tsd_set(tsd);
+                tsd_data_init_nocleanup(tsd);
+                return tsd;
+        }
+
         if (tsd->state == tsd_state_nominal_slow) {
                 /* On slow path but no work needed. */
                 assert(malloc_slow || !tsd_tcache_enabled_get(tsd) ||
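
One point worth spelling out about the internal branch above: it asserts the tsd is still tsd_state_uninitialized, i.e. the background thread itself must be the first thing to touch its slot, and then parks the state at tsd_state_reincarnated, pairing with tsd_data_init_nocleanup() so that no cleanup work runs at thread exit. A compact model of that invariant, with stand-in names for jemalloc's tsd_state_* constants:

#include <assert.h>
#include <stdio.h>

typedef enum {
        STATE_UNINITIALIZED,
        STATE_NOMINAL,
        STATE_REINCARNATED      /* terminal: no cleanup at thread exit */
} state_t;

static state_t
internal_slow_fetch(state_t cur) {
        /* The internal thread must be the first toucher of its tsd... */
        assert(cur == STATE_UNINITIALIZED);
        /* ...and it stays in a cleanup-free state for its lifetime. */
        return STATE_REINCARNATED;
}

int
main(void) {
        state_t s = internal_slow_fetch(STATE_UNINITIALIZED);
        printf("state=%d\n", (int)s);
        return 0;
}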