author    Qi Wang <interwq@gwu.edu>    2017-03-15 22:31:37 (GMT)
committer Qi Wang <interwq@gmail.com>  2017-03-23 07:03:28 (GMT)
commit    74f78cafdaa0adc885f9670066d3ecf13aee1ba5
tree      55796a7a3bd13bf00c52c2f7969fd637ea46734e /include/jemalloc
parent    20b8c70e9f0177d3276504ec5e3f631e1b69df87
Added custom mutex spin.
A fixed maximum spin count is used; benchmark results show it resolves almost all of the contention problems observed. Since the benchmark used was rather intense, the upper bound may be a bit high, but it should still offer a good tradeoff between spinning and blocking.
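For context, here is a minimal sketch of the bounded spin-then-block pattern this commit introduces. The wrapper type, the trylock helper, and the sched_yield() backoff are illustrative stand-ins, not jemalloc's API; the real slow path (malloc_mutex_lock_slow) spins on a CPU pause hint and also records contention statistics.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

#define MALLOC_MUTEX_MAX_SPIN 250

/* Illustrative wrapper; the real malloc_mutex_t also carries
 * profiling counters and witness (lock-order) state. */
typedef struct {
	pthread_mutex_t lock;
} spin_mutex_t;

static bool
spin_mutex_trylock(spin_mutex_t *m) {
	return pthread_mutex_trylock(&m->lock) == 0;
}

static void
spin_mutex_lock(spin_mutex_t *m) {
	/* Bounded spin: retry the cheap trylock up to a fixed count. */
	for (int cnt = 0; cnt <= MALLOC_MUTEX_MAX_SPIN; cnt++) {
		if (spin_mutex_trylock(m)) {
			return;		/* Acquired while spinning. */
		}
		sched_yield();		/* Stand-in for a CPU pause hint. */
	}
	/* Spin budget exhausted; fall back to a blocking acquire. */
	pthread_mutex_lock(&m->lock);
}

The fixed budget bounds the worst case: a waiter burns at most MALLOC_MUTEX_MAX_SPIN retries before handing the wait to the kernel, while short critical sections are usually won during the spin.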
Diffstat (limited to 'include/jemalloc')
-rw-r--r--  include/jemalloc/internal/mutex_inlines.h  12
-rw-r--r--  include/jemalloc/internal/mutex_types.h    16
2 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/include/jemalloc/internal/mutex_inlines.h b/include/jemalloc/internal/mutex_inlines.h
index d4703d2..7adcff4 100644
--- a/include/jemalloc/internal/mutex_inlines.h
+++ b/include/jemalloc/internal/mutex_inlines.h
@@ -52,11 +52,13 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 			malloc_mutex_lock_slow(mutex);
 		}
 		/* We own the lock now.  Update a few counters. */
-		mutex_prof_data_t *data = &mutex->prof_data;
-		data->n_lock_ops++;
-		if (data->prev_owner != tsdn) {
-			data->prev_owner = tsdn;
-			data->n_owner_switches++;
+		if (config_stats) {
+			mutex_prof_data_t *data = &mutex->prof_data;
+			data->n_lock_ops++;
+			if (data->prev_owner != tsdn) {
+				data->prev_owner = tsdn;
+				data->n_owner_switches++;
+			}
 		}
 	}
 	witness_lock(tsdn, &mutex->witness);
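The new config_stats guard costs nothing in non-stats builds: jemalloc defines config_stats as a compile-time constant, so the guarded counter updates become dead code the compiler eliminates. A minimal sketch of that pattern follows; JEMALLOC_STATS is the real configure-time macro, while the surrounding snippet is illustrative.

#include <stdbool.h>

/* Constant-folded feature flag: any `if (config_stats)` branch
 * compiles away entirely when stats support is configured out. */
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;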
diff --git a/include/jemalloc/internal/mutex_types.h b/include/jemalloc/internal/mutex_types.h
index 257f69c..3cc7bc2 100644
--- a/include/jemalloc/internal/mutex_types.h
+++ b/include/jemalloc/internal/mutex_types.h
@@ -4,6 +4,12 @@
 typedef struct mutex_prof_data_s mutex_prof_data_t;
 typedef struct malloc_mutex_s malloc_mutex_t;
 
+/*
+ * Based on benchmark results, a fixed spin with this amount of retries works
+ * well for our critical sections.
+ */
+#define MALLOC_MUTEX_MAX_SPIN 250
+
 #ifdef _WIN32
 #  if _WIN32_WINNT >= 0x0600
 #    define MALLOC_MUTEX_LOCK(m)  AcquireSRWLockExclusive(&(m)->lock)
@@ -45,20 +51,10 @@ typedef struct malloc_mutex_s malloc_mutex_t;
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
 #else
-/* TODO: get rid of adaptive mutex once we do our own spin. */
-#  if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
-      defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
-#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-#    define MALLOC_MUTEX_INITIALIZER \
-       {{{LOCK_PROF_DATA_INITIALIZER, \
-         PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}}, \
-        WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#  else
 #  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
 #  define MALLOC_MUTEX_INITIALIZER \
      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
       WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
-#  endif
 #endif
 
 #endif /* JEMALLOC_INTERNAL_MUTEX_TYPES_H */
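For reference, the retained initializer macro is how jemalloc sets up statically-allocated locks; a short usage sketch is below (the variable name is illustrative).

/* Static lock initialized at load time; with the adaptive-mutex
 * fallback removed, pthread builds always get PTHREAD_MUTEX_DEFAULT
 * plus jemalloc's own bounded spin. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;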