author     Jason Evans <je@fb.com>  2015-08-25 23:13:59 (GMT)
committer  Jason Evans <je@fb.com>  2015-08-28 03:32:35 (GMT)
commit     30949da601f7405c294a71d30bd67be29cfbc2a5 (patch)
tree       e251b697d547dcbafcc6d1a8e79ef63a6d44277d /src/jemalloc.c
parent     5d2e875ac9283cb99ff714c5cb56e1fc98a7f007 (diff)
Fix arenas_cache_cleanup() and arena_get_hard().
Fix arenas_cache_cleanup() and arena_get_hard() to handle allocation/deallocation within the application's thread-specific data cleanup functions even after arenas_cache is torn down. This is a more general fix that complements 45e9f66c280e1ba8bebf7bed387a43bc9e45536d (Fix arenas_cache_cleanup().).
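The failure mode is easiest to see in a minimal standalone program, sketched below. This is not jemalloc code; app_buf_key, app_cleanup, and thread_main are hypothetical names invented for illustration. The point is that a POSIX TSD destructor may itself call into the allocator, and it may run before or after the allocator's own per-thread state (arenas_cache) has been cleaned up, so arena_get_hard() has to cope without its cache.

/*
 * Standalone sketch (not jemalloc code) of the hazard this commit
 * addresses: an application TSD destructor that itself frees memory.
 * free() can require an arena lookup, so the allocator must answer
 * correctly even after its own TSD cleanup has run for this thread.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t app_buf_key;

/*
 * Runs at thread exit, in unspecified order relative to the
 * allocator's own TSD cleanup.
 */
static void
app_cleanup(void *arg)
{

	free(arg);
}

static void *
thread_main(void *arg)
{

	(void)arg;
	pthread_setspecific(app_buf_key, malloc(64));
	return (NULL);
}

int
main(void)
{
	pthread_t thd;

	pthread_key_create(&app_buf_key, app_cleanup);
	pthread_create(&thd, NULL, thread_main, NULL);
	pthread_join(thd, NULL);
	return (0);
}

Built with cc -pthread, this exercises the window the commit hardens: app_cleanup()'s free() runs during thread teardown, when the allocator's per-thread cache may already be gone.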
Diffstat (limited to 'src/jemalloc.c')
-rw-r--r--  src/jemalloc.c  11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 0361913..df962c6 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -510,17 +510,17 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 	assert(ind < narenas_actual || !init_if_missing);
 	narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
 
-	if (!*arenas_cache_bypassp) {
+	if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
 		*arenas_cache_bypassp = true;
 		arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
 		    narenas_cache);
 		*arenas_cache_bypassp = false;
-	} else
-		arenas_cache = NULL;
+	}
 	if (arenas_cache == NULL) {
 		/*
 		 * This function must always tell the truth, even if
-		 * it's slow, so don't let OOM or recursive allocation
+		 * it's slow, so don't let OOM, thread cleanup (note
+		 * tsd_nominal check), nor recursive allocation
 		 * avoidance (note arenas_cache_bypass check) get in the
 		 * way.
 		 */
@@ -531,6 +531,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 		malloc_mutex_unlock(&arenas_lock);
 		return (arena);
 	}
+	assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
 	tsd_arenas_cache_set(tsd, arenas_cache);
 	tsd_narenas_cache_set(tsd, narenas_cache);
 }
@@ -650,8 +651,6 @@ arenas_cache_cleanup(tsd_t *tsd)
 
 	arenas_cache = tsd_arenas_cache_get(tsd);
 	if (arenas_cache != NULL) {
-		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
-		*arenas_cache_bypassp = true;
 		tsd_arenas_cache_set(tsd, NULL);
 		a0dalloc(arenas_cache);
 	}
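For reference, a hedged sketch of the control flow the patched arena_get_hard() now follows. It is simplified and standalone: tsd_is_nominal, bypass, slow_arena_lookup(), and arena_get_sketch() are stand-ins invented here, not jemalloc internals. The cache is only allocated when the thread's TSD is fully live (nominal) and the bypass flag is clear; on OOM, during cleanup, or under recursion the function falls back to the slow, always-truthful lookup, which is the invariant the new assert relies on.

/*
 * Sketch of the fixed fast-path logic; all names here are mine,
 * not jemalloc's.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } arena_t;

static bool tsd_is_nominal = true;	/* false once cleanup begins */
static bool bypass = false;		/* guards recursive allocation */
static arena_t arenas[4];		/* the "real" arena registry */

/* Slow but always-correct lookup; never touches the TSD cache. */
static arena_t *
slow_arena_lookup(unsigned ind)
{

	return (&arenas[ind]);
}

static arena_t *
arena_get_sketch(unsigned ind, unsigned narenas)
{
	arena_t **cache = NULL;
	arena_t *ret;
	unsigned i;

	if (tsd_is_nominal && !bypass) {
		/* Guard against recursive allocation on this path. */
		bypass = true;
		cache = (arena_t **)malloc(sizeof(arena_t *) * narenas);
		bypass = false;
	}
	if (cache == NULL) {
		/*
		 * OOM, thread cleanup, or recursion: tell the truth,
		 * slowly, rather than caching.
		 */
		return (slow_arena_lookup(ind));
	}
	for (i = 0; i < narenas; i++)
		cache[i] = slow_arena_lookup(i);
	ret = cache[ind];
	free(cache);	/* jemalloc stores the cache in TSD instead */
	return (ret);
}

int
main(void)
{

	printf("nominal: %p\n", (void *)arena_get_sketch(1, 4));
	tsd_is_nominal = false;	/* simulate thread teardown */
	printf("cleanup: %p\n", (void *)arena_get_sketch(1, 4));
	return (0);
}

The main() exercises both paths: the first call may cache, while the second, made with TSD marked non-nominal, is forced onto the slow path and still returns the correct arena.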