author	Jason Evans <je@facebook.com>	2010-04-14 18:27:13 (GMT)
committer	Jason Evans <je@facebook.com>	2010-04-14 18:27:13 (GMT)
commit	5055f4516c8852e67668b0e746863a7d6a1c148e (patch)
tree	0199eb3067af365c1c5268a49cf4b76d7efd2e15
parent	38cda690ddd6c9db0321b06abaa4d19f884326b6 (diff)
Fix tcache crash during thread cleanup.
Properly maintain tcache_bin_t's avail pointer such that it is NULL if no objects are cached. This only caused problems during thread cache destruction, since cache flushing otherwise never occurs on an empty bin.
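
Concretely, the invariant this commit restores is that an empty bin always has a NULL avail pointer. A minimal sketch of what that means, with tcache_bin_t reduced to the two fields involved (the toy_ names are illustrative, not jemalloc's):

	#include <assert.h>
	#include <stddef.h>

	/* tcache_bin_t reduced to the two relevant fields (a sketch). */
	typedef struct {
		void	*avail;		/* Cached objects, linked through their
					 * first word; must be NULL when
					 * ncached == 0. */
		unsigned ncached;	/* Number of cached objects. */
	} toy_bin_t;

	/* The assertion this commit adds to both flush paths. */
	static void
	toy_bin_check(const toy_bin_t *tbin)
	{

		assert(tbin->ncached > 0 || tbin->avail == NULL);
	}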
-rw-r--r--	jemalloc/src/tcache.c	26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/jemalloc/src/tcache.c b/jemalloc/src/tcache.c
index ce6ec99..ace24ce 100644
--- a/jemalloc/src/tcache.c
+++ b/jemalloc/src/tcache.c
@@ -55,12 +55,14 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 {
 	void *flush, *deferred, *ptr;
 	unsigned i, nflush, ndeferred;
+	bool first_pass;
 
 	assert(binind < nbins);
 	assert(rem <= tbin->ncached);
+	assert(tbin->ncached > 0 || tbin->avail == NULL);
 
-	for (flush = tbin->avail, nflush = tbin->ncached - rem; flush != NULL;
-	    flush = deferred, nflush = ndeferred) {
+	for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
+	    true; flush != NULL; flush = deferred, nflush = ndeferred) {
 		/* Lock the arena bin associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
 		arena_t *arena = chunk->arena;
@@ -110,12 +112,9 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 		}
 		malloc_mutex_unlock(&bin->lock);
 
-		if (flush != NULL) {
-			/*
-			 * This was the first pass, and rem cached objects
-			 * remain.
-			 */
+		if (first_pass) {
 			tbin->avail = flush;
+			first_pass = false;
 		}
 	}
 
@@ -133,12 +132,14 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 {
 	void *flush, *deferred, *ptr;
 	unsigned i, nflush, ndeferred;
+	bool first_pass;
 
 	assert(binind < nhbins);
 	assert(rem <= tbin->ncached);
+	assert(tbin->ncached > 0 || tbin->avail == NULL);
 
-	for (flush = tbin->avail, nflush = tbin->ncached - rem; flush != NULL;
-	    flush = deferred, nflush = ndeferred) {
+	for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
+	    true; flush != NULL; flush = deferred, nflush = ndeferred) {
 		/* Lock the arena associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
 		arena_t *arena = chunk->arena;
@@ -183,12 +184,9 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 		}
 		malloc_mutex_unlock(&arena->lock);
 
-		if (flush != NULL) {
-			/*
-			 * This was the first pass, and rem cached objects
-			 * remain.
-			 */
+		if (first_pass) {
 			tbin->avail = flush;
+			first_pass = false;
 		}
 	}
 
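
To see why the guard change matters, here is a self-contained toy model of one flush pass (my simplification, not jemalloc's API; the real functions also loop to handle objects deferred to other arenas, which is what first_pass distinguishes). After the inner loop has walked the nflush objects being released, flush points at the first of the rem survivors, or is NULL when rem == 0; the old "if (flush != NULL)" guard therefore skipped the avail update exactly when the bin was being emptied, leaving avail pointing at freed memory for thread-cache destruction to walk later.

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef struct {
		void	*avail;		/* Objects linked through their first
					 * word, as in the real tcache. */
		unsigned ncached;
	} toy_bin_t;

	/* Toy flush: release all but rem cached objects, in one pass. */
	static void
	toy_flush(toy_bin_t *tbin, unsigned rem)
	{
		void *flush, *ptr;
		unsigned i, nflush;

		assert(rem <= tbin->ncached);
		assert(tbin->ncached > 0 || tbin->avail == NULL);

		for (flush = tbin->avail, nflush = tbin->ncached - rem,
		    i = 0; i < nflush; i++) {
			ptr = flush;
			flush = *(void **)ptr; /* Advance before freeing. */
			free(ptr);
		}
		/*
		 * flush now points at the first of the rem survivors, or is
		 * NULL when rem == 0.  Assigning unconditionally (the effect
		 * of first_pass in the real code) preserves the invariant;
		 * the old "if (flush != NULL)" guard skipped exactly the
		 * rem == 0 case, leaving avail dangling.
		 */
		tbin->avail = flush;
		tbin->ncached = rem;
	}

	int
	main(void)
	{
		toy_bin_t tbin = {NULL, 0};
		unsigned i;

		/* Cache three objects, linked through their first words. */
		for (i = 0; i < 3; i++) {
			void **obj = malloc(64);
			*obj = tbin.avail;
			tbin.avail = obj;
			tbin.ncached++;
		}
		toy_flush(&tbin, 0);		/* Flush everything. */
		assert(tbin.avail == NULL);	/* Holds only with the fix. */
		puts("bin empty, avail == NULL");
		return (0);
	}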