summaryrefslogtreecommitdiffstats
path: root/jemalloc
diff options
context:
space:
mode:
author: Jason Evans <je@fb.com> 2011-01-15 01:27:44 (GMT)
committer: Jason Evans <je@fb.com> 2011-01-15 01:27:44 (GMT)
commit: 10e45230940bc0db43945d7b1445557e1405ebe0 (patch)
tree: dc312549ab51e7c968817084f347f80a1598677e /jemalloc
parent: 624f2f3cc9e6aa2a5eab18bd1eb84f89a6ac8b5c (diff)
downloadjemalloc-10e45230940bc0db43945d7b1445557e1405ebe0.zip
jemalloc-10e45230940bc0db43945d7b1445557e1405ebe0.tar.gz
jemalloc-10e45230940bc0db43945d7b1445557e1405ebe0.tar.bz2
Fix a heap dumping deadlock.
Restructure the ctx initialization code such that the ctx isn't locked across portions of the initialization code where allocation could occur. Instead artificially inflate the cnt_merged.curobjs field, just as is done elsewhere to avoid similar races to the one that would otherwise be created by the reduction in locking scope. This bug affected interval- and growth-triggered heap dumping, but not manual heap dumping.
Diffstat (limited to 'jemalloc')
-rw-r--r--  jemalloc/src/prof.c | 30 ++++++++++++++++++++++----------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
index 636ccce..3566c6d 100644
--- a/jemalloc/src/prof.c
+++ b/jemalloc/src/prof.c
@@ -432,6 +432,7 @@ prof_lookup(prof_bt_t *bt)
prof_ctx_t *p;
void *v;
} ctx;
+ bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
@@ -468,12 +469,14 @@ prof_lookup(prof_bt_t *bt)
idalloc(ctx.v);
return (NULL);
}
- }
- /*
- * Acquire ctx's lock before releasing bt2ctx_mtx, in order to
- * avoid a race condition with prof_ctx_destroy().
- */
- malloc_mutex_lock(&ctx.p->lock);
+ /*
+ * Artificially raise curobjs, in order to avoid a race
+ * condition with prof_ctx_merge()/prof_ctx_destroy().
+ */
+ ctx.p->cnt_merged.curobjs++;
+ new_ctx = true;
+ } else
+ new_ctx = false;
prof_leave();
/* Link a prof_thd_cnt_t into ctx for this thread. */
@@ -498,7 +501,11 @@ prof_lookup(prof_bt_t *bt)
/* Allocate and partially initialize a new cnt. */
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
- malloc_mutex_unlock(&ctx.p->lock);
+ if (new_ctx) {
+ malloc_mutex_lock(&ctx.p->lock);
+ ctx.p->cnt_merged.curobjs--;
+ malloc_mutex_unlock(&ctx.p->lock);
+ }
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
@@ -509,12 +516,19 @@ prof_lookup(prof_bt_t *bt)
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
- malloc_mutex_unlock(&ctx.p->lock);
+ if (new_ctx) {
+ malloc_mutex_lock(&ctx.p->lock);
+ ctx.p->cnt_merged.curobjs--;
+ malloc_mutex_unlock(&ctx.p->lock);
+ }
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ malloc_mutex_lock(&ctx.p->lock);
ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
+ if (new_ctx)
+ ctx.p->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx.p->lock);
} else {
/* Move ret to the front of the LRU. */