author     Jason Evans <jasone@canonware.com>    2014-10-03 06:01:10 (GMT)
committer  Jason Evans <jasone@canonware.com>    2014-10-03 06:01:10 (GMT)
commit     20c31deaae38ed9aa4fe169ed65e0c45cd542955 (patch)
tree       8b9660e136f4ee5ede2b9e8f3a6b31bf59dc27a7 /src/prof.c
parent     cc9e626ea97eb294f337c674685b8b5c9d5524b7 (diff)
Test prof.reset mallctl and fix numerous discovered bugs.
Diffstat (limited to 'src/prof.c')
-rw-r--r--  src/prof.c  213
1 file changed, 149 insertions(+), 64 deletions(-)
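
For context, a minimal sketch of how the prof.reset mallctl exercised by this commit's new test might be invoked from application code, assuming jemalloc was built and run with profiling enabled (opt.prof). The lg_sample value below is an arbitrary illustration, not taken from the test:

	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		/* Optional write argument: new lg_prof_sample value. */
		size_t lg_sample = 19;

		/*
		 * Reset all per-thread profiling state and update the sample
		 * rate.  Passing NULL/0 for the write argument would reset
		 * counters without changing the rate.
		 */
		if (mallctl("prof.reset", NULL, NULL, (void *)&lg_sample,
		    sizeof(size_t)) != 0) {
			fprintf(stderr, "prof.reset failed\n");
			return (1);
		}
		return (0);
	}
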
diff --git a/src/prof.c b/src/prof.c
index 9f10b53..0a96d85 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -137,10 +137,18 @@ rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
{
+ int ret;
uint64_t a_uid = a->thr_uid;
uint64_t b_uid = b->thr_uid;
- return ((a_uid > b_uid) - (a_uid < b_uid));
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return (ret);
}
rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
@@ -504,7 +512,7 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
gctx->lock = prof_gctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_maybe_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
gctx->nlimbo = 1;
tctx_tree_new(&gctx->tctxs);
@@ -516,7 +524,7 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
}
static void
-prof_gctx_maybe_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
+prof_gctx_try_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
{
cassert(config_prof);
@@ -530,6 +538,7 @@ prof_gctx_maybe_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
*/
prof_enter(tdata);
malloc_mutex_lock(gctx->lock);
+ assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
@@ -605,10 +614,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
*
* 1) Sample an allocation associated with gctx.
* 2) Deallocate the sampled object.
- * 3) Successfully prof_gctx_maybe_destroy(gctx).
+ * 3) Successfully prof_gctx_try_destroy(gctx).
*
* The result would be that gctx no longer exists by the time
- * this thread accesses it in prof_gctx_maybe_destroy().
+ * this thread accesses it in prof_gctx_try_destroy().
*/
gctx->nlimbo++;
destroy_gctx = true;
@@ -616,7 +625,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
destroy_gctx = false;
malloc_mutex_unlock(gctx->lock);
if (destroy_gctx)
- prof_gctx_maybe_destroy(tsd, gctx, tdata);
+ prof_gctx_try_destroy(tsd, gctx, tdata);
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata);
@@ -657,7 +666,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
} else {
/*
* Increment nlimbo, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_maybe_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
malloc_mutex_lock(gctx.p->lock);
gctx.p->nlimbo++;
@@ -710,7 +719,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
ret.v = imalloc(tsd, sizeof(prof_tctx_t));
if (ret.p == NULL) {
if (new_gctx)
- prof_gctx_maybe_destroy(tsd, gctx, tdata);
+ prof_gctx_try_destroy(tsd, gctx, tdata);
return (NULL);
}
ret.p->tdata = tdata;
@@ -723,7 +732,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
malloc_mutex_unlock(tdata->lock);
if (error) {
if (new_gctx)
- prof_gctx_maybe_destroy(tsd, gctx, tdata);
+ prof_gctx_try_destroy(tsd, gctx, tdata);
idalloc(tsd, ret.v);
return (NULL);
}
@@ -793,6 +802,31 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
}
#ifdef JEMALLOC_JET
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return (NULL);
+}
+
+size_t
+prof_tdata_count(void)
+{
+ size_t tdata_count = 0;
+
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(&tdatas_mtx);
+
+ return (tdata_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
@@ -998,7 +1032,6 @@ static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
prof_tctx_t *ret;
- tsd_t *tsd = (tsd_t *)arg;
switch (tctx->state) {
case prof_tctx_state_nominal:
@@ -1008,9 +1041,7 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
tctx->state = prof_tctx_state_nominal;
break;
case prof_tctx_state_purgatory:
- ret = tctx_tree_next(tctxs, tctx);
- tctx_tree_remove(tctxs, tctx);
- idalloc(tsd, tctx);
+ ret = tctx;
goto label_return;
default:
not_reached();
@@ -1056,27 +1087,47 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
return (NULL);
}
-static prof_gctx_t *
-prof_gctx_finish_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
{
- tsd_t *tsd = (tsd_t *)arg;
prof_tdata_t *tdata = prof_tdata_get(tsd, false);
- prof_tctx_t *next;
- bool destroy_gctx;
-
- malloc_mutex_lock(gctx->lock);
- next = NULL;
- do {
- next = tctx_tree_iter(&gctx->tctxs, next, prof_tctx_finish_iter,
- tsd);
- } while (next != NULL);
- gctx->nlimbo--;
- destroy_gctx = prof_gctx_should_destroy(gctx);
- malloc_mutex_unlock(gctx->lock);
- if (destroy_gctx)
- prof_gctx_maybe_destroy(tsd, gctx, tdata);
+ prof_gctx_t *gctx;
- return (NULL);
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter, NULL);
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloc(tsd, to_destroy);
+ } else
+ next = NULL;
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(gctx->lock);
+ prof_gctx_try_destroy(tsd, gctx, tdata);
+ } else
+ malloc_mutex_unlock(gctx->lock);
+ }
}
static prof_tdata_t *
@@ -1085,7 +1136,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
malloc_mutex_lock(tdata->lock);
- if (tdata->state != prof_tdata_state_expired) {
+ if (!tdata->expired) {
size_t tabind;
union {
prof_tctx_t *p;
@@ -1130,6 +1181,10 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
return (NULL);
}
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
+#endif
static bool
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
@@ -1148,6 +1203,11 @@ prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
malloc_mutex_unlock(&tdatas_mtx);
return (ret);
}
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header)
+prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
+#endif
/* gctx->lock is held. */
static bool
@@ -1277,7 +1337,7 @@ prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
malloc_mutex_lock(gctx->lock);
if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
- ret = gctx_tree_next(gctxs, gctx);
+ ret = gctx;
goto label_return;
}
@@ -1302,7 +1362,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
cassert(config_prof);
- tdata = prof_tdata_get(tsd, false);
+ tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (true);
@@ -1352,7 +1412,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
if (prof_dump_close(propagate_err))
goto label_open_close_error;
- gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tsd);
+ prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(&prof_dump_mtx);
if (leakcheck)
@@ -1362,7 +1422,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
- gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tsd);
+ prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
@@ -1533,7 +1593,7 @@ prof_thr_uid_alloc(void)
}
static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid)
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim)
{
prof_tdata_t *tdata;
@@ -1546,8 +1606,10 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid)
tdata->lock = prof_tdata_mutex_choose(thr_uid);
tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
tdata->thread_name = NULL;
- tdata->state = prof_tdata_state_attached;
+ tdata->attached = true;
+ tdata->expired = false;
if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
@@ -1576,14 +1638,7 @@ prof_tdata_t *
prof_tdata_init(tsd_t *tsd)
{
- return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc()));
-}
-
-prof_tdata_t *
-prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
-{
-
- return (prof_tdata_init_impl(tsd, tdata->thr_uid));
+ return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0));
}
/* tdata->lock must be held. */
@@ -1591,22 +1646,21 @@ static bool
prof_tdata_should_destroy(prof_tdata_t *tdata)
{
- if (tdata->state == prof_tdata_state_attached)
+ if (tdata->attached)
return (false);
if (ckh_count(&tdata->bt2tctx) != 0)
return (false);
return (true);
}
+/* tdatas_mtx must be held. */
static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata)
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata)
{
assert(prof_tdata_should_destroy(tdata));
- malloc_mutex_lock(&tdatas_mtx);
tdata_tree_remove(&tdatas, tdata);
- malloc_mutex_unlock(&tdatas_mtx);
if (tdata->thread_name != NULL)
idalloc(tsd, tdata->thread_name);
@@ -1615,14 +1669,22 @@ prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata)
}
static void
-prof_tdata_state_transition(tsd_t *tsd, prof_tdata_t *tdata,
- prof_tdata_state_t state)
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata)
+{
+
+ malloc_mutex_lock(&tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata);
+ malloc_mutex_unlock(&tdatas_mtx);
+}
+
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{
bool destroy_tdata;
malloc_mutex_lock(tdata->lock);
- if (tdata->state != state) {
- tdata->state = state;
+ if (tdata->attached) {
+ tdata->attached = false;
destroy_tdata = prof_tdata_should_destroy(tdata);
} else
destroy_tdata = false;
@@ -1631,32 +1693,44 @@ prof_tdata_state_transition(tsd_t *tsd, prof_tdata_t *tdata,
prof_tdata_destroy(tsd, tdata);
}
-static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
{
+ uint64_t thr_uid = tdata->thr_uid;
+ uint64_t thr_discrim = tdata->thr_discrim + 1;
- prof_tdata_state_transition(tsd, tdata, prof_tdata_state_detached);
+ prof_tdata_detach(tsd, tdata);
+ return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim));
}
-static void
-prof_tdata_expire(tsd_t *tsd, prof_tdata_t *tdata)
+static bool
+prof_tdata_expire(prof_tdata_t *tdata)
{
+ bool destroy_tdata;
- prof_tdata_state_transition(tsd, tdata, prof_tdata_state_expired);
+ malloc_mutex_lock(tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = tdata->attached ? false :
+ prof_tdata_should_destroy(tdata);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tdata->lock);
+
+ return (destroy_tdata);
}
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
- tsd_t *tsd = (tsd_t *)arg;
- prof_tdata_expire(tsd, tdata);
- return (NULL);
+ return (prof_tdata_expire(tdata) ? tdata : NULL);
}
void
prof_reset(tsd_t *tsd, size_t lg_sample)
{
+ prof_tdata_t *next;
assert(lg_sample < (sizeof(uint64_t) << 3));
@@ -1664,7 +1738,18 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
malloc_mutex_lock(&tdatas_mtx);
lg_prof_sample = lg_sample;
- tdata_tree_iter(&tdatas, NULL, prof_tdata_reset_iter, tsd);
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, NULL);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ tdata_tree_remove(&tdatas, to_destroy);
+ prof_tdata_destroy(tsd, to_destroy);
+ } else
+ next = NULL;
+ } while (next != NULL);
malloc_mutex_unlock(&tdatas_mtx);
malloc_mutex_unlock(&prof_dump_mtx);