author	Jason Evans <jasone@canonware.com>	2015-09-15 06:17:25 (GMT)
committer	Jason Evans <jasone@canonware.com>	2015-09-15 06:55:48 (GMT)
commit	cec0d63d8bc46205d38456024176a0ece590253e (patch)
tree	8604c2c8a337bfb6764eeb63a290e9a6d8297ee5 /include
parent	ef363de7010b5e13f4e1c0d7b3a109362bda7aa7 (diff)
Make one call to prof_active_get_unlocked() per allocation event.
Make one call to prof_active_get_unlocked() per allocation event, and use the result throughout the functions that handle that event. Also add a missing prof_active check in prof_realloc(). These fixes protect allocation events against concurrent changes to prof_active.
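The pattern behind the fix can be sketched in isolation. The following is a minimal, self-contained sketch under assumed names (active_flag, event_begin(), event_commit(), and allocation_event() are invented for illustration and are not jemalloc APIs): a shared flag is read once per event, and that one snapshot is passed to every helper, which is the same discipline this commit applies by passing prof_active into prof_alloc_prep() and prof_realloc().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the prof.active toggle; another thread may flip it at any time. */
static atomic_bool active_flag = true;

/* Helpers take the caller's snapshot instead of re-reading the global. */
static bool
event_begin(bool active)
{
	return (active);
}

static void
event_commit(bool active, bool began)
{
	/* Both arguments derive from one snapshot, so they cannot disagree. */
	if (active && began)
		printf("event recorded\n");
}

static void
allocation_event(void)
{
	/* Read the shared flag exactly once per event... */
	bool active = atomic_load_explicit(&active_flag, memory_order_relaxed);

	/* ...and thread that snapshot through every step of the event. */
	event_commit(active, event_begin(active));
}

int
main(void)
{
	allocation_event();
	return (0);
}

Had event_commit() re-read the global itself, a toggle landing between the two reads could leave an event half-recorded; per the commit message above, that is the class of race these changes close.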
Diffstat (limited to 'include')
-rw-r--r--  include/jemalloc/internal/prof.h | 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index eca8aa8..c66611c 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -331,14 +331,16 @@ bool prof_gdump_get_unlocked(void);
 prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
 bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
     prof_tdata_t **tdata_out);
-prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
+prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
+    bool update);
 prof_tctx_t *prof_tctx_get(const void *ptr);
 void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
 void prof_malloc_sample_object(const void *ptr, size_t usize,
     prof_tctx_t *tctx);
 void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
 void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
-    prof_tctx_t *tctx, bool updated, size_t old_usize, prof_tctx_t *old_tctx);
+    prof_tctx_t *tctx, bool prof_active, bool updated,
+    size_t old_usize, prof_tctx_t *old_tctx);
 void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
 #endif
@@ -443,7 +445,7 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
 }

 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
 {
 	prof_tctx_t *ret;
 	prof_tdata_t *tdata;
@@ -451,8 +453,8 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
 	assert(usize == s2u(usize));

-	if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
-	    usize, update, &tdata)))
+	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+	    &tdata)))
 		ret = (prof_tctx_t *)(uintptr_t)1U;
 	else {
 		bt_init(&bt, tdata->vec);
@@ -479,17 +481,17 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
 JEMALLOC_ALWAYS_INLINE void
 prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
-    bool updated, size_t old_usize, prof_tctx_t *old_tctx)
+    bool prof_active, bool updated, size_t old_usize, prof_tctx_t *old_tctx)
 {
 	cassert(config_prof);
 	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

-	if (!updated && ptr != NULL) {
+	if (prof_active && !updated && ptr != NULL) {
 		assert(usize == isalloc(ptr, true));
 		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
 			/*
-			 * Don't sample. The usize passed to PROF_ALLOC_PREP()
+			 * Don't sample. The usize passed to prof_alloc_prep()
 			 * was larger than what actually got allocated, so a
 			 * backtrace was captured for this allocation, even
 			 * though its actual usize was insufficient to cross the