author     Jason Evans <je@fb.com>   2011-08-31 23:18:38 (GMT)
committer  Jason Evans <je@fb.com>   2011-08-31 23:18:38 (GMT)
commit     5bdbae57eecd92755be8e7a59d6ff8b46f50e6ad (patch)
tree       77497f6ff41dc0bb6168f2c89f4eefaf48b5113f /src
parent     446c3b22f1299ff4a5549b0b36540bceda6c3beb (diff)
parent     c67e4fdc712aa5b818d69b7ef8e3963441febb16 (diff)
Merge branch 'dev' (tag: 2.2.3)
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c      1
-rw-r--r--  src/jemalloc.c  80
-rw-r--r--  src/prof.c      93
3 files changed, 105 insertions, 69 deletions
diff --git a/src/arena.c b/src/arena.c
index e00dccc..e749c1d 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1657,6 +1657,7 @@ arena_prof_promoted(const void *ptr, size_t size)
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
assert(isalloc(ptr) == PAGE_SIZE);
+ assert(size <= small_maxclass);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
diff --git a/src/jemalloc.c b/src/jemalloc.c
index e287516..fd8bf52 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -84,6 +84,7 @@ static void malloc_conf_error(const char *msg, const char *k, size_t klen,
const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
+static int imemalign(void **memptr, size_t alignment, size_t size);
/******************************************************************************/
/* malloc_message() setup. */
@@ -939,7 +940,8 @@ JEMALLOC_P(malloc)(size_t size)
#ifdef JEMALLOC_PROF
if (opt_prof) {
usize = s2u(size);
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
ret = NULL;
goto OOM;
}
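[Editor's note: the hunk above replaces the prof_alloc_prep() function call with the PROF_ALLOC_PREP() statement macro, whose first argument is the number of backtrace frames to ignore, so that heap samples attribute to the application call site rather than to jemalloc's own entry points. The macro's real definition lives in jemalloc's prof.h and is not shown in this diff; the following is only a minimal sketch of the pattern, with prof_sample_stub() and its signature invented for illustration.]

/* prof_alloc_prep_sketch.c -- shape of a PROF_ALLOC_PREP-style macro.
 * Illustration only, NOT jemalloc's real definition. */
#include <stddef.h>
#include <stdio.h>

typedef struct { size_t usize; unsigned nignore; } prof_thr_cnt_t;
static prof_thr_cnt_t dummy;

/* Stand-in for the sampling decision; returns NULL on failure. */
static prof_thr_cnt_t *
prof_sample_stub(size_t usize, unsigned nignore)
{
	dummy.usize = usize;
	dummy.nignore = nignore;
	return (&dummy);
}

/* A statement macro (rather than a function) can carry the frame count
 * as a per-call-site constant and report failure through an out
 * parameter instead of a return value. */
#define PROF_ALLOC_PREP(nignore, usize, ret) do {		\
	(ret) = prof_sample_stub((usize), (nignore));		\
} while (0)

int
main(void)
{
	prof_thr_cnt_t *cnt;

	PROF_ALLOC_PREP(1, 4096, cnt);	/* 1: skip the malloc frame itself */
	printf("usize=%zu nignore=%u\n", cnt->usize, cnt->nignore);
	return (0);
}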
@@ -988,9 +990,15 @@ RETURN:
}
JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
-int
-JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
+#ifdef JEMALLOC_PROF
+/*
+ * Avoid any uncertainty as to how many backtrace frames to ignore in
+ * PROF_ALLOC_PREP().
+ */
+JEMALLOC_ATTR(noinline)
+#endif
+static int
+imemalign(void **memptr, size_t alignment, size_t size)
{
int ret;
size_t usize
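[Editor's note: the comment in this hunk explains why imemalign() must not be inlined: posix_memalign(), memalign(), and valloc() all funnel through it, and PROF_ALLOC_PREP(2, ...) hard-codes how many stack frames separate the backtrace point from application code. If the compiler inlined imemalign() into any caller, that count would be wrong for that call path. A minimal sketch of the attribute's effect, with helper() invented for illustration:]

/* noinline_sketch.c -- pinning the stack depth of a helper.
 * With inlining suppressed, a backtrace taken inside helper() always
 * sees exactly one helper frame between it and the caller, so a fixed
 * frames-to-skip count stays valid for every call site. */
#include <stdio.h>

__attribute__((noinline))	/* GCC/Clang; spelled JEMALLOC_ATTR(noinline) above */
static int
helper(int x)
{
	return (x + 1);
}

int
main(void)
{
	printf("%d\n", helper(41));
	return (0);
}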
@@ -1057,7 +1065,8 @@ JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
#ifdef JEMALLOC_PROF
if (opt_prof) {
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ PROF_ALLOC_PREP(2, usize, cnt);
+ if (cnt == NULL) {
result = NULL;
ret = EINVAL;
} else {
@@ -1110,6 +1119,15 @@ RETURN:
return (ret);
}
+JEMALLOC_ATTR(nonnull(1))
+JEMALLOC_ATTR(visibility("default"))
+int
+JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
+{
+
+ return imemalign(memptr, alignment, size);
+}
+
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
@@ -1165,7 +1183,8 @@ JEMALLOC_P(calloc)(size_t num, size_t size)
#ifdef JEMALLOC_PROF
if (opt_prof) {
usize = s2u(num_size);
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
ret = NULL;
goto RETURN;
}
@@ -1278,7 +1297,9 @@ JEMALLOC_P(realloc)(void *ptr, size_t size)
if (opt_prof) {
usize = s2u(size);
old_ctx = prof_ctx_get(ptr);
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
+ old_ctx = NULL;
ret = NULL;
goto OOM;
}
@@ -1288,8 +1309,13 @@ JEMALLOC_P(realloc)(void *ptr, size_t size)
false, false);
if (ret != NULL)
arena_prof_promoted(ret, usize);
- } else
+ else
+ old_ctx = NULL;
+ } else {
ret = iralloc(ptr, size, 0, 0, false, false);
+ if (ret == NULL)
+ old_ctx = NULL;
+ }
} else
#endif
{
@@ -1327,7 +1353,8 @@ OOM:
#ifdef JEMALLOC_PROF
if (opt_prof) {
usize = s2u(size);
- if ((cnt = prof_alloc_prep(usize)) == NULL)
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL)
ret = NULL;
else {
if (prof_promote && (uintptr_t)cnt !=
@@ -1432,7 +1459,7 @@ JEMALLOC_P(memalign)(size_t alignment, size_t size)
#ifdef JEMALLOC_CC_SILENCE
int result =
#endif
- JEMALLOC_P(posix_memalign)(&ret, alignment, size);
+ imemalign(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
if (result != 0)
return (NULL);
@@ -1451,7 +1478,7 @@ JEMALLOC_P(valloc)(size_t size)
#ifdef JEMALLOC_CC_SILENCE
int result =
#endif
- JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
+ imemalign(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
if (result != 0)
return (NULL);
@@ -1566,14 +1593,14 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
if (malloc_init())
goto OOM;
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
- NULL);
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
if (usize == 0)
goto OOM;
#ifdef JEMALLOC_PROF
if (opt_prof) {
- if ((cnt = prof_alloc_prep(usize)) == NULL)
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL)
goto OOM;
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
small_maxclass) {
@@ -1590,7 +1617,7 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
if (p == NULL)
goto OOM;
}
-
+ prof_malloc(p, usize, cnt);
if (rsize != NULL)
*rsize = usize;
} else
@@ -1645,7 +1672,6 @@ JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt;
- prof_ctx_t *old_ctx;
#endif
assert(ptr != NULL);
@@ -1660,25 +1686,33 @@ JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
/*
* usize isn't knowable before iralloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
- * use that in prof_alloc_prep() to decide whether to capture a
+ * use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
* decide whether to sample.
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment, NULL);
+ prof_ctx_t *old_ctx = prof_ctx_get(p);
old_size = isalloc(p);
- old_ctx = prof_ctx_get(p);
- if ((cnt = prof_alloc_prep(max_usize)) == NULL)
+ PROF_ALLOC_PREP(1, max_usize, cnt);
+ if (cnt == NULL)
goto OOM;
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && max_usize
- <= small_maxclass) {
+ /*
+ * Use minimum usize to determine whether promotion may happen.
+ */
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
+ && ((alignment == 0) ? s2u(size) : sa2u(size,
+ alignment, NULL)) <= small_maxclass) {
q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
size+extra) ? 0 : size+extra - (small_maxclass+1),
alignment, zero, no_move);
if (q == NULL)
goto ERR;
- usize = isalloc(q);
- arena_prof_promoted(q, usize);
+ if (max_usize < PAGE_SIZE) {
+ usize = max_usize;
+ arena_prof_promoted(q, usize);
+ } else
+ usize = isalloc(q);
} else {
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto ERR;
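[Editor's note: both comments in the hunk above turn on the same fact: with nonzero extra, the final usize is only known after iralloc() returns, so it is bracketed instead. The maximum possible usize (s2u(size+extra) or sa2u(size+extra, ...)) drives the conservative decision of whether to capture a backtrace, while the minimum (s2u(size) or sa2u(size, ...)) gates small-class promotion, which is only safe if even the smallest possible result fits a small size class. A self-contained sketch of the bracketing, with s2u_stub() standing in for jemalloc's size-class rounding (real size classes differ):]

/* usize_bounds.c -- bracketing an unknowable usize. */
#include <stddef.h>
#include <stdio.h>

/* Round up to a power-of-two "size class" -- illustration only. */
static size_t
s2u_stub(size_t size)
{
	size_t u = 1;
	while (u < size)
		u <<= 1;
	return (u);
}

int
main(void)
{
	size_t size = 100, extra = 4000;
	size_t min_usize = s2u_stub(size);	   /* gates promotion */
	size_t max_usize = s2u_stub(size + extra); /* gates backtrace */

	/* Backtrace capture must assume the worst case (largest usize);
	 * promotion must assume the best case (smallest usize). */
	printf("usize lands in [%zu, %zu]\n", min_usize, max_usize);
	return (0);
}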
diff --git a/src/prof.c b/src/prof.c
index 6549375..8a144b4 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -474,11 +474,23 @@ prof_lookup(prof_bt_t *bt)
/*
* Artificially raise curobjs, in order to avoid a race
* condition with prof_ctx_merge()/prof_ctx_destroy().
+ *
+ * No locking is necessary for ctx here because no other
+ * threads have had the opportunity to fetch it from
+ * bt2ctx yet.
*/
ctx.p->cnt_merged.curobjs++;
new_ctx = true;
- } else
+ } else {
+ /*
+ * Artificially raise curobjs, in order to avoid a race
+ * condition with prof_ctx_merge()/prof_ctx_destroy().
+ */
+ malloc_mutex_lock(&ctx.p->lock);
+ ctx.p->cnt_merged.curobjs++;
+ malloc_mutex_unlock(&ctx.p->lock);
new_ctx = false;
+ }
prof_leave();
/* Link a prof_thr_cnt_t into ctx for this thread. */
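[Editor's note: both branches in the hunk above apply the same idiom: before prof_leave() drops the global lock, the ctx is pinned by artificially bumping ctx->cnt_merged.curobjs, so a concurrent prof_ctx_destroy() observing a nonzero count backs off instead of freeing the ctx. On the new-ctx path no lock is needed (the ctx is not yet visible to other threads); on the existing-ctx path ctx->lock must be held. The pin is dropped later, once the real reference is linked in. A generic sketch of the pin, with all names invented (compile with -pthread):]

/* ctx_pin.c -- the "artificially raise curobjs" idiom as a refcount pin. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t lock;
	unsigned curobjs;
} ctx_t;

/* Pin: held while the ctx is in limbo, preventing destruction. */
static void
ctx_pin(ctx_t *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->curobjs++;	/* artificial reference */
	pthread_mutex_unlock(&ctx->lock);
}

/* Compensating decrement, as prof_ctx_destroy()'s else branch does. */
static void
ctx_unpin(ctx_t *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->curobjs--;
	pthread_mutex_unlock(&ctx->lock);
}

/* Destroy only if no reference, real or artificial, remains. */
static bool
ctx_try_destroy(ctx_t *ctx)
{
	bool destroyed;

	pthread_mutex_lock(&ctx->lock);
	destroyed = (ctx->curobjs == 0);
	pthread_mutex_unlock(&ctx->lock);
	return (destroyed);
}

int
main(void)
{
	ctx_t ctx = { PTHREAD_MUTEX_INITIALIZER, 0 };

	ctx_pin(&ctx);
	printf("while pinned: destroyed=%d\n", ctx_try_destroy(&ctx)); /* 0 */
	ctx_unpin(&ctx);
	printf("after unpin:  destroyed=%d\n", ctx_try_destroy(&ctx)); /* 1 */
	return (0);
}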
@@ -491,8 +503,9 @@ prof_lookup(prof_bt_t *bt)
*/
ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
assert(ret.v != NULL);
- ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, NULL,
- NULL);
+ if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
+ NULL, NULL))
+ assert(false);
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
@@ -503,11 +516,8 @@ prof_lookup(prof_bt_t *bt)
/* Allocate and partially initialize a new cnt. */
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
- if (new_ctx) {
- malloc_mutex_lock(&ctx.p->lock);
- ctx.p->cnt_merged.curobjs--;
- malloc_mutex_unlock(&ctx.p->lock);
- }
+ if (new_ctx)
+ prof_ctx_destroy(ctx.p);
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
@@ -518,19 +528,15 @@ prof_lookup(prof_bt_t *bt)
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
- if (new_ctx) {
- malloc_mutex_lock(&ctx.p->lock);
- ctx.p->cnt_merged.curobjs--;
- malloc_mutex_unlock(&ctx.p->lock);
- }
+ if (new_ctx)
+ prof_ctx_destroy(ctx.p);
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
malloc_mutex_lock(&ctx.p->lock);
ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
- if (new_ctx)
- ctx.p->cnt_merged.curobjs--;
+ ctx.p->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx.p->lock);
} else {
/* Move ret to the front of the LRU. */
@@ -644,11 +650,10 @@ prof_ctx_destroy(prof_ctx_t *ctx)
/*
* Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() interlocks bt2ctx_mtx and ctx->lock in order to
- * avoid a race condition with this function, and prof_ctx_merge()
- * artificially raises ctx->cnt_merged.curobjs in order to avoid a race
- * between the main body of prof_ctx_merge() and entry into this
- * function.
+ * it. prof_lookup() artificially raises ctx->cnt_merged.curobjs in
+ * order to avoid a race condition with this function, as does
+ * prof_ctx_merge() in order to avoid a race between the main body of
+ * prof_ctx_merge() and entry into this function.
*/
prof_enter();
malloc_mutex_lock(&ctx->lock);
@@ -657,7 +662,8 @@ prof_ctx_destroy(prof_ctx_t *ctx)
assert(ctx->cnt_merged.accumobjs == 0);
assert(ctx->cnt_merged.accumbytes == 0);
/* Remove ctx from bt2ctx. */
- ckh_remove(&bt2ctx, ctx->bt, NULL, NULL);
+ if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ assert(false);
prof_leave();
/* Destroy ctx. */
malloc_mutex_unlock(&ctx->lock);
@@ -665,7 +671,10 @@ prof_ctx_destroy(prof_ctx_t *ctx)
malloc_mutex_destroy(&ctx->lock);
idalloc(ctx);
} else {
- /* Compensate for increment in prof_ctx_merge(). */
+ /*
+ * Compensate for increment in prof_ctx_merge() or
+ * prof_lookup().
+ */
ctx->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx->lock);
prof_leave();
@@ -1109,7 +1118,6 @@ prof_tdata_init(void)
prof_tdata->vec = imalloc(sizeof(void *) * prof_bt_max);
if (prof_tdata->vec == NULL) {
-
ckh_delete(&prof_tdata->bt2cnt);
idalloc(prof_tdata);
return (NULL);
@@ -1127,33 +1135,26 @@ prof_tdata_init(void)
static void
prof_tdata_cleanup(void *arg)
{
- prof_tdata_t *prof_tdata;
+ prof_thr_cnt_t *cnt;
+ prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
- prof_tdata = PROF_TCACHE_GET();
- if (prof_tdata != NULL) {
- prof_thr_cnt_t *cnt;
-
- /*
- * Delete the hash table. All of its contents can still be
- * iterated over via the LRU.
- */
- ckh_delete(&prof_tdata->bt2cnt);
+ /*
+ * Delete the hash table. All of its contents can still be iterated
+ * over via the LRU.
+ */
+ ckh_delete(&prof_tdata->bt2cnt);
- /*
- * Iteratively merge cnt's into the global stats and delete
- * them.
- */
- while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
- prof_ctx_merge(cnt->ctx, cnt);
- ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
- idalloc(cnt);
- }
+ /* Iteratively merge cnt's into the global stats and delete them. */
+ while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
+ ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
+ prof_ctx_merge(cnt->ctx, cnt);
+ idalloc(cnt);
+ }
- idalloc(prof_tdata->vec);
+ idalloc(prof_tdata->vec);
- idalloc(prof_tdata);
- PROF_TCACHE_SET(NULL);
- }
+ idalloc(prof_tdata);
+ PROF_TCACHE_SET(NULL);
}
void
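[Editor's note: besides flattening the cleanup into straight-line code, the rewrite above also swaps the drain order inside the loop: each cnt is now removed from the LRU before prof_ctx_merge() runs, so if the merge drops the last reference and tears the ctx down, the cnt is no longer reachable through the list. A generic sketch of the unlink-then-merge ordering, with the list and merge() invented as stand-ins for jemalloc's ql_* macros and prof_ctx_merge():]

/* drain_order.c -- unlink each node before handing it to a routine
 * that may invalidate shared state; never merge-then-unlink. */
#include <stdio.h>
#include <stdlib.h>

typedef struct node_s {
	struct node_s *next;
	int stats;
} node_t;

static int global_stats;

/* Stand-in for prof_ctx_merge(): folds per-thread stats into global
 * totals; in jemalloc this step may trigger ctx destruction. */
static void
merge(node_t *n)
{
	global_stats += n->stats;
}

int
main(void)
{
	node_t *head = NULL;

	for (int i = 0; i < 3; i++) {
		node_t *n = malloc(sizeof(*n));
		n->stats = i + 1;
		n->next = head;
		head = n;
	}
	while (head != NULL) {
		node_t *n = head;
		head = n->next;	/* remove from list first */
		merge(n);	/* then merge (may free shared state) */
		free(n);	/* then delete the node itself */
	}
	printf("merged: %d\n", global_stats);
	return (0);
}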