Diffstat (limited to 'jemalloc/src')
-rw-r--r--  jemalloc/src/arena.c     6
-rw-r--r--  jemalloc/src/ckh.c      14
-rw-r--r--  jemalloc/src/ctl.c       5
-rw-r--r--  jemalloc/src/huge.c      6
-rw-r--r--  jemalloc/src/jemalloc.c  4
-rw-r--r--  jemalloc/src/prof.c     30
6 files changed, 40 insertions(+), 25 deletions(-)
diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index 7f939b3..3cf15ff 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -1358,8 +1358,6 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
#endif
malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
- if (tbin->ncached > tbin->high_water)
- tbin->high_water = tbin->ncached;
}
#endif
@@ -1369,7 +1367,6 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
* *) bin->run_size >= min_run_size
* *) bin->run_size <= arena_maxclass
* *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
- * *) run header size < PAGE_SIZE
*
* bin->nregs and bin->reg0_offset are also calculated here, since these
* settings are all interdependent.
@@ -1455,8 +1452,7 @@ arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
} while (try_run_size <= arena_maxclass
&& try_run_size <= arena_maxclass
&& RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
- && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_hdr_size < PAGE_SIZE);
+ && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
assert(good_hdr_size <= good_reg0_offset);
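The loop condition above relies on jemalloc's fixed-point run-overhead test. Below is a minimal standalone sketch of that test, not part of the patch; the RUN_BFP and RUN_MAX_OVRHD values are assumptions for illustration only.

/*
 * Illustrative sketch: the fixed-point overhead comparison used in
 * arena_bin_run_size_calc().  Constants are assumed values.
 */
#include <stdio.h>
#include <stddef.h>

#define RUN_BFP		12		/* assumed: 12-bit binary fixed point */
#define RUN_MAX_OVRHD	0x0000003dU	/* assumed: ~1.5% in that fixed point */

/* Return nonzero if header overhead exceeds RUN_MAX_OVRHD for this run. */
static int
run_overhead_too_high(size_t reg0_offset, size_t run_size)
{
	/* overhead/run_size > RUN_MAX_OVRHD/2^RUN_BFP, cross-multiplied. */
	return ((reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * run_size);
}

int
main(void)
{
	/* A 4 KiB run whose header (reg0 offset) occupies 64 bytes: ~1.6%. */
	printf("overhead too high: %d\n", run_overhead_too_high(64, 4096));
	return (0);
}

With these assumed values, the 64-byte header of a 4 KiB run is just over the allowed ~1.5% overhead, so the size calculation would keep growing the run.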
diff --git a/jemalloc/src/ckh.c b/jemalloc/src/ckh.c
index 682a8db..e386a53 100644
--- a/jemalloc/src/ckh.c
+++ b/jemalloc/src/ckh.c
@@ -73,7 +73,7 @@ ckh_isearch(ckh_t *ckh, const void *key)
size_t hash1, hash2, bucket, cell;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
+ assert(ckh->magic == CKH_MAGIC);
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
@@ -383,7 +383,7 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
}
#ifdef JEMALLOC_DEBUG
- ckh->magic = CKH_MAGIG;
+ ckh->magic = CKH_MAGIC;
#endif
ret = false;
@@ -396,7 +396,7 @@ ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
+ assert(ckh->magic == CKH_MAGIC);
#ifdef CKH_VERBOSE
malloc_printf(
@@ -421,7 +421,7 @@ ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
+ assert(ckh->magic == CKH_MAGIC);
return (ckh->count);
}
@@ -452,7 +452,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
bool ret;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
+ assert(ckh->magic == CKH_MAGIC);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
@@ -477,7 +477,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
+ assert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
@@ -509,7 +509,7 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
+ assert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
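The ckh.c hunks replace assignments inside assert() with comparisons. A minimal sketch of why the original form is ineffective follows; the names here are stand-ins, not jemalloc's.

/*
 * Illustrative sketch: "=" inside assert() defeats a magic-number check.
 */
#include <assert.h>
#include <stdio.h>

#define MAGIC 0x3af2489du

struct obj {
	unsigned magic;
};

int
main(void)
{
	struct obj o = {0};	/* never initialized with MAGIC */

	/* Buggy form: assigns MAGIC, always passes, corrupts o.magic. */
	assert(o.magic = MAGIC);

	/* Correct form: compares; would abort for an uninitialized obj. */
	/* assert(o.magic == MAGIC); */

	printf("magic after buggy assert: %#x\n", o.magic);
	return (0);
}

The buggy assert always passes because the assigned magic value is nonzero, and it silently overwrites the very field it was meant to validate.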
diff --git a/jemalloc/src/ctl.c b/jemalloc/src/ctl.c
index 3c8adab..0b8b06f 100644
--- a/jemalloc/src/ctl.c
+++ b/jemalloc/src/ctl.c
@@ -1137,6 +1137,11 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Set new arena association. */
ARENA_SET(arena);
+ {
+ tcache_t *tcache = TCACHE_GET();
+ if (tcache != NULL)
+ tcache->arena = arena;
+ }
}
ret = 0;
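The ctl.c hunk above sits in the handler behind the "thread.arena" mallctl. A minimal usage sketch, assuming jemalloc's public mallctl() interface is available unprefixed:

/*
 * Illustrative usage sketch: switch the calling thread to arena 0 via
 * the "thread.arena" mallctl.
 */
#include <stdlib.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena = 0;
	unsigned old;
	size_t oldlen = sizeof(old);

	if (mallctl("thread.arena", &old, &oldlen, &arena, sizeof(arena)) != 0) {
		fprintf(stderr, "mallctl(\"thread.arena\") failed\n");
		return (1);
	}
	printf("previous arena: %u, now using arena: %u\n", old, arena);
	free(malloc(64));	/* subsequent allocations use the new arena */
	return (0);
}

Compile and link against jemalloc (for example with -ljemalloc); with the hunk above applied, the thread's tcache refills also come from the newly selected arena rather than the old one.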
diff --git a/jemalloc/src/huge.c b/jemalloc/src/huge.c
index 0aadc43..de09198 100644
--- a/jemalloc/src/huge.c
+++ b/jemalloc/src/huge.c
@@ -83,7 +83,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* alignment, in order to assure the alignment can be achieved, then
* unmap leading and trailing chunks.
*/
- assert(alignment >= chunksize);
+ assert(alignment > chunksize);
chunk_size = CHUNK_CEILING(size);
@@ -192,7 +192,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
- if (alignment != 0)
+ if (alignment > chunksize)
ret = huge_palloc(size + extra, alignment, zero);
else
ret = huge_malloc(size + extra, zero);
@@ -201,7 +201,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
- if (alignment != 0)
+ if (alignment > chunksize)
ret = huge_palloc(size, alignment, zero);
else
ret = huge_malloc(size, zero);
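The comment in huge_palloc() describes over-allocating and then unmapping the leading and trailing excess. A minimal sketch of that alignment arithmetic, not part of the patch; CHUNKSIZE, the pretend base address, and the trim sizes are stand-ins for illustration.

/*
 * Illustrative sketch: over-allocate so an aligned subrange must exist,
 * then compute how much to trim from each end.
 */
#include <stdint.h>
#include <stdio.h>

#define CHUNKSIZE	((size_t)4 << 20)	/* assumed 4 MiB chunks */

/* Round addr up to the next multiple of alignment (a power of two). */
static uintptr_t
align_up(uintptr_t addr, size_t alignment)
{
	return ((addr + (alignment - 1)) & ~((uintptr_t)alignment - 1));
}

int
main(void)
{
	size_t size = CHUNKSIZE, alignment = (size_t)8 << 20;
	/* Over-allocate: size + alignment guarantees an aligned subrange. */
	size_t alloc_size = size + alignment;
	uintptr_t base = (uintptr_t)0x60600000;	/* pretend mapping address */
	uintptr_t aligned = align_up(base, alignment);

	printf("lead trim: %zu bytes\n", (size_t)(aligned - base));
	printf("tail trim: %zu bytes\n",
	    (size_t)(alloc_size - (aligned - base) - size));
	return (0);
}

Because plain huge allocations are already chunk-aligned, this trimming path is only needed when the requested alignment exceeds the chunk size, which is what the alignment > chunksize checks above encode.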
diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index 2aebc51..f5434c7 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -421,8 +421,8 @@ malloc_conf_init(void)
if ((opts = getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
- * the value of the JEMALLOC_OPTIONS
- * environment variable.
+ * the value of the MALLOC_CONF environment
+ * variable.
*/
} else {
/* No configuration specified. */
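The updated comment refers to the MALLOC_CONF environment variable. A minimal sketch of the lookup pattern it describes, with option parsing omitted:

/*
 * Illustrative sketch: read a raw option string from MALLOC_CONF.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char *opts;

	if ((opts = getenv("MALLOC_CONF")) != NULL)
		printf("malloc options from MALLOC_CONF: \"%s\"\n", opts);
	else
		printf("MALLOC_CONF not set; using defaults\n");
	return (0);
}

Running the program with MALLOC_CONF set in the environment prints the raw option string that the real malloc_conf_init() would go on to parse.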
diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
index 636ccce..3566c6d 100644
--- a/jemalloc/src/prof.c
+++ b/jemalloc/src/prof.c
@@ -432,6 +432,7 @@ prof_lookup(prof_bt_t *bt)
prof_ctx_t *p;
void *v;
} ctx;
+ bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
@@ -468,12 +469,14 @@ prof_lookup(prof_bt_t *bt)
idalloc(ctx.v);
return (NULL);
}
- }
- /*
- * Acquire ctx's lock before releasing bt2ctx_mtx, in order to
- * avoid a race condition with prof_ctx_destroy().
- */
- malloc_mutex_lock(&ctx.p->lock);
+ /*
+ * Artificially raise curobjs, in order to avoid a race
+ * condition with prof_ctx_merge()/prof_ctx_destroy().
+ */
+ ctx.p->cnt_merged.curobjs++;
+ new_ctx = true;
+ } else
+ new_ctx = false;
prof_leave();
/* Link a prof_thd_cnt_t into ctx for this thread. */
@@ -498,7 +501,11 @@ prof_lookup(prof_bt_t *bt)
/* Allocate and partially initialize a new cnt. */
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
- malloc_mutex_unlock(&ctx.p->lock);
+ if (new_ctx) {
+ malloc_mutex_lock(&ctx.p->lock);
+ ctx.p->cnt_merged.curobjs--;
+ malloc_mutex_unlock(&ctx.p->lock);
+ }
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
@@ -509,12 +516,19 @@ prof_lookup(prof_bt_t *bt)
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
- malloc_mutex_unlock(&ctx.p->lock);
+ if (new_ctx) {
+ malloc_mutex_lock(&ctx.p->lock);
+ ctx.p->cnt_merged.curobjs--;
+ malloc_mutex_unlock(&ctx.p->lock);
+ }
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ malloc_mutex_lock(&ctx.p->lock);
ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
+ if (new_ctx)
+ ctx.p->cnt_merged.curobjs--;
malloc_mutex_unlock(&ctx.p->lock);
} else {
/* Move ret to the front of the LRU. */
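The prof.c hunks pin the newly created ctx by artificially raising cnt_merged.curobjs while the global bt2ctx lock is still held, so prof_ctx_destroy() cannot free it before the per-thread counter is linked in. Below is a minimal sketch of that pin-then-link pattern; all names are stand-ins, not jemalloc's.

/*
 * Illustrative sketch: pin an object with an artificial reference under
 * the registry lock, then drop the pin under the object's own lock once
 * the real linkage exists.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct ctx_s {
	pthread_mutex_t	lock;
	unsigned	curobjs;	/* attachments plus artificial refs */
} ctx_t;

static pthread_mutex_t	registry_lock = PTHREAD_MUTEX_INITIALIZER;

static void
attach_to_ctx(ctx_t *ctx)
{
	/* Phase 1: under the registry lock, pin ctx with an artificial ref. */
	pthread_mutex_lock(&registry_lock);
	pthread_mutex_lock(&ctx->lock);
	ctx->curobjs++;			/* pin: a destroyer sees curobjs != 0 */
	pthread_mutex_unlock(&ctx->lock);
	pthread_mutex_unlock(&registry_lock);

	/* ... allocate and initialize per-thread state, possibly slowly ... */

	/* Phase 2: under ctx's lock, link in for real and drop the pin. */
	pthread_mutex_lock(&ctx->lock);
	ctx->curobjs++;			/* the real attachment */
	ctx->curobjs--;			/* release the artificial reference */
	pthread_mutex_unlock(&ctx->lock);
}

int
main(void)
{
	ctx_t ctx = { PTHREAD_MUTEX_INITIALIZER, 0 };

	attach_to_ctx(&ctx);
	printf("curobjs after attach: %u\n", ctx.curobjs);
	return (0);
}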