author    Jason Evans <jasone@canonware.com>  2016-05-03 22:00:42 (GMT)
committer Jason Evans <jasone@canonware.com>  2016-05-04 00:19:15 (GMT)
commit    90827a3f3ef2099dcd480d542aacc9f44a0787e8 (patch)
tree      82a806605edd580bb9d68019b9d2ab1ab736a848
parent    21cda0dc42bdcb1b5b6ecdb82157a0af84c9f0c4 (diff)
Fix huge_palloc() regression.
Split arena_choose() into arena_[i]choose() and use arena_ichoose() for arena lookup during internal allocation. This fixes huge_palloc() so that it always succeeds during extent node allocation.

This regression was introduced by 66cd953514a18477eb49732e40d5c2ab5f1b12c5 (Do not allocate metadata via non-auto arenas, nor tcaches.).
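For context on the split: arena_choose() keeps the application-facing behavior (and may still return NULL), while arena_ichoose() serves internal/metadata allocation and falls back to the caller-supplied arena whenever tsd is unavailable, which is what extent node allocation in huge_palloc() needs. Below is a minimal standalone C sketch of that calling convention — not part of the commit; tsd_t, its iarena field, and the arena_choose_impl() body here are toy stand-ins, and the authoritative definitions are in the jemalloc_internal.h.in hunk that follows.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct arena_s arena_t;            /* opaque stand-in */
typedef struct { arena_t *iarena; } tsd_t; /* toy tsd: one arena slot */

/* Toy chooser: prefer an explicit arena, else the one recorded in tsd. */
static arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
	(void)internal; /* the real code selects auto arenas when true */
	if (arena != NULL)
		return (arena);
	return (tsd != NULL ? tsd->iarena : NULL);
}

/* Application-facing lookup; may still return NULL on failure. */
static arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{
	return (arena_choose_impl(tsd, arena, false));
}

/*
 * Internal (metadata) lookup: consult tsd when available, otherwise fall
 * back to the caller-supplied arena, so a caller that already holds an
 * arena always gets a usable result.
 */
static arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena)
{
	assert(tsd != NULL || arena != NULL);
	if (tsd != NULL)
		return (arena_choose_impl(tsd, NULL, true));
	return (arena);
}

The key property is the fallback path in arena_ichoose(): when tsd is unavailable, the caller-supplied arena is used directly, so huge_palloc()'s extent node allocation no longer depends on a tsd-based lookup succeeding. Internal call sites switch to arena_ichoose() accordingly, as the ckh.c, prof.c, and tcache.c hunks below show.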
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in  24
-rw-r--r--  include/jemalloc/internal/private_symbols.txt      2
-rw-r--r--  include/jemalloc/internal/tcache.h                 7
-rw-r--r--  src/arena.c                                        4
-rw-r--r--  src/ckh.c                                          6
-rw-r--r--  src/ctl.c                                          2
-rw-r--r--  src/huge.c                                         5
-rw-r--r--  src/prof.c                                         2
-rw-r--r--  src/tcache.c                                      10
9 files changed, 42 insertions, 20 deletions
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index fe58c1c..62d5da2 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -550,7 +550,9 @@ size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
-arena_t *arena_choose(tsd_t *tsd, arena_t *arena, bool internal);
+arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
+arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing);
@@ -788,7 +790,7 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena, bool internal)
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
arena_t *ret;
@@ -802,6 +804,24 @@ arena_choose(tsd_t *tsd, arena_t *arena, bool internal)
return (ret);
}
+JEMALLOC_INLINE arena_t *
+arena_choose(tsd_t *tsd, arena_t *arena)
+{
+
+ return (arena_choose_impl(tsd, arena, false));
+}
+
+JEMALLOC_INLINE arena_t *
+arena_ichoose(tsd_t *tsd, arena_t *arena)
+{
+
+ assert(tsd != NULL || arena != NULL);
+
+ if (tsd != NULL)
+ return (arena_choose_impl(tsd, NULL, true));
+ return (arena);
+}
+
JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index de884fc..7958a4f 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -10,6 +10,7 @@ arena_bitselm_get_mutable
arena_boot
arena_choose
arena_choose_hard
+arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
@@ -35,6 +36,7 @@ arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_get
+arena_ichoose
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 8272430..59f6023 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -293,7 +293,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
bool tcache_hard_success;
- arena = arena_choose(tsd, arena, false);
+ arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
@@ -354,7 +354,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
- arena = arena_choose(tsd, arena, false);
+ arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
@@ -460,8 +460,7 @@ tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
if (unlikely(elm->tcache == NULL)) {
- elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL,
- false));
+ elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
}
return (elm->tcache);
}
diff --git a/src/arena.c b/src/arena.c
index 969ad85..45c53c1 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2649,7 +2649,7 @@ arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
bool zero)
{
- arena = arena_choose(tsd, arena, false);
+ arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
@@ -2674,7 +2674,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
assert(usize == PAGE_CEILING(usize));
- arena = arena_choose(tsd, arena, false);
+ arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
diff --git a/src/ckh.c b/src/ckh.c
index aa9803e..2518597 100644
--- a/src/ckh.c
+++ b/src/ckh.c
@@ -271,7 +271,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
goto label_return;
}
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
- true, arena_choose(tsd, NULL, true));
+ true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
@@ -315,7 +315,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return;
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
- arena_choose(tsd, NULL, true));
+ arena_ichoose(tsd, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -392,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
goto label_return;
}
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
- arena_choose(tsd, NULL, true));
+ arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
diff --git a/src/ctl.c b/src/ctl.c
index 2e81143..e0392d0 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1306,7 +1306,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
arena_t *oldarena;
unsigned newind, oldind;
- oldarena = arena_choose(tsd, NULL, false);
+ oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL)
return (EAGAIN);
diff --git a/src/huge.c b/src/huge.c
index bac2425..0b3aed0 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -56,8 +56,9 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
+ assert(tsd != NULL || arena != NULL);
node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
- CACHELINE, false, NULL, true, arena_choose(tsd, NULL, true));
+ CACHELINE, false, NULL, true, arena_ichoose(tsd, arena));
if (node == NULL)
return (NULL);
@@ -66,7 +67,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- arena = arena_choose(tsd, arena, false);
+ arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsd, arena,
usize, alignment, &is_zeroed)) == NULL) {
idalloctm(tsd, node, NULL, true, true);
diff --git a/src/prof.c b/src/prof.c
index 92edba8..b21cd6b 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -795,7 +795,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
/* Link a prof_tctx_t into gctx for this thread. */
ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
size2index(sizeof(prof_tctx_t)), false, NULL, true,
- arena_choose(tsd, NULL, true), true);
+ arena_ichoose(tsd, NULL), true);
if (ret.p == NULL) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
diff --git a/src/tcache.c b/src/tcache.c
index ca867c7..88005f3 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -97,7 +97,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
assert(binind < NBINS);
assert(rem <= tbin->ncached);
- arena = arena_choose(tsd, NULL, false);
+ arena = arena_choose(tsd, NULL);
assert(arena != NULL);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
@@ -179,7 +179,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
assert(binind < nhbins);
assert(rem <= tbin->ncached);
- arena = arena_choose(tsd, NULL, false);
+ arena = arena_choose(tsd, NULL);
assert(arena != NULL);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
@@ -307,7 +307,7 @@ tcache_get_hard(tsd_t *tsd)
tcache_enabled_set(false); /* Memoize. */
return (NULL);
}
- arena = arena_choose(tsd, NULL, false);
+ arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
return (tcache_create(tsd, arena));
@@ -359,7 +359,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
arena_t *arena;
unsigned i;
- arena = arena_choose(tsd, NULL, false);
+ arena = arena_choose(tsd, NULL);
tcache_arena_dissociate(tsd, tcache, arena);
for (i = 0; i < NBINS; i++) {
@@ -459,7 +459,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
- arena = arena_choose(tsd, NULL, true);
+ arena = arena_ichoose(tsd, NULL);
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsd, arena);