Diffstat (limited to 'src/tcache.c')
-rw-r--r--  src/tcache.c  |  53
1 file changed, 12 insertions(+), 41 deletions(-)
diff --git a/src/tcache.c b/src/tcache.c
index 1bf7026..34224ec 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -117,8 +117,8 @@ tcache_bin_flush_small(tcache_bin_t *tbin, index_t binind, unsigned rem,
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
arena_bitselm_get(chunk, pageind);
- arena_dalloc_bin_locked(arena, chunk, ptr,
- bitselm);
+ arena_dalloc_bin_junked_locked(arena, chunk,
+ ptr, bitselm);
} else {
/*
* This object was allocated via a different
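The pageind computation in this hunk maps an object pointer to its page index within the owning chunk. A minimal standalone sketch of that idiom, assuming 4 KiB pages (LG_PAGE == 12 is an assumption here; jemalloc picks up the real page size at configure time):

#include <stddef.h>
#include <stdint.h>

#define LG_PAGE	12	/* assumed 4 KiB pages */

/* Page index of ptr within its chunk: the byte offset from the chunk
 * base, shifted down by the page size. */
static inline size_t
page_index(const void *chunk, const void *ptr)
{

	return (((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE);
}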
@@ -193,9 +193,10 @@ tcache_bin_flush_large(tcache_bin_t *tbin, index_t binind, unsigned rem,
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena)
- arena_dalloc_large_locked(arena, chunk, ptr);
- else {
+ if (chunk->arena == arena) {
+ arena_dalloc_large_junked_locked(arena, chunk,
+ ptr);
+ } else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
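Both flush paths first recover the owning chunk with CHUNK_ADDR2BASE(ptr) and compare chunk->arena against the arena that is currently locked. A sketch of the CHUNK_ADDR2BASE() idiom, assuming 4 MiB naturally aligned chunks (the historical default; the actual size is configurable): because every chunk starts on a chunk-size boundary, masking off the low bits of any interior pointer yields the chunk header, from which the owning arena can be read.

#include <stdint.h>

#define LG_CHUNK	22					/* assumed 4 MiB chunks */
#define CHUNK_MASK	(((uintptr_t)1 << LG_CHUNK) - 1)

/* Equivalent of CHUNK_ADDR2BASE(a): round an interior pointer down to
 * the base of its naturally aligned chunk. */
static inline void *
chunk_addr2base(const void *ptr)
{

	return ((void *)((uintptr_t)ptr & ~CHUNK_MASK));
}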
@@ -279,11 +280,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
- return (tcache_create(arena));
+ return (tcache_create(tsd, arena));
}

tcache_t *
-tcache_create(arena_t *arena)
+tcache_create(tsd_t *tsd, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -294,23 +295,10 @@ tcache_create(arena_t *arena)
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
- /*
- * Round up to the nearest multiple of the cacheline size, in order to
- * avoid the possibility of false cacheline sharing.
- *
- * That this works relies on the same logic as in ipalloc(), but we
- * cannot directly call ipalloc() here due to tcache bootstrapping
- * issues.
- */
- size = (size + CACHELINE_MASK) & (-CACHELINE);
-
- if (size <= SMALL_MAXCLASS)
- tcache = (tcache_t *)arena_malloc_small(arena, size, true);
- else if (size <= tcache_maxclass)
- tcache = (tcache_t *)arena_malloc_large(arena, size, true);
- else
- tcache = (tcache_t *)icalloct(NULL, size, false, arena);
+ /* Avoid false cacheline sharing. */
+ size = sa2u(size, CACHELINE);
+ tcache = ipalloct(tsd, size, CACHELINE, true, false, arena);

if (tcache == NULL)
return (NULL);
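The allocation of the tcache itself no longer open-codes the small/large/huge dispatch or the cacheline round-up: sa2u(size, CACHELINE) computes the aligned usable size and ipalloct() performs a single CACHELINE-aligned allocation from the chosen arena. A minimal standalone sketch of the same idea, with aligned_alloc() standing in for ipalloct() and a 64-byte cacheline assumed (neither is part of this change):

#include <stddef.h>
#include <stdlib.h>

#define CACHELINE	((size_t)64)		/* assumed cacheline size */
#define CACHELINE_MASK	(CACHELINE - 1)

/* Round the request up to a cacheline multiple, as sa2u(size, CACHELINE)
 * does above, so the tcache never shares a cacheline with a neighbor. */
static size_t
cacheline_ceiling(size_t size)
{

	return ((size + CACHELINE_MASK) & ~CACHELINE_MASK);
}

/* Stand-in for the ipalloct() call: one aligned allocation replaces the
 * old three-way size-class dispatch. */
static void *
tcache_shell_alloc(size_t size)
{

	size = cacheline_ceiling(size);
	return (aligned_alloc(CACHELINE, size));
}

The removed comment noted that ipalloc() could not be called here because of tcache bootstrapping issues; with tsd now threaded through tcache_create(), the generic ipalloct() path can be used instead.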
@@ -331,7 +319,6 @@ static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
unsigned i;
- size_t tcache_size;

tcache_arena_dissociate(tcache);
@@ -366,23 +353,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
prof_idump();

- tcache_size = arena_salloc(tcache, false);
- if (tcache_size <= SMALL_MAXCLASS) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
- size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
- LG_PAGE;
- arena_chunk_map_bits_t *bitselm = arena_bitselm_get(chunk,
- pageind);
-
- arena_dalloc_bin(arena, chunk, tcache, pageind, bitselm);
- } else if (tcache_size <= tcache_maxclass) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
-
- arena_dalloc_large(arena, chunk, tcache);
- } else
- idalloct(tsd, tcache, false);
+ idalloct(tsd, tcache, false);
}

void
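On the teardown side, tcache_destroy() previously recomputed the allocation's size class with arena_salloc() and called arena_dalloc_bin(), arena_dalloc_large(), or idalloct() by hand; after this change it unconditionally calls idalloct(), which performs that dispatch internally. A sketch of the resulting shape, with free() standing in for idalloct(tsd, tcache, false) (an illustration only, not jemalloc's API):

#include <stdlib.h>

/* Counterpart to tcache_shell_alloc() above: one deallocation call, with
 * the allocator's own dalloc path deciding whether the region was small,
 * large, or huge. */
static void
tcache_shell_dalloc(void *tcache)
{

	free(tcache);
}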