author | Jason Evans <jasone@canonware.com> | 2016-03-24 03:29:33 (GMT)
committer | Jason Evans <jasone@canonware.com> | 2016-06-03 19:27:41 (GMT)
commit | db72272bef91fa1b4709e89168aede0f01206d55 (patch)
tree | 75d99bde1468ee4fb3bcbfd57752d16afb953ec1 /src/tcache.c
parent | 2d2b4e98c947f9fcaf4a9fd2215b685057e89212 (diff)
Use rtree-based chunk lookups rather than pointer bit twiddling.
Look up chunk metadata via the radix tree rather than using CHUNK_ADDR2BASE().

Propagate the pointer's containing extent: minimize extent lookups by doing a
single lookup (e.g. in free()) and propagating the extent into nearly all of
the functions that may need it.
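To make the change concrete, here is a minimal, self-contained C sketch, for illustration only: it is not jemalloc's implementation, and the names (iealloc_toy, rtree_key, the one-level table, LG_CHUNK) are invented stand-ins. It contrasts the old CHUNK_ADDR2BASE()-style pointer masking with a lookup keyed off the pointer, and shows a single lookup at the free() entry point being propagated to a helper.

```c
#include <stdint.h>
#include <stdio.h>

#define LG_CHUNK	21	/* hypothetical 2 MiB chunk alignment */
#define CHUNK_MASK	(((uintptr_t)1 << LG_CHUNK) - 1)

/* Old approach: recover chunk metadata by masking the pointer's low bits. */
static void *
chunk_addr2base_toy(const void *ptr)
{
	return ((void *)((uintptr_t)ptr & ~CHUNK_MASK));
}

/* Invented extent metadata and a toy one-level stand-in for the rtree. */
typedef struct {
	void	*addr;		/* base address of the extent */
	int	 arena_ind;	/* owning arena */
} extent_toy_t;

#define RTREE_SLOTS	1024
static extent_toy_t *rtree_toy[RTREE_SLOTS];

static size_t
rtree_key(const void *ptr)
{
	return (((uintptr_t)ptr >> LG_CHUNK) % RTREE_SLOTS);
}

/* New approach: look the extent up by key instead of bit twiddling. */
static extent_toy_t *
iealloc_toy(const void *ptr)
{
	return (rtree_toy[rtree_key(ptr)]);
}

/* Helper that would otherwise have to repeat the lookup itself. */
static void
dalloc_with_extent(extent_toy_t *extent, void *ptr)
{
	printf("freeing %p from arena %d (extent base %p)\n", ptr,
	    extent->arena_ind, extent->addr);
}

/* Entry point: one lookup, then the extent is propagated to helpers. */
static void
free_toy(void *ptr)
{
	extent_toy_t *extent = iealloc_toy(ptr);

	dalloc_with_extent(extent, ptr);
}

int
main(void)
{
	extent_toy_t ext = {(void *)((uintptr_t)1 << LG_CHUNK), 0};
	void *ptr = (void *)((uintptr_t)ext.addr + 64);

	rtree_toy[rtree_key(ptr)] = &ext;
	printf("masked base: %p\n", chunk_addr2base_toy(ptr));
	free_toy(ptr);
	return (0);
}
```

The trade-off mirrors the commit: masking is a couple of arithmetic instructions but ties every allocation to a fixed chunk layout, whereas a metadata lookup costs a tree walk but lets extents describe themselves; doing that walk once per operation and passing the result down keeps the added cost to a single lookup.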
Diffstat (limited to 'src/tcache.c')
-rw-r--r-- | src/tcache.c | 31
1 file changed, 17 insertions, 14 deletions
diff --git a/src/tcache.c b/src/tcache.c
index c4a9900..c02f0f0 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -27,7 +27,7 @@ size_t
 tcache_salloc(tsdn_t *tsdn, const void *ptr)
 {
 
-	return (arena_salloc(tsdn, ptr, false));
+	return (arena_salloc(tsdn, iealloc(ptr), ptr, false));
 }
 
 void
@@ -101,9 +101,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 	assert(arena != NULL);
 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
 		/* Lock the arena bin associated with the first object. */
-		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-		    *(tbin->avail - 1));
-		arena_t *bin_arena = extent_arena_get(&chunk->extent);
+		extent_t *extent = iealloc(*(tbin->avail - 1));
+		arena_t *bin_arena = extent_arena_get(extent);
 		arena_bin_t *bin = &bin_arena->bins[binind];
 
 		if (config_prof && bin_arena == arena) {
@@ -125,14 +124,17 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		for (i = 0; i < nflush; i++) {
 			ptr = *(tbin->avail - 1 - i);
 			assert(ptr != NULL);
-			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (extent_arena_get(&chunk->extent) == bin_arena) {
+
+			extent = iealloc(ptr);
+			if (extent_arena_get(extent) == bin_arena) {
+				arena_chunk_t *chunk =
+				    (arena_chunk_t *)extent_addr_get(extent);
 				size_t pageind = ((uintptr_t)ptr -
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
 				    arena_bitselm_get_mutable(chunk, pageind);
 				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-				    bin_arena, chunk, ptr, bitselm);
+				    bin_arena, chunk, extent, ptr, bitselm);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -183,9 +185,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 	assert(arena != NULL);
 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
 		/* Lock the arena associated with the first object. */
-		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
-		    *(tbin->avail - 1));
-		arena_t *locked_arena = extent_arena_get(&chunk->extent);
+		extent_t *extent = iealloc(*(tbin->avail - 1));
+		arena_t *locked_arena = extent_arena_get(extent);
 		UNUSED bool idump;
 
 		if (config_prof)
@@ -210,10 +211,12 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		for (i = 0; i < nflush; i++) {
 			ptr = *(tbin->avail - 1 - i);
 			assert(ptr != NULL);
-			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (extent_arena_get(&chunk->extent) == locked_arena) {
+			extent = iealloc(ptr);
+			if (extent_arena_get(extent) == locked_arena) {
+				arena_chunk_t *chunk =
+				    (arena_chunk_t *)extent_addr_get(extent);
 				arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
-				    locked_arena, chunk, ptr);
+				    locked_arena, chunk, extent, ptr);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -391,7 +394,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 	    arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
 		prof_idump(tsd_tsdn(tsd));
 
-	idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+	idalloctm(tsd_tsdn(tsd), iealloc(tcache), tcache, NULL, true, true);
 }
 
 void