author | Qi Wang <interwq@gwu.edu> | 2017-07-31 21:35:33 (GMT) |
---|---|---|
committer | Qi Wang <interwq@gmail.com> | 2017-07-31 22:47:48 (GMT) |
commit | 1ab2ab294c8f29a6f314f3ff30fbf4cdb2f01af6 (patch) | |
tree | 80179b00ac6fa2338feb2d917daa9b78b8475a99 /src | |
parent | 9a39b23c9c823e8157e2e6850014fa67c09f9351 (diff) | |
Only read szind if ptr is not page aligned in sdallocx.
If ptr is not page aligned, we know the allocation was not sampled. In this case,
use the size passed into sdallocx directly, without accessing the rtree. This improves
sdallocx efficiency in the common case (not sampled && small allocation).
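As a standalone illustration of the invariant this relies on (not jemalloc's actual definitions): prof-sampled and large allocations are page aligned, so any pointer with a nonzero page offset must refer to a small, unsampled slab allocation, and the size hint alone determines its size class. `PAGE` and `PAGE_MASK` below are assumed stand-ins for jemalloc's configured values, and `size_hint_determines_szind` is a hypothetical helper name.

```c
#include <stdbool.h>
#include <stdint.h>

/* Assumed 4 KiB page; jemalloc derives these from its configured page size. */
#define PAGE      ((uintptr_t)4096)
#define PAGE_MASK (PAGE - 1)

/*
 * Sketch of the fast-path test: if any page-offset bit of ptr is set, the
 * allocation cannot be page aligned, hence cannot be prof-sampled or large,
 * and the size passed to sdallocx suffices to recover the size class
 * without touching the rtree.
 */
static bool
size_hint_determines_szind(const void *ptr) {
	return ((uintptr_t)ptr & PAGE_MASK) != 0;
}
```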
Diffstat (limited to 'src')
-rw-r--r-- | src/jemalloc.c | 24 |
1 file changed, 22 insertions, 2 deletions
```diff
diff --git a/src/jemalloc.c b/src/jemalloc.c
index ed47052..4c73ba4 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -2194,17 +2194,37 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 	assert(malloc_initialized() || IS_INITIALIZER);
 
 	alloc_ctx_t alloc_ctx, *ctx;
-	if (config_prof && opt_prof) {
+	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
+		/*
+		 * When cache_oblivious is disabled and ptr is not page aligned,
+		 * the allocation was not sampled -- usize can be used to
+		 * determine szind directly.
+		 */
+		alloc_ctx.szind = sz_size2index(usize);
+		alloc_ctx.slab = true;
+		ctx = &alloc_ctx;
+		if (config_debug) {
+			alloc_ctx_t dbg_ctx;
+			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
+			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
+			    &dbg_ctx.slab);
+			assert(dbg_ctx.szind == alloc_ctx.szind);
+			assert(dbg_ctx.slab == alloc_ctx.slab);
+		}
+	} else if (config_prof && opt_prof) {
 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
 		assert(alloc_ctx.szind == sz_size2index(usize));
 		ctx = &alloc_ctx;
-		prof_free(tsd, ptr, usize, ctx);
 	} else {
 		ctx = NULL;
 	}
 
+	if (config_prof && opt_prof) {
+		prof_free(tsd, ptr, usize, ctx);
+	}
 	if (config_stats) {
 		*tsd_thread_deallocatedp_get(tsd) += usize;
 	}
```
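For context, a hedged caller-side sketch of the common case this change targets: a small allocation whose size the caller still knows at free time, passed back via sdallocx so the size class can be recovered without the rtree lookup. mallocx/sdallocx are jemalloc's non-standard API; the specific sizes here are illustrative only.

```c
#include <jemalloc/jemalloc.h>

int
main(void) {
	/*
	 * Small allocation; with cache-oblivious layout disabled, a
	 * non-sampled small allocation is generally not page aligned.
	 */
	void *p = mallocx(48, 0);
	if (p == NULL) {
		return 1;
	}

	/*
	 * Sized deallocation: the caller passes the original request size,
	 * which (per this commit) lets the fast path compute the size class
	 * from the size instead of reading szind out of the rtree.
	 */
	sdallocx(p, 48, 0);
	return 0;
}
```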