summaryrefslogtreecommitdiffstats
path: root/src/arena.c
diff options
context:
space:
mode:
authorJason Evans <je@fb.com>2012-11-13 20:56:27 (GMT)
committerJason Evans <je@fb.com>2012-11-13 21:47:53 (GMT)
commita3b3386ddde8048b9d6b54c397bb93da5e806cef (patch)
tree3c5e10d49e9eb914af8e1edcab69fbda1f8d4d0c /src/arena.c
parent556ddc7fa94f13c388ec6c9d2d54ace250540f2c (diff)
downloadjemalloc-a3b3386ddde8048b9d6b54c397bb93da5e806cef.zip
jemalloc-a3b3386ddde8048b9d6b54c397bb93da5e806cef.tar.gz
jemalloc-a3b3386ddde8048b9d6b54c397bb93da5e806cef.tar.bz2
Avoid arena_prof_accum()-related locking when possible.
Refactor arena_prof_accum() and its callers to avoid arena locking when prof_interval is 0 (as when profiling is disabled). Reported by Ben Maurer.
Diffstat (limited to 'src/arena.c')
-rw-r--r--src/arena.c27
1 file changed, 3 insertions, 24 deletions
diff --git a/src/arena.c b/src/arena.c
index 0c53b07..f9406c7 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1322,21 +1322,6 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
-{
-
- cassert(config_prof);
-
- if (config_prof && prof_interval != 0) {
- arena->prof_accumbytes += accumbytes;
- if (arena->prof_accumbytes >= prof_interval) {
- prof_idump();
- arena->prof_accumbytes -= prof_interval;
- }
- }
-}
-
-void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
uint64_t prof_accumbytes)
{
@@ -1347,11 +1332,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
assert(tbin->ncached == 0);
- if (config_prof) {
- malloc_mutex_lock(&arena->lock);
+ if (config_prof)
arena_prof_accum(arena, prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
- }
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1459,11 +1441,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
bin->stats.nrequests++;
}
malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false) {
- malloc_mutex_lock(&arena->lock);
+ if (config_prof && isthreaded == false)
arena_prof_accum(arena, size);
- malloc_mutex_unlock(&arena->lock);
- }
if (zero == false) {
if (config_fill) {
@@ -1507,7 +1486,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
if (config_prof)
- arena_prof_accum(arena, size);
+ arena_prof_accum_locked(arena, size);
malloc_mutex_unlock(&arena->lock);
if (zero == false) {