author    | Jason Evans <jasone@canonware.com> | 2016-06-01 18:35:30 (GMT)
committer | Jason Evans <jasone@canonware.com> | 2016-06-06 03:42:23 (GMT)
commit    | c9a76481d8e411e52240a4e4313dbbfa99801073
tree      | 660f60cb2b699c808b43a302df061dc8ca29745d
parent    | 127026ad989c06feda12371e584b4af4dffaf2db
Rename chunks_{cached,retained,mtx} to extents_{cached,retained,mtx}.
-rw-r--r-- | include/jemalloc/internal/arena.h   | 16
-rw-r--r-- | include/jemalloc/internal/witness.h |  2
-rw-r--r-- | src/arena.c                         | 19
-rw-r--r-- | src/chunk.c                         | 32
4 files changed, 35 insertions, 34 deletions

diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index f60b9d6..0707b86 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -235,20 +235,20 @@ struct arena_s {
     malloc_mutex_t large_mtx;
 
     /*
-     * Heaps of chunks that were previously allocated. These are used when
-     * allocating chunks, in an attempt to re-use address space.
+     * Heaps of extents that were previously allocated. These are used when
+     * allocating extents, in an attempt to re-use address space.
      */
-    extent_heap_t chunks_cached[NPSIZES];
-    extent_heap_t chunks_retained[NPSIZES];
+    extent_heap_t extents_cached[NPSIZES];
+    extent_heap_t extents_retained[NPSIZES];
+    /* User-configurable extent hook functions. */
+    extent_hooks_t extent_hooks;
+    /* Protects extents_cached, extents_retained, and extent_hooks. */
+    malloc_mutex_t extents_mtx;
 
-    malloc_mutex_t chunks_mtx;
     /* Cache of extent structures that were allocated via base_alloc(). */
     ql_head(extent_t) extent_cache;
     malloc_mutex_t extent_cache_mtx;
 
-    /* User-configurable extent hook functions. */
-    extent_hooks_t extent_hooks;
-
     /* bins is used to store heaps of free regions. */
     arena_bin_t bins[NBINS];
 };
diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h
index 8c56c21..e2f8563 100644
--- a/include/jemalloc/internal/witness.h
+++ b/include/jemalloc/internal/witness.h
@@ -24,7 +24,7 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
 #define WITNESS_RANK_PROF_GCTX 7U
 
 #define WITNESS_RANK_ARENA 8U
-#define WITNESS_RANK_ARENA_CHUNKS 9U
+#define WITNESS_RANK_ARENA_EXTENTS 9U
 #define WITNESS_RANK_ARENA_EXTENT_CACHE 10
 
 #define WITNESS_RANK_RTREE_ELM 11U
diff --git a/src/arena.c b/src/arena.c
index 9a8c2e2..de6605a 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1793,20 +1793,21 @@ arena_new(tsdn_t *tsdn, unsigned ind)
         return (NULL);
 
     for (i = 0; i < NPSIZES; i++) {
-        extent_heap_new(&arena->chunks_cached[i]);
-        extent_heap_new(&arena->chunks_retained[i]);
+        extent_heap_new(&arena->extents_cached[i]);
+        extent_heap_new(&arena->extents_retained[i]);
     }
 
-    if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
-        WITNESS_RANK_ARENA_CHUNKS))
+    arena->extent_hooks = extent_hooks_default;
+
+    if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
+        WITNESS_RANK_ARENA_EXTENTS))
         return (NULL);
 
+    ql_new(&arena->extent_cache);
     if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
         WITNESS_RANK_ARENA_EXTENT_CACHE))
         return (NULL);
 
-    arena->extent_hooks = extent_hooks_default;
-
     /* Initialize bins. */
     for (i = 0; i < NBINS; i++) {
         arena_bin_t *bin = &arena->bins[i];
@@ -1843,7 +1844,7 @@
 void
 arena_prefork1(tsdn_t *tsdn, arena_t *arena)
 {
-    malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
+    malloc_mutex_prefork(tsdn, &arena->extents_mtx);
 }
 
 void
@@ -1872,7 +1873,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
     for (i = 0; i < NBINS; i++)
         malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
     malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
-    malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
+    malloc_mutex_postfork_parent(tsdn, &arena->extents_mtx);
     malloc_mutex_postfork_parent(tsdn, &arena->lock);
 }
 
@@ -1885,6 +1886,6 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
     for (i = 0; i < NBINS; i++)
         malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
     malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
-    malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
+    malloc_mutex_postfork_child(tsdn, &arena->extents_mtx);
     malloc_mutex_postfork_child(tsdn, &arena->lock);
 }
diff --git a/src/chunk.c b/src/chunk.c
index 78f08d4..2ac44b0 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -83,9 +83,9 @@ extent_hooks_get(tsdn_t *tsdn, arena_t *arena)
 {
     extent_hooks_t extent_hooks;
 
-    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_lock(tsdn, &arena->extents_mtx);
     extent_hooks = extent_hooks_get_locked(arena);
-    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 
     return (extent_hooks);
 }
@@ -96,7 +96,7 @@ extent_hooks_set(tsdn_t *tsdn, arena_t *arena,
 {
     extent_hooks_t old_extent_hooks;
 
-    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_lock(tsdn, &arena->extents_mtx);
     old_extent_hooks = arena->extent_hooks;
     /*
      * Copy each field atomically so that it is impossible for readers to
@@ -121,7 +121,7 @@ extent_hooks_set(tsdn_t *tsdn, arena_t *arena,
     ATOMIC_COPY_HOOK(split);
     ATOMIC_COPY_HOOK(merge);
 #undef ATOMIC_COPY_HOOK
-    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 
     return (old_extent_hooks);
 }
@@ -330,7 +330,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
     /* Beware size_t wrap-around. */
     if (alloc_size < usize)
         return (NULL);
-    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_lock(tsdn, &arena->extents_mtx);
     extent_hooks_assure_initialized_locked(tsdn, arena, extent_hooks);
     if (new_addr != NULL) {
         rtree_elm_t *elm;
@@ -350,7 +350,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
     extent = chunk_first_best_fit(arena, extent_heaps, alloc_size);
     if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
         size)) {
-        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+        malloc_mutex_unlock(tsdn, &arena->extents_mtx);
         return (NULL);
     }
     extent_heaps_remove(extent_heaps, extent);
@@ -373,7 +373,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
             leadsize, leadsize, size + trailsize, usize + trailsize);
         if (extent == NULL) {
             chunk_leak(tsdn, arena, extent_hooks, cache, lead);
-            malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+            malloc_mutex_unlock(tsdn, &arena->extents_mtx);
             return (NULL);
         }
         extent_heaps_insert(extent_heaps, lead);
@@ -386,7 +386,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
             extent, size, usize, trailsize, trailsize);
         if (trail == NULL) {
             chunk_leak(tsdn, arena, extent_hooks, cache, extent);
-            malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+            malloc_mutex_unlock(tsdn, &arena->extents_mtx);
             return (NULL);
         }
         extent_heaps_insert(extent_heaps, trail);
@@ -402,7 +402,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
     if (!extent_committed_get(extent) &&
         extent_hooks->commit(extent_base_get(extent), extent_size_get(extent),
         0, extent_size_get(extent), arena->ind)) {
-        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+        malloc_mutex_unlock(tsdn, &arena->extents_mtx);
         chunk_record(tsdn, arena, extent_hooks, extent_heaps, cache,
             extent);
         return (NULL);
@@ -416,7 +416,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
         chunk_interior_register(tsdn, extent);
     }
 
-    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 
     if (*zero) {
         if (!extent_zeroed_get(extent)) {
@@ -480,7 +480,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
     assert(alignment != 0);
 
     commit = true;
-    extent = chunk_recycle(tsdn, arena, extent_hooks, arena->chunks_cached,
+    extent = chunk_recycle(tsdn, arena, extent_hooks, arena->extents_cached,
         true, new_addr, usize, pad, alignment, zero, &commit, slab);
     if (extent == NULL)
         return (NULL);
@@ -531,7 +531,7 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
     assert(alignment != 0);
 
     extent = chunk_recycle(tsdn, arena, extent_hooks,
-        arena->chunks_retained, false, new_addr, usize, pad, alignment,
+        arena->extents_retained, false, new_addr, usize, pad, alignment,
         zero, commit, slab);
     if (extent != NULL && config_stats) {
         size_t size = usize + pad;
@@ -640,7 +640,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
 
     assert(!cache || !extent_zeroed_get(extent));
 
-    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_lock(tsdn, &arena->extents_mtx);
     extent_hooks_assure_initialized_locked(tsdn, arena, extent_hooks);
 
     extent_usize_set(extent, 0);
@@ -671,7 +671,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
             extent_heaps, cache);
     }
 
-    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+    malloc_mutex_unlock(tsdn, &arena->extents_mtx);
 }
 
 void
@@ -685,7 +685,7 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
 
     extent_addr_set(extent, extent_base_get(extent));
     extent_zeroed_set(extent, false);
-    chunk_record(tsdn, arena, extent_hooks, arena->chunks_cached, true,
+    chunk_record(tsdn, arena, extent_hooks, arena->extents_cached, true,
         extent);
 }
 
@@ -732,7 +732,7 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
 
     if (config_stats)
         arena->stats.retained += extent_size_get(extent);
-    chunk_record(tsdn, arena, extent_hooks, arena->chunks_retained, false,
+    chunk_record(tsdn, arena, extent_hooks, arena->extents_retained, false,
         extent);
 }
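
For readers skimming the hunks above, the sketch below restates what the rename leaves behind: the renamed arena fields and the locking convention around them. It is an illustrative, self-contained condensation, not jemalloc code. The placeholder types, the NPSIZES value, and the names arena_extents_view_t and view_hooks_get are invented for this sketch; the field names and comments are taken from the + lines in the arena.h and chunk.c hunks.

#include <pthread.h>
#include <stdio.h>

/*
 * Placeholder stand-ins for jemalloc internals; the real extent_heap_t,
 * extent_hooks_t, malloc_mutex_t, and NPSIZES live in jemalloc's internal
 * headers and are richer than this.
 */
typedef struct { void *dummy; } extent_heap_t;
typedef struct { void *alloc_hook; } extent_hooks_t;
typedef pthread_mutex_t malloc_mutex_t;
#define NPSIZES 64 /* illustrative value only */

/*
 * Condensed view of the renamed arena fields after this commit, taken from
 * the + lines of the arena.h hunk above (all other arena_s members omitted).
 */
typedef struct {
    extent_heap_t extents_cached[NPSIZES];
    extent_heap_t extents_retained[NPSIZES];
    extent_hooks_t extent_hooks; /* user-configurable extent hooks */
    malloc_mutex_t extents_mtx;  /* protects the three fields above */
} arena_extents_view_t;

/*
 * Locking convention visible in the chunk.c hunks: extent_hooks and the two
 * heaps are read and written only while extents_mtx is held, mirroring
 * extent_hooks_get()/extent_hooks_set() above.
 */
static extent_hooks_t
view_hooks_get(arena_extents_view_t *arena)
{
    extent_hooks_t hooks;

    pthread_mutex_lock(&arena->extents_mtx);
    hooks = arena->extent_hooks;
    pthread_mutex_unlock(&arena->extents_mtx);
    return (hooks);
}

int
main(void)
{
    arena_extents_view_t arena = {{{0}}};
    extent_hooks_t hooks;

    pthread_mutex_init(&arena.extents_mtx, NULL);
    hooks = view_hooks_get(&arena);
    printf("alloc hook: %p\n", hooks.alloc_hook);
    pthread_mutex_destroy(&arena.extents_mtx);
    return (0);
}

The point mirrored from the diff is that extents_cached, extents_retained, and extent_hooks now share a single mutex, extents_mtx, whose witness rank is WITNESS_RANK_ARENA_EXTENTS.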