author     Jason Evans <jasone@canonware.com>    2015-02-16 02:04:46 (GMT)
committer  Jason Evans <jasone@canonware.com>    2015-02-17 05:02:17 (GMT)
commit     ee41ad409a43d12900a5a3108f6c14f84e4eb0eb (patch)
tree       ad3d3b0aa3b3a6c358f0f7155b76ec69a5d57697 /src/tcache.c
parent     40ab8f98e42fda3816e2a993f136ec4770c202c7 (diff)
Integrate whole chunks into unused dirty page purging machinery.
Extend per arena unused dirty page purging to manage unused dirty chunks
in addition to unused dirty runs. Rather than immediately unmapping
deallocated chunks (or purging them in the --disable-munmap case), store
them in a separate set of trees, chunks_[sz]ad_dirty. Preferentially
allocate dirty chunks. When excessive unused dirty pages accumulate,
purge runs and chunks in integrated LRU order (and unmap chunks in the
--enable-munmap case).
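
As a loose sketch of that chunk lifecycle (hypothetical names and
structures, using a single plain LRU list in place of the
chunks_[sz]ad_dirty trees and the integrated run/chunk ordering
described above; this is not jemalloc's actual code):

#include <stddef.h>
#include <sys/queue.h>

typedef struct dirty_chunk_s {
	void	*addr;
	size_t	size;
	TAILQ_ENTRY(dirty_chunk_s) link;	/* LRU linkage */
} dirty_chunk_t;

static TAILQ_HEAD(, dirty_chunk_s) dirty_lru =
    TAILQ_HEAD_INITIALIZER(dirty_lru);

/* On deallocation: retain the chunk as dirty instead of unmapping it. */
static void
chunk_record_dirty(dirty_chunk_t *chunk)
{

	TAILQ_INSERT_TAIL(&dirty_lru, chunk, link);
}

/* On allocation: preferentially reuse a dirty chunk. */
static dirty_chunk_t *
chunk_alloc_dirty(size_t size)
{
	dirty_chunk_t *c;

	TAILQ_FOREACH(c, &dirty_lru, link) {
		if (c->size >= size) {
			TAILQ_REMOVE(&dirty_lru, c, link);
			return (c);
		}
	}
	return (NULL);	/* Fall back to mapping a clean chunk. */
}

/* When unused dirty pages exceed the threshold: purge oldest first. */
static void
chunks_purge(size_t *ndirty, size_t ndirty_max)
{

	while (*ndirty > ndirty_max && !TAILQ_EMPTY(&dirty_lru)) {
		dirty_chunk_t *c = TAILQ_FIRST(&dirty_lru);

		TAILQ_REMOVE(&dirty_lru, c, link);
		*ndirty -= c->size;	/* Then munmap()/madvise() c->addr. */
	}
}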
Refactor extent_node_t to provide accessor functions.
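
That refactoring is what the diff below exercises: direct
chunk->node.arena field reads become extent_node_arena_get(&chunk->node)
calls. As a rough illustration of the pattern (not the actual jemalloc
definitions; the real extent_node_t carries more fields and its field
names may differ), the accessor amounts to:

typedef struct arena_s arena_t;	/* Opaque here. */

typedef struct extent_node_s {
	arena_t	*arena;	/* Arena from which this extent came. */
	/* ... address, size, zeroed flag, tree/list linkage ... */
} extent_node_t;

static inline arena_t *
extent_node_arena_get(const extent_node_t *node)
{

	return (node->arena);
}

Routing all field access through accessors lets later commits change the
node's representation without touching call sites such as those in
src/tcache.c.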
Diffstat (limited to 'src/tcache.c')
-rw-r--r--  src/tcache.c |  9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/src/tcache.c b/src/tcache.c
index 318e0dc..8d0a6fa 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -103,7 +103,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 	/* Lock the arena bin associated with the first object. */
 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 	    tbin->avail[0]);
-	arena_t *bin_arena = chunk->node.arena;
+	arena_t *bin_arena = extent_node_arena_get(&chunk->node);
 	arena_bin_t *bin = &bin_arena->bins[binind];
 
 	if (config_prof && bin_arena == arena) {
@@ -125,7 +125,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->node.arena == bin_arena) {
+			if (extent_node_arena_get(&chunk->node) == bin_arena) {
 				size_t pageind = ((uintptr_t)ptr -
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
@@ -183,7 +183,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 	/* Lock the arena associated with the first object. */
 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 	    tbin->avail[0]);
-	arena_t *locked_arena = chunk->node.arena;
+	arena_t *locked_arena = extent_node_arena_get(&chunk->node);
 	UNUSED bool idump;
 
 	if (config_prof)
@@ -209,7 +209,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->node.arena == locked_arena) {
+			if (extent_node_arena_get(&chunk->node) ==
+			    locked_arena) {
 				arena_dalloc_large_junked_locked(locked_arena,
 				    chunk, ptr);
 			} else {