summaryrefslogtreecommitdiffstats
path: root/src/base.c
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2015-02-16 02:04:46 (GMT)
committerJason Evans <jasone@canonware.com>2015-02-17 05:02:17 (GMT)
commitee41ad409a43d12900a5a3108f6c14f84e4eb0eb (patch)
treead3d3b0aa3b3a6c358f0f7155b76ec69a5d57697 /src/base.c
parent40ab8f98e42fda3816e2a993f136ec4770c202c7 (diff)
downloadjemalloc-ee41ad409a43d12900a5a3108f6c14f84e4eb0eb.zip
jemalloc-ee41ad409a43d12900a5a3108f6c14f84e4eb0eb.tar.gz
jemalloc-ee41ad409a43d12900a5a3108f6c14f84e4eb0eb.tar.bz2
Integrate whole chunks into unused dirty page purging machinery.
Extend per arena unused dirty page purging to manage unused dirty chunks in addition to unused dirty runs. Rather than immediately unmapping deallocated chunks (or purging them in the --disable-munmap case), store them in a separate set of trees, chunks_[sz]ad_dirty. Preferentially allocate dirty chunks. When excessive unused dirty pages accumulate, purge runs and chunks in integrated LRU order (and unmap chunks in the --enable-munmap case). Refactor extent_node_t to provide accessor functions.
Diffstat (limited to 'src/base.c')
-rw-r--r--src/base.c16
1 files changed, 8 insertions, 8 deletions
diff --git a/src/base.c b/src/base.c
index 7b5804e..819fa02 100644
--- a/src/base.c
+++ b/src/base.c
@@ -60,8 +60,8 @@ base_chunk_alloc(size_t minsize)
if (config_stats)
base_allocated += nsize;
}
- node->addr = addr;
- node->size = csize;
+ extent_node_addr_set(node, addr);
+ extent_node_size_set(node, csize);
return (node);
}
@@ -84,8 +84,8 @@ base_alloc(size_t size)
*/
csize = CACHELINE_CEILING(size);
- key.addr = NULL;
- key.size = csize;
+ extent_node_addr_set(&key, NULL);
+ extent_node_size_set(&key, csize);
malloc_mutex_lock(&base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
@@ -100,10 +100,10 @@ base_alloc(size_t size)
goto label_return;
}
- ret = node->addr;
- if (node->size > csize) {
- node->addr = (void *)((uintptr_t)ret + csize);
- node->size -= csize;
+ ret = extent_node_addr_get(node);
+ if (extent_node_size_get(node) > csize) {
+ extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+ extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
} else
base_node_dalloc(node);