author     Jason Evans <jasone@canonware.com>    2010-01-25 01:13:07 (GMT)
committer  Jason Evans <jasone@canonware.com>    2010-01-25 01:13:07 (GMT)
commit     41631d00618d7262125e501c91d31b4d70e605fa
tree       cb90050fa903c2a7a15159781662efb8c91340da
parent     bc25a47ee0e2ac8e10a94d5fa070f0dbbdeb7e7e
Modify chunk_alloc() to support optional zeroing.
Use optional zeroing in arena_chunk_alloc() to avoid needless zeroing of chunks. This is particularly important in the context of swapfile and DSS allocation, since a long-lived application may commonly recycle chunks.
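The change turns the zero argument into an in/out flag: on entry it says whether the caller requires zeroed memory, and on return it says whether the returned memory is in fact zeroed. A minimal sketch of that calling convention, with simplified hypothetical names (example_chunk_alloc() is a stand-in, not the actual jemalloc source):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/*
 * Stand-in allocator: calloc() plays the role of a fresh, kernel-zeroed
 * mapping.  Fresh pages are known-zeroed, so *zero is toggled to true
 * even if the caller did not demand zeroing.
 */
static void *
example_chunk_alloc(size_t size, bool *zero)
{
	void *ret;

	ret = calloc(1, size);
	if (ret == NULL)
		return (NULL);
	*zero = true;
	return (ret);
}

static void
example_caller(size_t size)
{
	bool zero = false;	/* Zeroing not demanded... */
	void *chunk = example_chunk_alloc(size, &zero);

	if (chunk != NULL && zero) {
		/*
		 * ...but the chunk arrived zeroed anyway, so the caller can
		 * mark its pages as zeroed and skip any redundant memset(),
		 * as arena_chunk_alloc() now does via CHUNK_MAP_ZEROED.
		 */
	}
	free(chunk);
}

int
main(void)
{
	example_caller(4096);
	return (0);
}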
Diffstat (limited to 'jemalloc')
-rw-r--r--  jemalloc/src/internal/jemalloc_chunk.h       |  2
-rw-r--r--  jemalloc/src/internal/jemalloc_chunk_dss.h   |  2
-rw-r--r--  jemalloc/src/internal/jemalloc_chunk_swap.h  |  2
-rw-r--r--  jemalloc/src/jemalloc_arena.c                | 19
-rw-r--r--  jemalloc/src/jemalloc_base.c                 |  4
-rw-r--r--  jemalloc/src/jemalloc_chunk.c                | 12
-rw-r--r--  jemalloc/src/jemalloc_chunk_dss.c            |  9
-rw-r--r--  jemalloc/src/jemalloc_chunk_swap.c           | 12
-rw-r--r--  jemalloc/src/jemalloc_huge.c                 |  6
9 files changed, 44 insertions(+), 24 deletions(-)
diff --git a/jemalloc/src/internal/jemalloc_chunk.h b/jemalloc/src/internal/jemalloc_chunk.h
index 7440168..13b72ed 100644
--- a/jemalloc/src/internal/jemalloc_chunk.h
+++ b/jemalloc/src/internal/jemalloc_chunk.h
@@ -43,7 +43,7 @@ extern size_t chunk_npages;
extern size_t arena_chunk_header_npages;
extern size_t arena_maxclass; /* Max size class for arenas. */
-void *chunk_alloc(size_t size, bool zero);
+void *chunk_alloc(size_t size, bool *zero);
void chunk_dealloc(void *chunk, size_t size);
bool chunk_boot(void);
diff --git a/jemalloc/src/internal/jemalloc_chunk_dss.h b/jemalloc/src/internal/jemalloc_chunk_dss.h
index dc7b38e..6be4ad1 100644
--- a/jemalloc/src/internal/jemalloc_chunk_dss.h
+++ b/jemalloc/src/internal/jemalloc_chunk_dss.h
@@ -16,7 +16,7 @@
*/
extern malloc_mutex_t dss_mtx;
-void *chunk_alloc_dss(size_t size, bool zero);
+void *chunk_alloc_dss(size_t size, bool *zero);
bool chunk_dealloc_dss(void *chunk, size_t size);
bool chunk_dss_boot(void);
diff --git a/jemalloc/src/internal/jemalloc_chunk_swap.h b/jemalloc/src/internal/jemalloc_chunk_swap.h
index 3d5c5d2..5bdf307 100644
--- a/jemalloc/src/internal/jemalloc_chunk_swap.h
+++ b/jemalloc/src/internal/jemalloc_chunk_swap.h
@@ -16,7 +16,7 @@ extern bool swap_enabled;
extern size_t swap_avail;
#endif
-void *chunk_alloc_swap(size_t size, bool zero);
+void *chunk_alloc_swap(size_t size, bool *zero);
bool chunk_dealloc_swap(void *chunk, size_t size);
bool chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
bool chunk_swap_boot(void);
diff --git a/jemalloc/src/jemalloc_arena.c b/jemalloc/src/jemalloc_arena.c
index 3dbf47d..e8b21d7 100644
--- a/jemalloc/src/jemalloc_arena.c
+++ b/jemalloc/src/jemalloc_arena.c
@@ -586,7 +586,11 @@ arena_chunk_alloc(arena_t *arena)
chunk = arena->spare;
arena->spare = NULL;
} else {
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
+ bool zero;
+ size_t zeroed;
+
+ zero = false;
+ chunk = (arena_chunk_t *)chunk_alloc(chunksize, &zero);
if (chunk == NULL)
return (NULL);
#ifdef JEMALLOC_STATS
@@ -604,15 +608,16 @@ arena_chunk_alloc(arena_t *arena)
/*
* Initialize the map to contain one maximal free untouched run.
+ * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
+ * chunk.
*/
+ zeroed = zero ? CHUNK_MAP_ZEROED : 0;
for (i = 0; i < arena_chunk_header_npages; i++)
chunk->map[i].bits = 0;
- chunk->map[i].bits = arena_maxclass | CHUNK_MAP_ZEROED;
- for (i++; i < chunk_npages-1; i++) {
- chunk->map[i].bits = CHUNK_MAP_ZEROED;
- }
- chunk->map[chunk_npages-1].bits = arena_maxclass |
- CHUNK_MAP_ZEROED;
+ chunk->map[i].bits = arena_maxclass | zeroed;
+ for (i++; i < chunk_npages-1; i++)
+ chunk->map[i].bits = zeroed;
+ chunk->map[chunk_npages-1].bits = arena_maxclass | zeroed;
}
/* Insert the run into the runs_avail tree. */
diff --git a/jemalloc/src/jemalloc_base.c b/jemalloc/src/jemalloc_base.c
index bb88f7c..607f94a 100644
--- a/jemalloc/src/jemalloc_base.c
+++ b/jemalloc/src/jemalloc_base.c
@@ -27,10 +27,12 @@ static bool
base_pages_alloc(size_t minsize)
{
size_t csize;
+ bool zero;
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
- base_pages = chunk_alloc(csize, false);
+ zero = false;
+ base_pages = chunk_alloc(csize, &zero);
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
diff --git a/jemalloc/src/jemalloc_chunk.c b/jemalloc/src/jemalloc_chunk.c
index 2da85a6..c42151d 100644
--- a/jemalloc/src/jemalloc_chunk.c
+++ b/jemalloc/src/jemalloc_chunk.c
@@ -22,8 +22,14 @@ size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
+/*
+ * If the caller specifies (*zero == false), it is still possible to receive
+ * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
+ * takes advantage of this to avoid demanding zeroed chunks, but taking
+ * advantage of them if they are returned.
+ */
void *
-chunk_alloc(size_t size, bool zero)
+chunk_alloc(size_t size, bool *zero)
{
void *ret;
@@ -45,8 +51,10 @@ chunk_alloc(size_t size, bool zero)
goto RETURN;
#endif
ret = chunk_alloc_mmap(size);
- if (ret != NULL)
+ if (ret != NULL) {
+ *zero = true;
goto RETURN;
+ }
#ifdef JEMALLOC_SWAP
}
#endif
diff --git a/jemalloc/src/jemalloc_chunk_dss.c b/jemalloc/src/jemalloc_chunk_dss.c
index 4a4bb5f..7c6cdd6 100644
--- a/jemalloc/src/jemalloc_chunk_dss.c
+++ b/jemalloc/src/jemalloc_chunk_dss.c
@@ -25,13 +25,13 @@ static extent_tree_t dss_chunks_ad;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void *chunk_recycle_dss(size_t size, bool zero);
+static void *chunk_recycle_dss(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
/******************************************************************************/
static void *
-chunk_recycle_dss(size_t size, bool zero)
+chunk_recycle_dss(size_t size, bool *zero)
{
extent_node_t *node, key;
@@ -60,7 +60,7 @@ chunk_recycle_dss(size_t size, bool zero)
}
malloc_mutex_unlock(&dss_mtx);
- if (zero)
+ if (*zero)
memset(ret, 0, size);
return (ret);
}
@@ -70,7 +70,7 @@ chunk_recycle_dss(size_t size, bool zero)
}
void *
-chunk_alloc_dss(size_t size, bool zero)
+chunk_alloc_dss(size_t size, bool *zero)
{
void *ret;
@@ -116,6 +116,7 @@ chunk_alloc_dss(size_t size, bool zero)
/* Success. */
dss_max = (void *)((intptr_t)dss_prev + incr);
malloc_mutex_unlock(&dss_mtx);
+ *zero = true;
return (ret);
}
} while (dss_prev != (void *)-1);
diff --git a/jemalloc/src/jemalloc_chunk_swap.c b/jemalloc/src/jemalloc_chunk_swap.c
index a0cb40b..0a30471 100644
--- a/jemalloc/src/jemalloc_chunk_swap.c
+++ b/jemalloc/src/jemalloc_chunk_swap.c
@@ -31,13 +31,13 @@ static extent_tree_t swap_chunks_ad;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void *chunk_recycle_swap(size_t size, bool zero);
+static void *chunk_recycle_swap(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
/******************************************************************************/
static void *
-chunk_recycle_swap(size_t size, bool zero)
+chunk_recycle_swap(size_t size, bool *zero)
{
extent_node_t *node, key;
@@ -69,7 +69,7 @@ chunk_recycle_swap(size_t size, bool zero)
#endif
malloc_mutex_unlock(&swap_mtx);
- if (zero)
+ if (*zero)
memset(ret, 0, size);
return (ret);
}
@@ -79,7 +79,7 @@ chunk_recycle_swap(size_t size, bool zero)
}
void *
-chunk_alloc_swap(size_t size, bool zero)
+chunk_alloc_swap(size_t size, bool *zero)
{
void *ret;
@@ -98,7 +98,9 @@ chunk_alloc_swap(size_t size, bool zero)
#endif
malloc_mutex_unlock(&swap_mtx);
- if (zero && swap_prezeroed == false)
+ if (swap_prezeroed)
+ *zero = true;
+ else if (*zero)
memset(ret, 0, size);
} else {
malloc_mutex_unlock(&swap_mtx);
diff --git a/jemalloc/src/jemalloc_huge.c b/jemalloc/src/jemalloc_huge.c
index 7855179..b914b45 100644
--- a/jemalloc/src/jemalloc_huge.c
+++ b/jemalloc/src/jemalloc_huge.c
@@ -37,7 +37,7 @@ huge_malloc(size_t size, bool zero)
if (node == NULL)
return (NULL);
- ret = chunk_alloc(csize, zero);
+ ret = chunk_alloc(csize, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -74,6 +74,7 @@ huge_palloc(size_t alignment, size_t size)
void *ret;
size_t alloc_size, chunk_size, offset;
extent_node_t *node;
+ bool zero;
/*
* This allocation requires alignment that is even larger than chunk
@@ -97,7 +98,8 @@ huge_palloc(size_t alignment, size_t size)
if (node == NULL)
return (NULL);
- ret = chunk_alloc(alloc_size, false);
+ zero = false;
+ ret = chunk_alloc(alloc_size, &zero);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
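For the recycle paths the contract runs the other way: recycled chunks may hold stale data, so chunk_recycle_dss() and chunk_recycle_swap() zero only when the caller demanded it, and leave *zero untouched otherwise. A hedged sketch of that side of the contract (hypothetical name; not the actual jemalloc source):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/*
 * Hypothetical recycle path: if the caller demanded zeroed memory
 * (*zero == true on entry), the stale chunk is scrubbed; otherwise
 * *zero is deliberately left false, since recycled memory is not
 * known to be zeroed.  Pre-zeroed backing (fresh mmap, grown DSS,
 * prezeroed swap) instead reports *zero = true to the caller.
 */
static void *
example_recycle(void *stale_chunk, size_t size, bool *zero)
{
	if (*zero)
		memset(stale_chunk, 0, size);
	return (stale_chunk);
}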