author     Jason Evans <je@facebook.com>    2010-03-22 18:45:01 (GMT)
committer  Jason Evans <je@facebook.com>    2010-03-22 18:45:01 (GMT)
commit     c03a63d68df9ddbb80f997d8be7bc04e5f3d53eb (patch)
tree       b52b456ae742867e533e461d1a08183b9384953b
parent     19b3d618924b3542a264612f906bc53bbcec8b70 (diff)
Set/clear CHUNK_MAP_ZEROED in arena_chunk_purge().
Properly set/clear CHUNK_MAP_ZEROED for all purged pages, according to whether the pages are (potentially) file-backed or anonymous. This was merely a performance pessimization for the anonymous mapping case, but was a calloc()-related bug for the swap_enabled case.
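
The anonymous/file-backed distinction behind this fix can be observed directly with madvise(2) on Linux: after MADV_DONTNEED, a private anonymous page reads back as zeroes, whereas a file-backed page is refilled from the file. A minimal standalone sketch of that behavior (a demo, not part of the patch):

    /* Demo: MADV_DONTNEED zero-fills anonymous mappings (Linux). */
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t len = (size_t)sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        assert(p != MAP_FAILED);
        memset(p, 0xa5, len);               /* Dirty the page. */
        madvise(p, len, MADV_DONTNEED);     /* Purge it. */
        /* Anonymous mapping: the kernel supplies a fresh zero page. */
        printf("after purge: %d\n", p[0]);  /* Prints 0. */
        munmap(p, len);
        return (0);
    }

A swap-file-backed mapping gets no such guarantee: the purged pages are reread from the file, which is why CHUNK_MAP_ZEROED must be cleared in that case.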
-rw-r--r--    jemalloc/src/arena.c    43
1 file changed, 32 insertions, 11 deletions
diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index d8d1283..6427a5c 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -593,7 +593,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 {
     ql_head(arena_chunk_map_t) mapelms;
     arena_chunk_map_t *mapelm;
-    size_t pageind;
+    size_t pageind, flag_zeroed;
 #ifdef JEMALLOC_DEBUG
     size_t ndirty;
 #endif
@@ -603,6 +603,12 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)

     ql_new(&mapelms);

+    flag_zeroed =
+#ifdef JEMALLOC_SWAP
+        swap_enabled ? 0 :
+#endif
+        CHUNK_MAP_ZEROED;
+
     /*
      * If chunk is the spare, temporarily re-allocate it, 1) so that its
      * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
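
The hunk above computes flag_zeroed once per purge pass: with JEMALLOC_SWAP compiled in and swap_enabled set, chunks may be backed by a swap file, so purged pages cannot be assumed zero. A standalone restatement of the idiom, with a stand-in value for CHUNK_MAP_ZEROED (the real definition lives elsewhere in jemalloc):

    #include <stddef.h>

    #define CHUNK_MAP_ZEROED ((size_t)0x40U)  /* Stand-in value. */

    #ifdef JEMALLOC_SWAP
    static int swap_enabled;  /* Nonzero once a swap file backs chunks. */
    #endif

    static size_t
    purge_flag_zeroed(void)
    {
        return (
    #ifdef JEMALLOC_SWAP
            /* Swap-file pages are reread from the file, not zeroed. */
            swap_enabled ? 0 :
    #endif
            CHUNK_MAP_ZEROED);
    }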
@@ -633,17 +639,32 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
             npages = mapelm->bits >> PAGE_SHIFT;
             assert(pageind + npages <= chunk_npages);
             if (mapelm->bits & CHUNK_MAP_DIRTY) {
-                /*
-                 * Dirty run; temporarily allocate it.  Set the
-                 * last map element first, in case this is a
-                 * one-page run.
-                 */
+                size_t i;
+
                 arena_avail_tree_remove(
                     &arena->runs_avail_dirty, mapelm);
-                chunk->map[pageind + npages - 1].bits =
-                    (CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
+
+                /*
+                 * Update internal elements in the page map, so
+                 * that CHUNK_MAP_ZEROED is properly set.
+                 * madvise(..., MADV_DONTNEED) results in
+                 * zero-filled pages for anonymous mappings,
+                 * but not for file-backed mappings.
+                 */
                 mapelm->bits = (npages << PAGE_SHIFT) |
-                    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+                    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED |
+                    flag_zeroed;
+                for (i = 1; i < npages - 1; i++) {
+                    chunk->map[pageind + i].bits =
+                        flag_zeroed;
+                }
+                if (npages > 1) {
+                    chunk->map[pageind + npages - 1].bits =
+                        (npages << PAGE_SHIFT) |
+                        CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED |
+                        flag_zeroed;
+                }
+
                 arena->nactive += npages;
                 /* Append to list for later processing. */
                 ql_elm_new(mapelm, u.ql_link);
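
The map update in this hunk touches the head element, the interior elements, and (for multi-page runs) the tail element of the run; the npages > 1 guard keeps a one-page run, whose head and tail are the same element, from being written twice. A simplified sketch of the same pattern over a bare size_t array, with stand-in bit values:

    #include <stddef.h>

    #define PAGE_SHIFT           12               /* Stand-in: 4 KiB pages. */
    #define CHUNK_MAP_LARGE      ((size_t)0x02U)  /* Stand-in bit. */
    #define CHUNK_MAP_ALLOCATED  ((size_t)0x01U)  /* Stand-in bit. */

    static void
    mark_purged_run(size_t *map, size_t pageind, size_t npages,
        size_t flag_zeroed)
    {
        size_t i;

        /* Head element records the run size and allocation bits. */
        map[pageind] = (npages << PAGE_SHIFT) | CHUNK_MAP_LARGE |
            CHUNK_MAP_ALLOCATED | flag_zeroed;
        /* Interior elements carry only the (possibly cleared) zeroed bit. */
        for (i = 1; i < npages - 1; i++)
            map[pageind + i] = flag_zeroed;
        /* Tail element mirrors the head; for a one-page run the head,
         * already set above, doubles as the tail. */
        if (npages > 1) {
            map[pageind + npages - 1] = (npages << PAGE_SHIFT) |
                CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_zeroed;
        }
    }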
@@ -653,9 +674,9 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
                 pageind += npages;
             } else {
                 /* Skip allocated run. */
-                if (mapelm->bits & CHUNK_MAP_LARGE) {
+                if (mapelm->bits & CHUNK_MAP_LARGE)
                     pageind += mapelm->bits >> PAGE_SHIFT;
-                } else {
+                else {
                     arena_run_t *run = (arena_run_t *)((uintptr_t)
                         chunk + (uintptr_t)(pageind << PAGE_SHIFT));
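
As the commit message notes, the stale flag was a correctness bug for calloc() in the swap_enabled case: a zero-aware allocation path skips memset() when the page map claims the memory is already zeroed, and purged file-backed pages violate that claim. A self-contained illustration with hypothetical names (not jemalloc's actual calloc path):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Hypothetical stand-in for the arena allocation path; *zeroed reports
     * whether the page map claims the memory is already zero-filled.
     */
    static void *
    arena_alloc_pages(size_t size, bool *zeroed)
    {

        *zeroed = false;  /* Correct after this fix; a stale true was the bug. */
        return (malloc(size));
    }

    static void *
    calloc_like(size_t size)
    {
        bool zeroed;
        void *ret = arena_alloc_pages(size, &zeroed);

        /* Skip the memset() only when the zeroed claim is trustworthy. */
        if (ret != NULL && zeroed == false)
            memset(ret, 0, size);
        return (ret);
    }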