author    Jason Evans <je@fb.com>    2012-10-09 00:56:11 (GMT)
committer Jason Evans <je@fb.com>    2012-10-09 01:04:49 (GMT)
commit    7de92767c20cb72c94609b9c78985526fb84a679 (patch)
tree      6e6aab107b7d77d575b130b7074824da5f2df234 /src/arena.c
parent    f4c3f8545beed9f7e606cef7b1d06fae3f630269 (diff)
Fix mlockall()/madvise() interaction.
mlockall(2) can cause purging via madvise(2) to fail. Fix purging code to check whether madvise() succeeded, and base zeroed page metadata on the result.

Reported by Olivier Lecomte.
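The crux of the change: instead of deciding at compile time whether purged pages come back zeroed, the purge primitive now reports at run time whether the pages may still hold old data. A minimal sketch of the idea, assuming POSIX madvise(2); the purge_pages() name here is hypothetical and this is not jemalloc's exact pages_purge() implementation:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Purge a page range; return true if the pages may still contain their
 * old (non-zero) contents afterward.
 */
static bool
purge_pages(void *addr, size_t length)
{
#ifdef MADV_DONTNEED
	/*
	 * madvise(..., MADV_DONTNEED) makes anonymous pages read back as
	 * zeros on Linux, but the call can fail, e.g. when the range is
	 * pinned by mlockall(2).  Only trust the zeroing if it succeeded.
	 */
	return (madvise(addr, length, MADV_DONTNEED) != 0);
#else
	/* No purge primitive available: conservatively report unzeroed. */
	return (true);
#endif
}

The caller can then record the result per page rather than relying on a configure-time constant, which is exactly what the hunks below do.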
Diffstat (limited to 'src/arena.c')
-rw-r--r--    src/arena.c    50
1 file changed, 20 insertions(+), 30 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index bf1614b..674ffe9 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -551,24 +551,12 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{
ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm;
- size_t pageind, flag_unzeroed;
+ size_t pageind;
size_t ndirty;
size_t nmadvise;
ql_new(&mapelms);
- flag_unzeroed =
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- /*
- * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
- * mappings.
- */
- 0
-#else
- CHUNK_MAP_UNZEROED
-#endif
- ;
-
/*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
* run is reinserted into runs_avail_dirty, and 2) so that it cannot be
@@ -603,26 +591,12 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1));
if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- size_t i;
-
arena_avail_tree_remove(
&arena->runs_avail_dirty, mapelm);
- arena_mapbits_unzeroed_set(chunk, pageind,
- flag_unzeroed);
arena_mapbits_large_set(chunk, pageind,
(npages << LG_PAGE), 0);
- /*
- * Update internal elements in the page map, so
- * that CHUNK_MAP_UNZEROED is properly set.
- */
- for (i = 1; i < npages - 1; i++) {
- arena_mapbits_unzeroed_set(chunk,
- pageind+i, flag_unzeroed);
- }
if (npages > 1) {
- arena_mapbits_unzeroed_set(chunk,
- pageind+npages-1, flag_unzeroed);
arena_mapbits_large_set(chunk,
pageind+npages-1, 0, 0);
}
@@ -685,14 +659,30 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
sizeof(arena_chunk_map_t)) + map_bias;
size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
LG_PAGE;
+ bool unzeroed;
+ size_t flag_unzeroed, i;
assert(pageind + npages <= chunk_npages);
assert(ndirty >= npages);
if (config_debug)
ndirty -= npages;
-
- pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
- (npages << LG_PAGE));
+ unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
+ LG_PAGE)), (npages << LG_PAGE));
+ flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+ /*
+ * Set the unzeroed flag for all pages, now that pages_purge()
+ * has returned whether the pages were zeroed as a side effect
+ * of purging. This chunk map modification is safe even though
+ * the arena mutex isn't currently owned by this thread,
+ * because the run is marked as allocated, thus protecting it
+ * from being modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 0; i < npages; i++) {
+ arena_mapbits_unzeroed_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
if (config_stats)
nmadvise++;
}
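Why track this per page at all: a later allocation that must return zeroed memory can consult the flag and skip scrubbing pages that purging already zeroed. A hedged sketch of that consumer side, with a hypothetical mapbits_unzeroed_get() standing in for the chunk-map accessor; this illustrates the CHUNK_MAP_UNZEROED convention, not the literal arena code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define LG_PAGE	12
#define PAGE	((size_t)1 << LG_PAGE)

/* Hypothetical accessor for a page's CHUNK_MAP_UNZEROED-style bit. */
extern size_t mapbits_unzeroed_get(void *chunk, size_t pageind);

/* Zero a run's pages, skipping any that purging already zeroed. */
static void
zero_run_pages(void *chunk, size_t pageind, size_t npages)
{
	size_t i;

	for (i = 0; i < npages; i++) {
		if (mapbits_unzeroed_get(chunk, pageind + i) != 0) {
			/* Purge failed or never ran: scrub by hand. */
			memset((void *)((uintptr_t)chunk +
			    ((pageind + i) << LG_PAGE)), 0, PAGE);
		}
		/*
		 * Otherwise madvise() succeeded and the page already reads
		 * back as zeros, so the memset can be skipped.
		 */
	}
}

This is why the commit sets the flag for every page in the purged run based on what pages_purge() actually observed, rather than on the compile-time JEMALLOC_PURGE_MADVISE_DONTNEED guess that the deleted code used.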