summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
author Jason Evans <jasone@canonware.com> 2015-02-18 06:25:56 (GMT)
committer Jason Evans <jasone@canonware.com> 2015-02-18 06:25:56 (GMT)
commit 339c2b23b2d61993ac768afcc72af135662c6771 (patch)
tree 7c4592a08594b2f927fde30b478b9c84e253a55d /src
parent 47701b22ee7c0df5e99efa0fcdcf98b9ff805b59 (diff)
download jemalloc-339c2b23b2d61993ac768afcc72af135662c6771.zip
download jemalloc-339c2b23b2d61993ac768afcc72af135662c6771.tar.gz
download jemalloc-339c2b23b2d61993ac768afcc72af135662c6771.tar.bz2
Fix chunk_unmap() to propagate dirty state.
Fix chunk_unmap() to propagate whether a chunk is dirty, and modify dirty chunk purging to record this information so it can be passed to chunk_unmap(). Since the broken version of chunk_unmap() claimed that all chunks were clean, this resulted in potential memory corruption for purging implementations that do not zero (e.g. MADV_FREE). This regression was introduced by ee41ad409a43d12900a5a3108f6c14f84e4eb0eb (Integrate whole chunks into unused dirty page purging machinery.).
Diffstat (limited to 'src')
-rw-r--r-- src/arena.c 14
-rw-r--r-- src/chunk.c 6
2 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index 205f598..3d38386 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1035,6 +1035,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
arena->ind);
assert(chunk == addr);
+ assert(zero == zeroed);
/*
* Create a temporary node to link into the ring of
* stashed allocations.
@@ -1075,7 +1076,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
- /* Append to purge_runs for later processing. */
+ /* Stash. */
if (false)
qr_new(runselm, rd_link); /* Redundant. */
else {
@@ -1114,9 +1115,12 @@ arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
if (runselm == &chunkselm->runs_dirty) {
size_t size = extent_node_size_get(chunkselm);
+ bool unzeroed;
- pages_purge(extent_node_addr_get(chunkselm), size);
npages = size >> LG_PAGE;
+ unzeroed = pages_purge(extent_node_addr_get(chunkselm),
+ size);
+ extent_node_zeroed_set(chunkselm, !unzeroed);
chunkselm = qr_next(chunkselm, cd_link);
} else {
arena_chunk_t *chunk;
@@ -1180,11 +1184,13 @@ arena_unstash_purged(arena_t *arena,
if (runselm == &chunkselm->runs_dirty) {
extent_node_t *chunkselm_next = qr_next(chunkselm,
cd_link);
+ bool dirty = !extent_node_zeroed_get(chunkselm);
+ void *addr = extent_node_addr_get(chunkselm);
+ size_t size = extent_node_size_get(chunkselm);
arena_chunk_dirty_remove(chunkselm);
- chunk_unmap(arena, extent_node_addr_get(chunkselm),
- extent_node_size_get(chunkselm));
arena_node_dalloc(arena, chunkselm);
chunkselm = chunkselm_next;
+ chunk_unmap(arena, dirty, addr, size);
} else {
arena_run_t *run = &runselm->run;
qr_remove(runselm, rd_link);
diff --git a/src/chunk.c b/src/chunk.c
index 59d72c9..774a978 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -377,7 +377,7 @@ chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
}
void
-chunk_unmap(arena_t *arena, void *chunk, size_t size)
+chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size)
{
assert(chunk != NULL);
@@ -387,10 +387,10 @@ chunk_unmap(arena_t *arena, void *chunk, size_t size)
if (have_dss && chunk_in_dss(chunk)) {
chunk_record(arena, &arena->chunks_szad_dss,
- &arena->chunks_ad_dss, false, chunk, size);
+ &arena->chunks_ad_dss, dirty, chunk, size);
} else if (chunk_dalloc_mmap(chunk, size)) {
chunk_record(arena, &arena->chunks_szad_mmap,
- &arena->chunks_ad_mmap, false, chunk, size);
+ &arena->chunks_ad_mmap, dirty, chunk, size);
}
}