author    Jason Evans <je@facebook.com>  2010-01-28 02:27:09 (GMT)
committer Jason Evans <je@facebook.com>  2010-01-28 02:27:09 (GMT)
commit    4fb7f513376c0bb73fa1e4e1e89966af9cb2b9ec (patch)
tree      cd81b1101ef5552d12a40b8c56cc9671d368c687 /jemalloc
parent    95833311f10ec7c23d9c550213909aa2e35bd1b6 (diff)
Fix a chunk leak in chunk_alloc_mmap().
A missing 'else' in chunk_alloc_mmap() caused an extra chunk to be allocated every time the optimistic alignment path was entered, since the following block would always be executed immediately afterward. This chunk leak caused no increase in physical memory usage, but virtual memory could grow until resource exhaustion caused allocation failures.
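To make the bug concrete, here is a minimal, self-contained C sketch of the control flow. fast_path(), slow_path(), and chunk_alloc_sketch() are hypothetical stand-ins for jemalloc's optimistic and reliable mmap paths, not the real code:

/*
 * Hypothetical sketch of the bug; fast_path() and slow_path() are
 * stand-ins, not jemalloc's actual allocation paths.
 */
#include <stddef.h>
#include <sys/mman.h>

static void *
fast_path(size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return (p == MAP_FAILED ? NULL : p);
}

static void *
slow_path(size_t size)
{
	return (fast_path(size));	/* Placeholder for the slow path. */
}

void *
chunk_alloc_sketch(size_t size)
{
	void *ret = fast_path(size);

	if (ret != NULL) {
		/* Optimistic alignment fix-up happens here. */
	} else {
		/*
		 * Before the fix this "else" was missing, so the slow
		 * path below ran unconditionally, overwrote ret, and
		 * leaked the mapping obtained by fast_path() above.
		 */
		ret = slow_path(size);
	}
	return (ret);
}

int
main(void)
{
	void *p = chunk_alloc_sketch(1 << 20);
	return (p == NULL);
}

With the 'else' missing, every trip through the optimistic path retired one chunk's worth of virtual address space for good, which is why physical usage stayed flat while virtual usage grew.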
Diffstat (limited to 'jemalloc')
-rw-r--r--  jemalloc/src/jemalloc_arena.c       | 13 ++++++++++++-
-rw-r--r--  jemalloc/src/jemalloc_chunk_mmap.c  |  2 +-
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/jemalloc/src/jemalloc_arena.c b/jemalloc/src/jemalloc_arena.c
index fa84f66..e1e1b8f 100644
--- a/jemalloc/src/jemalloc_arena.c
+++ b/jemalloc/src/jemalloc_arena.c
@@ -848,11 +848,22 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	/* Insert into runs_avail, now that coalescing is complete. */
 	arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
 
-	/* Deallocate chunk if it is now completely unused. */
+	/*
+	 * Deallocate chunk if it is now completely unused. The bit
+	 * manipulation checks whether the first run is unallocated and extends
+	 * to the end of the chunk.
+	 */
 	if ((chunk->map[arena_chunk_header_npages].bits & (~PAGE_MASK |
 	    CHUNK_MAP_ALLOCATED)) == arena_maxclass)
 		arena_chunk_dealloc(arena, chunk);
 
+	/*
+	 * It is okay to do dirty page processing even if the chunk was
+	 * deallocated above, since in that case it is the spare. Waiting
+	 * until after possible chunk deallocation to do dirty processing
+	 * allows for an old spare to be fully deallocated, thus decreasing the
+	 * chances of spuriously crossing the dirty page purging threshold.
+	 */
 	if (dirty) {
 		if (chunk->dirtied == false) {
 			arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
diff --git a/jemalloc/src/jemalloc_chunk_mmap.c b/jemalloc/src/jemalloc_chunk_mmap.c
index 8e2c804..d5702f2 100644
--- a/jemalloc/src/jemalloc_chunk_mmap.c
+++ b/jemalloc/src/jemalloc_chunk_mmap.c
@@ -184,7 +184,7 @@ chunk_alloc_mmap(size_t size)
 			    offset));
 			}
 		}
-	}
+	} else
 		ret = chunk_alloc_mmap_slow(size, false);
 
 	return (ret);
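
As an aside, the masked comparison in the arena_run_dalloc() hunk above can be illustrated in isolation. The constants below are assumed for the example (4 KiB pages, a 1 MiB usable chunk size); the real definitions live in jemalloc's internal headers. A map entry keeps the run size in its high bits and per-page flags in its low bits, so a single masked compare against arena_maxclass is true exactly when the first run is unallocated and spans the whole chunk:

/*
 * Illustration only; PAGE_MASK, CHUNK_MAP_ALLOCATED, and the chunk
 * size are assumed values, not jemalloc's actual definitions.
 */
#include <assert.h>
#include <stddef.h>

#define PAGE_MASK		((size_t)0xfff)	/* Assumed: 4 KiB pages. */
#define CHUNK_MAP_ALLOCATED	((size_t)0x1)	/* Assumed flag bit. */

static int
run_is_whole_free_chunk(size_t mapbits, size_t arena_maxclass)
{
	/*
	 * ~PAGE_MASK selects the size bits; CHUNK_MAP_ALLOCATED selects
	 * the allocated flag. The masked value equals arena_maxclass
	 * only if the run size is arena_maxclass and the flag is clear.
	 */
	return ((mapbits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
	    arena_maxclass);
}

int
main(void)
{
	size_t arena_maxclass = (size_t)1 << 20;	/* Assumed chunk size. */

	/* Unallocated run covering the whole chunk: chunk is free. */
	assert(run_is_whole_free_chunk(arena_maxclass, arena_maxclass));
	/* Whole-chunk run that is still allocated: not free. */
	assert(!run_is_whole_free_chunk(arena_maxclass | CHUNK_MAP_ALLOCATED,
	    arena_maxclass));
	/* Unallocated run smaller than the chunk: not free. */
	assert(!run_is_whole_free_chunk((size_t)1 << 16, arena_maxclass));
	return (0);
}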