author		Jason Evans <jasone@canonware.com>	2010-10-18 02:56:09 (GMT)
committer	Jason Evans <jasone@canonware.com>	2010-10-18 02:56:09 (GMT)
commit		12ca91402bc5d5c5a1cca495957463bb8e71fdcf (patch)
tree		b10b4c0e4ee0c0857ae8f66cc767aff030c2c7b4 /jemalloc/src
parent		940a2e02b27b264cc92e8ecbf186a711ce05ad04 (diff)
Add assertions to run coalescing.
Assert that the chunk map bits at the ends of the runs that participate in coalescing are self-consistent.
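The invariant behind these assertions is easiest to see in isolation: jemalloc's chunk map records an unallocated run's size in the map entries for both its first and its last page, so before coalescing with a neighboring run, the entry at the neighbor's far end can be cross-checked against the one at its near end. The following is a minimal standalone sketch of that layout, not jemalloc code: the CHUNK_MAP_* flag bits, the map_bias offset, and the real map structure are omitted, and run_mark_free is a hypothetical helper.

/*
 * Illustrative model of the header/footer size invariant that the new
 * assertions check before forward coalescing.
 */
#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	((size_t)((1U << PAGE_SHIFT) - 1))

typedef struct { size_t bits; } map_elm_t;

/* Record a free run of npages starting at ind in both boundary entries. */
static void
run_mark_free(map_elm_t *map, size_t ind, size_t npages)
{
	size_t size = npages << PAGE_SHIFT;

	map[ind].bits = size;
	map[ind + npages - 1].bits = size;
}

int
main(void)
{
	map_elm_t map[16] = {{0}};
	size_t run_ind = 2, run_pages = 3;	/* run being deallocated */

	/* A free 4-page successor run immediately follows it. */
	run_mark_free(map, run_ind + run_pages, 4);

	/* Read the successor's size from its first map entry... */
	size_t nrun_size = map[run_ind + run_pages].bits & ~PAGE_MASK;
	size_t nrun_pages = nrun_size >> PAGE_SHIFT;

	/* ...and assert that its last map entry agrees (the new check). */
	assert((map[run_ind + run_pages + nrun_pages - 1].bits & ~PAGE_MASK)
	    == nrun_size);

	/* Forward coalescing: extend the current run over the successor. */
	run_pages += nrun_pages;
	run_mark_free(map, run_ind, run_pages);
	return (0);
}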
Diffstat (limited to 'jemalloc/src')
-rw-r--r--	jemalloc/src/arena.c	24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index 36957d9..6a84737 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -934,19 +934,24 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	    CHUNK_MAP_DIRTY) == flag_dirty) {
 		size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
 		    ~PAGE_MASK;
+		size_t nrun_pages = nrun_size >> PAGE_SHIFT;
 
 		/*
 		 * Remove successor from runs_avail; the coalesced run is
 		 * inserted later.
 		 */
+		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+		    & ~PAGE_MASK) == nrun_size);
+		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+		    & CHUNK_MAP_ALLOCATED) == 0);
+		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
+		    & CHUNK_MAP_DIRTY) == flag_dirty);
 		arena_avail_tree_remove(runs_avail,
 		    &chunk->map[run_ind+run_pages-map_bias]);
 
 		size += nrun_size;
-		run_pages = size >> PAGE_SHIFT;
-		assert((chunk->map[run_ind+run_pages-1-map_bias].bits &
-		    ~PAGE_MASK) == nrun_size);
+		run_pages += nrun_pages;
 
 		chunk->map[run_ind-map_bias].bits = size |
 		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
 		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
@@ -960,21 +965,26 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	    CHUNK_MAP_DIRTY) == flag_dirty) {
 		size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
 		    ~PAGE_MASK;
+		size_t prun_pages = prun_size >> PAGE_SHIFT;
 
-		run_ind -= prun_size >> PAGE_SHIFT;
+		run_ind -= prun_pages;
 
 		/*
 		 * Remove predecessor from runs_avail; the coalesced run is
 		 * inserted later.
 		 */
+		assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
+		    == prun_size);
+		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
+		    == 0);
+		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
+		    == flag_dirty);
 		arena_avail_tree_remove(runs_avail,
 		    &chunk->map[run_ind-map_bias]);
 
 		size += prun_size;
-		run_pages = size >> PAGE_SHIFT;
-		assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
-		    prun_size);
+		run_pages += prun_pages;
 
 		chunk->map[run_ind-map_bias].bits = size |
 		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
 		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
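Two details of the change are worth spelling out. First, `run_pages = size >> PAGE_SHIFT` becomes `run_pages += nrun_pages` (and `+= prun_pages` in the backward case); the two are equivalent because run sizes are whole numbers of pages, but the new form keeps the page count in lockstep with the size it was derived from. Second, the old post-coalescing assertion is not lost: with the updated run_pages, it addressed the same map entry as the first new assertion, which now also checks the CHUNK_MAP_ALLOCATED and CHUNK_MAP_DIRTY bits before coalescing. A quick standalone check of the arithmetic equivalence (an illustrative sketch under the page-aligned assumption, not jemalloc code):

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT	12

int
main(void)
{
	size_t run_pages = 3, nrun_pages = 4;	/* arbitrary page counts */
	size_t size = run_pages << PAGE_SHIFT;
	size_t nrun_size = nrun_pages << PAGE_SHIFT;

	size += nrun_size;
	/* The old and the new computations of run_pages agree. */
	assert(size >> PAGE_SHIFT == run_pages + nrun_pages);
	return (0);
}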