author     Jason Evans <je@fb.com>    2013-02-01 01:02:53 (GMT)
committer  Jason Evans <je@fb.com>    2013-02-01 01:02:53 (GMT)
commit     06912756cccd0064a9c5c59992dbac1cec68ba3f
tree       0c77d4162bdbee5672a9c1cbce76755851d6b09a
parent     a7a28c334e5526ba716bf6046eab8d60598183eb
Fix Valgrind integration.
Fix Valgrind integration to annotate all internally allocated memory in a way that keeps Valgrind happy about internal data structure access.
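The fix settles on the standard Memcheck client-request discipline: memory handed out from an internal pool is marked UNDEFINED at the allocation point (base_alloc, base_node_alloc, chunk_alloc), NOACCESS once it is recycled or purged (base_node_dealloc, chunk_record), and DEFINED only around the allocator's own validation reads (arena_chunk_alloc, chunk_recycle). A minimal sketch of that discipline, not jemalloc code (pool_alloc and pool_reset are hypothetical names; <valgrind/memcheck.h> is assumed available):

#include <stddef.h>
#include <valgrind/memcheck.h>

static char pool[4096];
static size_t pool_off;

static void *
pool_alloc(size_t size)
{
	void *ret;

	if (pool_off + size > sizeof(pool))
		return (NULL);
	ret = &pool[pool_off];
	pool_off += size;
	/* Handed to a caller: addressable, but contents uninitialized. */
	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

static void
pool_reset(void)
{
	pool_off = 0;
	/* Recycled: any further access is reported as an error. */
	VALGRIND_MAKE_MEM_NOACCESS(pool, sizeof(pool));
}

Outside Valgrind the client requests execute as no-ops, and jemalloc compiles them away entirely when valgrind support is disabled (hence the config_valgrind gate visible in chunk_alloc below).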
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c      15
-rw-r--r--  src/base.c        3
-rw-r--r--  src/chunk.c      48
-rw-r--r--  src/chunk_dss.c   1
4 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index 8d50f4d..d79e035 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -366,8 +366,6 @@ arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (npages << LG_PAGE));
}
static inline void
@@ -380,8 +378,6 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
LG_PAGE)), PAGE);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
}
static void
@@ -513,6 +509,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
run_ind+need_pages-1);
}
}
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (need_pages << LG_PAGE));
}
static arena_chunk_t *
@@ -574,6 +572,11 @@ arena_chunk_alloc(arena_t *arena)
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else if (config_debug) {
+ VALGRIND_MAKE_MEM_DEFINED(
+ (void *)arena_mapp_get(chunk, map_bias+1),
+ (void *)((uintptr_t)
+ arena_mapp_get(chunk, chunk_npages-1)
+ - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
@@ -1246,8 +1249,6 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
(uintptr_t)bin_info->bitmap_offset);
/* Initialize run internals. */
- VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
- bin_info->redzone_size);
run->bin = bin;
run->nextind = 0;
run->nfree = bin_info->nregs;
@@ -1464,8 +1465,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
diff --git a/src/base.c b/src/base.c
index b1a5945..4e62e8f 100644
--- a/src/base.c
+++ b/src/base.c
@@ -63,6 +63,7 @@ base_alloc(size_t size)
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
return (ret);
}
@@ -88,6 +89,7 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
base_node_dealloc(extent_node_t *node)
{
+ VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
diff --git a/src/chunk.c b/src/chunk.c
index 8cff240..044f76b 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -120,7 +120,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
if (node != NULL)
base_node_dealloc(node);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (*zero) {
if (zeroed == false)
memset(ret, 0, size);
@@ -131,7 +130,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
}
return (ret);
@@ -180,27 +178,32 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
/* All strategies for allocation failed. */
ret = NULL;
label_return:
- if (config_ivsalloc && base == false && ret != NULL) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
+ if (ret != NULL) {
+ if (config_ivsalloc && base == false) {
+ if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+ chunk_dealloc(ret, size, true);
+ return (NULL);
+ }
}
- }
- if ((config_stats || config_prof) && ret != NULL) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks = stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
+ if (config_stats || config_prof) {
+ bool gdump;
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ if (stats_chunks.curchunks > stats_chunks.highchunks) {
+ stats_chunks.highchunks =
+ stats_chunks.curchunks;
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
+ gdump = false;
+ malloc_mutex_unlock(&chunks_mtx);
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
+ }
+ if (config_valgrind)
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@@ -214,6 +217,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
extent_node_t *xnode, *node, *prev, key;
unzeroed = pages_purge(chunk, size);
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index d1aea93..24781cc 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -127,7 +127,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
return (ret);
}