author	Jason Evans <jasone@canonware.com>	2016-04-05 23:25:44 (GMT)
committer	Jason Evans <jasone@canonware.com>	2016-05-13 16:56:18 (GMT)
commit	9a8add1510456464bc496320990ec234798bd381 (patch)
tree	f5c9c5d990982e55a84c192687c8ce50234bad1e /src
parent	a397045323d743a787c7efff17c0619dcf25f0b4 (diff)
download	jemalloc-9a8add1510456464bc496320990ec234798bd381.zip
	jemalloc-9a8add1510456464bc496320990ec234798bd381.tar.gz
	jemalloc-9a8add1510456464bc496320990ec234798bd381.tar.bz2
Remove Valgrind support.
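
The integration removed here consisted of thin wrappers around Memcheck's
client requests (see the deleted src/valgrind.c at the bottom of this diff),
invoked from the allocator's paths to tell Valgrind which bytes were
addressable and which were defined. As context for the call sites below, the
following standalone sketch (illustrative only, not jemalloc code) shows what
those requests do; the macros come from Valgrind's public valgrind/memcheck.h
and compile to no-ops when the program is not running under Valgrind:

#include <stdlib.h>
#include <valgrind/memcheck.h>

int
main(void)
{
	size_t size = 4096;
	char *p = malloc(size);

	if (p == NULL)
		return (1);
	/* Freshly carved-out memory: addressable but uninitialized. */
	VALGRIND_MAKE_MEM_UNDEFINED(p, size);
	p[0] = 'x';
	/* Initialized bytes may legitimately be read. */
	VALGRIND_MAKE_MEM_DEFINED(p, 1);
	/* Memory parked on an allocator free list must not be touched. */
	VALGRIND_MAKE_MEM_NOACCESS(p, size);
	/* Make it addressable again before returning it to libc. */
	VALGRIND_MAKE_MEM_UNDEFINED(p, size);
	free(p);
	return (0);
}
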
Diffstat (limited to 'src')
-rw-r--r--	src/arena.c	41
-rw-r--r--	src/base.c	3
-rw-r--r--	src/chunk.c	10
-rw-r--r--	src/chunk_dss.c	5
-rw-r--r--	src/ctl.c	6
-rw-r--r--	src/jemalloc.c	95
-rw-r--r--	src/quarantine.c	7
-rw-r--r--	src/stats.c	1
-rw-r--r--	src/valgrind.c	34
9 files changed, 15 insertions(+), 187 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index c605bcd..4e6d3d6 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -350,27 +350,16 @@ JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
}
JEMALLOC_INLINE_C void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
-
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
- << LG_PAGE)), PAGE);
-}
-
-JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
- arena_run_page_mark_zeroed(chunk, run_ind);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
}
@@ -471,12 +460,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
}
if (zero) {
- if (flag_decommitted != 0) {
- /* The run is untouched, and therefore zeroed. */
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
- (need_pages << LG_PAGE));
- } else if (flag_dirty != 0) {
+ if (flag_decommitted != 0)
+ ; /* The run is untouched, and therefore zeroed. */
+ else if (flag_dirty != 0) {
/* The run is dirty, so all pages must be zeroed. */
arena_run_zero(chunk, run_ind, need_pages);
} else {
@@ -492,15 +478,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
else if (config_debug) {
arena_run_page_validate_zeroed(chunk,
run_ind+i);
- } else {
- arena_run_page_mark_zeroed(chunk,
- run_ind+i);
}
}
}
- } else {
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
/*
@@ -564,8 +544,6 @@ arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
arena_run_page_validate_zeroed(chunk, run_ind+i);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
return (false);
}
@@ -700,19 +678,9 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
* the chunk is not zeroed.
*/
if (!zero) {
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -2571,13 +2539,11 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
} else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
memset(ret, 0, usize);
}
@@ -3311,7 +3277,6 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
*/
copysize = (usize < oldsize) ? usize : oldsize;
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache, true);
} else {
diff --git a/src/base.c b/src/base.c
index 81b0801..1b0bf69 100644
--- a/src/base.c
+++ b/src/base.c
@@ -24,7 +24,6 @@ base_node_try_alloc(tsdn_t *tsdn)
return (NULL);
node = base_nodes;
base_nodes = *(extent_node_t **)node;
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
return (node);
}
@@ -34,7 +33,6 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
malloc_mutex_assert_owner(tsdn, &base_mtx);
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes;
base_nodes = node;
}
@@ -123,7 +121,6 @@ base_alloc(tsdn_t *tsdn, size_t size)
base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
PAGE_CEILING((uintptr_t)ret);
}
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
malloc_mutex_unlock(tsdn, &base_mtx);
return (ret);
diff --git a/src/chunk.c b/src/chunk.c
index adc666f..7af7bb9 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -316,7 +316,6 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
@@ -376,8 +375,6 @@ chunk_alloc_base(size_t size)
ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
if (ret == NULL)
return (NULL);
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
@@ -401,8 +398,6 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (ret == NULL)
return (NULL);
assert(commit);
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
@@ -434,8 +429,6 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
@@ -478,8 +471,6 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
- if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
return (ret);
}
@@ -494,7 +485,6 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(!cache || !zeroed);
unzeroed = cache || !zeroed;
- JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 0b1f82b..d42aeb0 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -138,11 +138,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
&chunk_hooks, cpad, cpad_size,
false, true);
}
- if (*zero) {
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- ret, size);
+ if (*zero)
memset(ret, 0, size);
- }
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
diff --git a/src/ctl.c b/src/ctl.c
index dad8008..d2e9426 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -86,7 +86,6 @@ CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
@@ -260,7 +259,6 @@ static const ctl_named_node_t config_node[] = {
{NAME("tcache"), CTL(config_tcache)},
{NAME("tls"), CTL(config_tls)},
{NAME("utrace"), CTL(config_utrace)},
- {NAME("valgrind"), CTL(config_valgrind)},
{NAME("xmalloc"), CTL(config_xmalloc)}
};
@@ -1270,7 +1268,6 @@ CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_tcache, bool)
CTL_RO_CONFIG_GEN(config_tls, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_valgrind, bool)
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/
@@ -1622,8 +1619,7 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
READONLY();
WRITEONLY();
- if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
- unlikely(opt_quarantine))) {
+ if (config_fill && unlikely(opt_quarantine)) {
ret = EFAULT;
goto label_return;
}
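
The ctl.c hunks above delete the "config.valgrind" mallctl node. As an
assumed usage sketch (not part of this commit), a caller probing for Valgrind
support would have done something like the following; after this change the
name no longer resolves and the lookup fails with ENOENT:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool valgrind_built = false;
	size_t sz = sizeof(valgrind_built);

	/* Fails with ENOENT once this commit lands. */
	if (mallctl("config.valgrind", &valgrind_built, &sz, NULL, 0) != 0)
		valgrind_built = false;
	printf("config.valgrind: %s\n", valgrind_built ? "true" : "false");
	return (0);
}
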
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 941c1c8..cfe6ed3 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -42,9 +42,6 @@ bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;
-/* Initialized to true if the process is running inside Valgrind. */
-bool in_valgrind;
-
unsigned ncpus;
/* Protects arenas initialization. */
@@ -80,8 +77,7 @@ enum {
flag_opt_quarantine = (1U << 2),
flag_opt_zero = (1U << 3),
flag_opt_utrace = (1U << 4),
- flag_in_valgrind = (1U << 5),
- flag_opt_xmalloc = (1U << 6)
+ flag_opt_xmalloc = (1U << 5)
};
static uint8_t malloc_slow_flags;
@@ -894,9 +890,6 @@ malloc_slow_flag_init(void)
| (opt_utrace ? flag_opt_utrace : 0)
| (opt_xmalloc ? flag_opt_xmalloc : 0);
- if (config_valgrind)
- malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
-
malloc_slow = (malloc_slow_flags != 0);
}
@@ -908,24 +901,6 @@ malloc_conf_init(void)
const char *opts, *k, *v;
size_t klen, vlen;
- /*
- * Automatically configure valgrind before processing options. The
- * valgrind option remains in jemalloc 3.x for compatibility reasons.
- */
- if (config_valgrind) {
- in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
- if (config_fill && unlikely(in_valgrind)) {
- opt_junk = "false";
- opt_junk_alloc = false;
- opt_junk_free = false;
- assert(!opt_zero);
- opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
- opt_redzone = true;
- }
- if (config_tcache && unlikely(in_valgrind))
- opt_tcache = false;
- }
-
for (i = 0; i < 4; i++) {
/* Get runtime configuration. */
switch (i) {
@@ -1183,19 +1158,7 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
}
if (config_tcache) {
- CONF_HANDLE_BOOL(opt_tcache, "tcache",
- !config_valgrind || !in_valgrind)
- if (CONF_MATCH("tcache")) {
- assert(config_valgrind && in_valgrind);
- if (opt_tcache) {
- opt_tcache = false;
- malloc_conf_error(
- "tcache cannot be enabled "
- "while running inside Valgrind",
- k, klen, v, vlen);
- }
- continue;
- }
+ CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
"lg_tcache_max", -1,
(sizeof(size_t) << 3) - 1)
@@ -1508,8 +1471,7 @@ ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
if (unlikely(ind >= NSIZES))
return (NULL);
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
+ if (config_stats || (config_prof && opt_prof)) {
*usize = index2size(ind);
assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
}
@@ -1562,7 +1524,6 @@ je_malloc(size_t size)
ret = ialloc_body(size, false, &tsdn, &usize, true);
ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
UTRACE(0, size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
}
return (ret);
@@ -1664,8 +1625,6 @@ label_return:
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
- JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
- false);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
label_oom:
@@ -1684,11 +1643,8 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
- int ret;
-
- ret = imemalign(memptr, alignment, size, sizeof(void *));
- return (ret);
+ return (imemalign(memptr, alignment, size, sizeof(void *)));
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1703,7 +1659,6 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL;
set_errno(err);
}
-
return (ret);
}
@@ -1739,7 +1694,6 @@ je_calloc(size_t num, size_t size)
ret = ialloc_body(num_size, true, &tsdn, &usize, true);
ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
UTRACE(0, num_size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
}
return (ret);
@@ -1792,7 +1746,6 @@ JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
size_t usize;
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lockless(tsd_tsdn(tsd));
@@ -1802,25 +1755,20 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (config_prof && opt_prof) {
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
prof_free(tsd, ptr, usize);
- } else if (config_stats || config_valgrind)
+ } else if (config_stats)
usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (likely(!slow_path))
iqalloc(tsd, ptr, tcache, false);
- else {
- if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
+ else
iqalloc(tsd, ptr, tcache, true);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
- }
}
JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
witness_assert_lockless(tsd_tsdn(tsd));
@@ -1831,10 +1779,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
prof_free(tsd, ptr, usize);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
- if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
isqalloc(tsd, ptr, usize, tcache, slow_path);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1846,7 +1791,6 @@ je_realloc(void *ptr, size_t size)
tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(size == 0)) {
if (ptr != NULL) {
@@ -1871,18 +1815,13 @@ je_realloc(void *ptr, size_t size)
witness_assert_lockless(tsd_tsdn(tsd));
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- if (config_valgrind && unlikely(in_valgrind)) {
- old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
- u2rz(old_usize);
- }
if (config_prof && opt_prof) {
usize = s2u(size);
ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
NULL : irealloc_prof(tsd, ptr, old_usize, usize);
} else {
- if (config_stats || (config_valgrind &&
- unlikely(in_valgrind)))
+ if (config_stats)
usize = s2u(size);
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
@@ -1913,8 +1852,6 @@ je_realloc(void *ptr, size_t size)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
- old_rzsize, true, false);
witness_assert_lockless(tsdn);
return (ret);
}
@@ -2143,8 +2080,7 @@ imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
szind_t ind = size2index(size);
if (unlikely(ind >= NSIZES))
return (NULL);
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
+ if (config_stats || (config_prof && opt_prof)) {
*usize = index2size(ind);
assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
}
@@ -2181,8 +2117,6 @@ je_mallocx(size_t size, int flags)
p = imallocx_body(size, flags, &tsdn, &usize, true);
ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
- MALLOCX_ZERO_GET(flags));
}
return (p);
@@ -2261,7 +2195,6 @@ je_rallocx(void *ptr, size_t size, int flags)
tsd_t *tsd;
size_t usize;
size_t old_usize;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
arena_t *arena;
@@ -2291,8 +2224,6 @@ je_rallocx(void *ptr, size_t size, int flags)
tcache = tcache_get(tsd, true);
old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- if (config_valgrind && unlikely(in_valgrind))
- old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2307,7 +2238,7 @@ je_rallocx(void *ptr, size_t size, int flags)
tcache, arena);
if (unlikely(p == NULL))
goto label_oom;
- if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ if (config_stats)
usize = isalloc(tsd_tsdn(tsd), p, config_prof);
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2317,8 +2248,6 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
- old_usize, old_rzsize, false, zero);
witness_assert_lockless(tsd_tsdn(tsd));
return (p);
label_oom:
@@ -2413,7 +2342,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
tsd_t *tsd;
size_t usize, old_usize;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
@@ -2443,9 +2371,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
if (unlikely(HUGE_MAXCLASS - size < extra))
extra = HUGE_MAXCLASS - size;
- if (config_valgrind && unlikely(in_valgrind))
- old_rzsize = u2rz(old_usize);
-
if (config_prof && opt_prof) {
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
alignment, zero);
@@ -2460,8 +2385,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
- old_usize, old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
witness_assert_lockless(tsd_tsdn(tsd));
diff --git a/src/quarantine.c b/src/quarantine.c
index 18903fb..9658ffa 100644
--- a/src/quarantine.c
+++ b/src/quarantine.c
@@ -150,12 +150,7 @@ quarantine(tsd_t *tsd, void *ptr)
quarantine->curbytes += usize;
quarantine->curobjs++;
if (config_fill && unlikely(opt_junk_free)) {
- /*
- * Only do redzone validation if Valgrind isn't in
- * operation.
- */
- if ((!config_valgrind || likely(!in_valgrind))
- && usize <= SMALL_MAXCLASS)
+ if (usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
memset(ptr, JEMALLOC_FREE_JUNK, usize);
diff --git a/src/stats.c b/src/stats.c
index 073be4f..97f901f 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -517,7 +517,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(utrace)
- OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_max)
diff --git a/src/valgrind.c b/src/valgrind.c
deleted file mode 100644
index 8e7ef3a..0000000
--- a/src/valgrind.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifndef JEMALLOC_VALGRIND
-# error "This source file is for Valgrind integration."
-#endif
-
-#include <valgrind/memcheck.h>
-
-void
-valgrind_make_mem_noaccess(void *ptr, size_t usize)
-{
-
- VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
-}
-
-void
-valgrind_make_mem_undefined(void *ptr, size_t usize)
-{
-
- VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
-}
-
-void
-valgrind_make_mem_defined(void *ptr, size_t usize)
-{
-
- VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
-}
-
-void
-valgrind_freelike_block(void *ptr, size_t usize)
-{
-
- VALGRIND_FREELIKE_BLOCK(ptr, usize);
-}
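
For completeness: the call sites deleted throughout this diff reached the
wrappers above via JEMALLOC_VALGRIND_* macros defined in the internal headers
(removed by this commit as well, though not visible here since the diffstat
is limited to 'src'). A rough reconstruction of one such macro, offered as an
assumption about the removed header rather than a quotation from it:

#ifdef JEMALLOC_VALGRIND
#define	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {		\
	if (unlikely(in_valgrind))					\
		valgrind_make_mem_undefined(ptr, usize);		\
} while (0)
#else
#define	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#endif

Because every expansion was either guarded by the runtime in_valgrind check
or empty, the call sites could be deleted wholesale without changing the
surrounding allocation logic.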