 INSTALL                                          |   8
 Makefile.in                                      |   6
 configure.ac                                     |  38
 doc/jemalloc.xml.in                              |  79
 include/jemalloc/internal/arena.h                |  81
 include/jemalloc/internal/jemalloc_internal.h.in | 153
 include/jemalloc/internal/private_namespace.h    |   8
 include/jemalloc/internal/prof.h                 |   6
 include/jemalloc/internal/quarantine.h           |  24
 include/jemalloc/internal/tcache.h               |  29
 include/jemalloc/internal/util.h                 |   1
 include/jemalloc/jemalloc_defs.h.in              |   5
 src/arena.c                                      | 208
 src/ctl.c                                        |  15
 src/huge.c                                       |   2
 src/jemalloc.c                                   | 165
 src/quarantine.c                                 | 163
 src/stats.c                                      |   3
 src/tcache.c                                     |   6
 src/zone.c                                       |  10
 20 files changed, 840 insertions(+), 170 deletions(-)
diff --git a/INSTALL b/INSTALL
index 8a825df..a5942ec 100644
--- a/INSTALL
+++ b/INSTALL
@@ -113,8 +113,12 @@ any of the following arguments (not a definitive list) to 'configure':
mmap(2).
--disable-fill
- Disable support for junk/zero filling of memory. See the "opt.junk"/
- "opt.zero" option documentation for usage details.
+ Disable support for junk/zero filling of memory, quarantine, and redzones.
+ See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
+ documentation for usage details.
+
+--disable-valgrind
+ Disable support for Valgrind.
--disable-experimental
Disable support for the experimental API (*allocm()).
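
For reference, a sketch of a build that exercises both new flags (assuming the usual jemalloc autoconf workflow):

    ./configure --disable-fill --disable-valgrind
    make

With --disable-fill, the opt.junk, opt.zero, opt.quarantine, and opt.redzone knobs compile out together; with --disable-valgrind, the Valgrind client requests become no-ops.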
diff --git a/Makefile.in b/Makefile.in
index 7df4fc6..8aa9425 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -49,9 +49,9 @@ CSRCS := @srcroot@src/jemalloc.c @srcroot@src/arena.c @srcroot@src/atomic.c \
@srcroot@src/chunk_dss.c @srcroot@src/chunk_mmap.c \
@srcroot@src/ckh.c @srcroot@src/ctl.c @srcroot@src/extent.c \
@srcroot@src/hash.c @srcroot@src/huge.c @srcroot@src/mb.c \
- @srcroot@src/mutex.c @srcroot@src/prof.c @srcroot@src/rtree.c \
- @srcroot@src/stats.c @srcroot@src/tcache.c @srcroot@src/util.c \
- @srcroot@src/tsd.c
+ @srcroot@src/mutex.c @srcroot@src/prof.c @srcroot@src/quarantine.c \
+ @srcroot@src/rtree.c @srcroot@src/stats.c @srcroot@src/tcache.c \
+ @srcroot@src/util.c @srcroot@src/tsd.c
ifeq (macho, @abi@)
CSRCS += @srcroot@src/zone.c
endif
diff --git a/configure.ac b/configure.ac
index 8e94b5c..a272ecd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -685,7 +685,8 @@ AC_SUBST([enable_dss])
dnl Support the junk/zero filling option by default.
AC_ARG_ENABLE([fill],
- [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])],
+ [AS_HELP_STRING([--disable-fill],
+ [Disable support for junk/zero filling, quarantine, and redzones])],
[if test "x$enable_fill" = "xno" ; then
enable_fill="0"
else
@@ -727,6 +728,38 @@ if test "x$enable_utrace" = "x1" ; then
fi
AC_SUBST([enable_utrace])
+dnl Support Valgrind by default.
+AC_ARG_ENABLE([valgrind],
+ [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])],
+[if test "x$enable_valgrind" = "xno" ; then
+ enable_valgrind="0"
+else
+ enable_valgrind="1"
+fi
+],
+[enable_valgrind="1"]
+)
+if test "x$enable_valgrind" = "x1" ; then
+ JE_COMPILABLE([valgrind], [
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
+ && (__VALGRIND_MAJOR__ > 3 || (__VALGRIND_MAJOR__ == 3 && \
+ __VALGRIND_MINOR__ >= 6))
+#else
+# error "Incompatible Valgrind version"
+#endif
+], [], [je_cv_valgrind])
+ if test "x${je_cv_valgrind}" = "xno" ; then
+ enable_valgrind="0"
+ fi
+ if test "x$enable_valgrind" = "x1" ; then
+ AC_DEFINE([JEMALLOC_VALGRIND], [ ])
+ fi
+fi
+AC_SUBST([enable_valgrind])
+
dnl Do not support the xmalloc option by default.
AC_ARG_ENABLE([xmalloc],
[AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
@@ -1088,8 +1121,9 @@ AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
AC_MSG_RESULT([tcache : ${enable_tcache}])
AC_MSG_RESULT([fill : ${enable_fill}])
-AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
AC_MSG_RESULT([utrace : ${enable_utrace}])
+AC_MSG_RESULT([valgrind : ${enable_valgrind}])
+AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
AC_MSG_RESULT([dss : ${enable_dss}])
AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
AC_MSG_RESULT([tls : ${enable_tls}])
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 8ae8262..a47c763 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -722,6 +722,16 @@ for (i = 0; i < nbins; i++) {
<varlistentry>
<term>
+ <mallctl>config.valgrind</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-valgrind</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
<mallctl>config.xmalloc</mallctl>
(<type>bool</type>)
<literal>r-</literal>
@@ -819,6 +829,47 @@ for (i = 0; i < nbins; i++) {
configuration, in which case it is enabled by default.</para></listitem>
</varlistentry>
+ <varlistentry id="opt.quarantine">
+ <term>
+ <mallctl>opt.quarantine</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Per thread quarantine size in bytes. If non-zero, each
+ thread maintains a FIFO object quarantine that stores up to the
+ specified number of bytes of memory. The quarantined memory is not
+ freed until it is released from quarantine, though it is immediately
+ junk-filled if the <link
+ linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
+ enabled. This feature is of particular use in combination with <ulink
+ url="http://http://valgrind.org/">Valgrind</ulink>, which can detect
+ attempts to access quarantined objects. This is intended for debugging
+ and will impact performance negatively. The default quarantine size is
+ 0.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.redzone">
+ <term>
+ <mallctl>opt.redzone</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Redzones enabled/disabled. If enabled, small
+ allocations have redzones before and after them. Furthermore, if the
+ <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
+ enabled, the redzones are checked for corruption during deallocation.
+ However, the primary intended purpose of this feature is to be used in
+ combination with <ulink
+ url="http://http://valgrind.org/">Valgrind</ulink>, which needs
+ redzones in order to do effective buffer overflow/underflow detection.
+ This option is intended for debugging and will impact performance
+ negatively. This option is disabled by default unless
+ <option>--enable-debug</option> is specified during configuration, in
+ which case it is enabled by default.</para></listitem>
+ </varlistentry>
+
<varlistentry id="opt.zero">
<term>
<mallctl>opt.zero</mallctl>
@@ -849,6 +900,25 @@ for (i = 0; i < nbins; i++) {
is disabled by default.</para></listitem>
</varlistentry>
+ <varlistentry id="opt.valgrind">
+ <term>
+ <mallctl>opt.valgrind</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-valgrind</option>]
+ </term>
+ <listitem><para><ulink
+ url="http://http://valgrind.org/">Valgrind</ulink> support
+ enabled/disabled. If enabled, several other options are automatically
+ modified during options processing to work well with Valgrind: <link
+ linkend="opt.junk"><mallctl>opt.junk</mallctl></link> and <link
+ linkend="opt.zero"><mallctl>opt.zero</mallctl></link> are set to false,
+ <link linkend="opt.quarantine"><mallctl>opt.quarantine</mallctl></link>
+ is set to 16 MiB, and <link
+ linkend="opt.redzone"><mallctl>opt.redzone</mallctl></link> is set to
+ true. This option is disabled by default.</para></listitem>
+ </varlistentry>
+
<varlistentry id="opt.xmalloc">
<term>
<mallctl>opt.xmalloc</mallctl>
@@ -1764,10 +1834,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<para>This implementation does not provide much detail about the problems
it detects, because the performance impact for storing such information
- would be prohibitive. There are a number of allocator implementations
- available on the Internet which focus on detecting and pinpointing problems
- by trading performance for extra sanity checks and detailed
- diagnostics.</para>
+ would be prohibitive. However, jemalloc does integrate with the most
+ excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if
+ the <option>--enable-valgrind</option> configuration option is enabled and
+ the <link linkend="opt.valgrind"><mallctl>opt.valgrind</mallctl></link>
+ option is enabled.</para>
</refsect1>
<refsect1 id="diagnostic_messages">
<title>DIAGNOSTIC MESSAGES</title>
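
A usage sketch tying the documented options together (MALLOC_CONF is jemalloc's standard runtime-option channel; the 16777216-byte quarantine mirrors the documented opt.valgrind behavior):

    MALLOC_CONF="valgrind:true" valgrind --tool=memcheck ./myprog

    # Equivalent explicit settings:
    MALLOC_CONF="junk:false,zero:false,quarantine:16777216,redzone:true" \
        valgrind --tool=memcheck ./myprog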
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index d25a2b1..f52fac4 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -16,7 +16,7 @@
* constraint is relaxed (ignored) for runs that are so small that the
* per-region overhead is greater than:
*
- * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
+ * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP))
*/
#define RUN_BFP 12
/* \/ Implicit binary fixed point. */
@@ -28,6 +28,12 @@
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
/*
+ * Minimum redzone size. Redzones may be larger than this if necessary to
+ * preserve region alignment.
+ */
+#define REDZONE_MINSIZE 16
+
+/*
* The minimum ratio of active:dirty pages per arena is computed as:
*
* (nactive >> opt_lg_dirty_mult) >= ndirty
@@ -192,11 +198,50 @@ struct arena_run_s {
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each run has the following layout:
+ *
+ * /--------------------\
+ * | arena_run_t header |
+ * | ... |
+ * bitmap_offset | bitmap |
+ * | ... |
+ * ctx0_offset | ctx map |
+ * | ... |
+ * |--------------------|
+ * | redzone |
+ * reg0_offset | region 0 |
+ * | redzone |
+ * |--------------------| \
+ * | redzone | |
+ * | region 1 | > reg_interval
+ * | redzone | /
+ * |--------------------|
+ * | ... |
+ * | ... |
+ * | ... |
+ * |--------------------|
+ * | redzone |
+ * | region nregs-1 |
+ * | redzone |
+ * |--------------------|
+ * | alignment pad? |
+ * \--------------------/
+ *
+ * reg_interval has at least the same minimum alignment as reg_size; this
+ * preserves the alignment constraint that sa2u() depends on. Alignment pad is
+ * either 0 or redzone_size; it is present only if needed to align reg0_offset.
*/
struct arena_bin_info_s {
/* Size of regions in a run for this bin's size class. */
size_t reg_size;
+ /* Redzone size. */
+ size_t redzone_size;
+
+ /* Interval between regions (reg_size + (redzone_size << 1)). */
+ size_t reg_interval;
+
/* Total size of a run for this bin's size class. */
size_t run_size;
@@ -357,13 +402,15 @@ void arena_purge_all(arena_t *arena);
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind, uint64_t prof_accumbytes);
+void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
+ bool zero);
+void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
size_t alignment, bool zero);
-size_t arena_salloc(const void *ptr);
+size_t arena_salloc(const void *ptr, bool demote);
void arena_prof_promoted(const void *ptr, size_t size);
-size_t arena_salloc_demote(const void *ptr);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
@@ -408,7 +455,7 @@ JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
unsigned shift, diff, regind;
- size_t size;
+ size_t interval;
/*
* Freeing a pointer lower than region zero can cause assertion
@@ -425,12 +472,12 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
bin_info->reg0_offset);
/* Rescale (factor powers of 2 out of the numerator and denominator). */
- size = bin_info->reg_size;
- shift = ffs(size) - 1;
+ interval = bin_info->reg_interval;
+ shift = ffs(interval) - 1;
diff >>= shift;
- size >>= shift;
+ interval >>= shift;
- if (size == 1) {
+ if (interval == 1) {
/* The divisor was a power of 2. */
regind = diff;
} else {
@@ -442,7 +489,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
*
* becomes
*
- * (X * size_invs[D - 3]) >> SIZE_INV_SHIFT
+ * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
*
* We can omit the first three elements, because we never
* divide by 0, and 1 and 2 are both powers of two, which are
@@ -450,7 +497,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
*/
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
- static const unsigned size_invs[] = {
+ static const unsigned interval_invs[] = {
SIZE_INV(3),
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
@@ -461,14 +508,16 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
};
- if (size <= ((sizeof(size_invs) / sizeof(unsigned)) + 2))
- regind = (diff * size_invs[size - 3]) >> SIZE_INV_SHIFT;
- else
- regind = diff / size;
+ if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
+ 2)) {
+ regind = (diff * interval_invs[interval - 3]) >>
+ SIZE_INV_SHIFT;
+ } else
+ regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
}
- assert(diff == regind * size);
+ assert(diff == regind * interval);
assert(regind < bin_info->nregs);
return (regind);
@@ -610,7 +659,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
&arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) %
- bin_info->reg_size == 0);
+ bin_info->reg_interval == 0);
}
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin(arena, chunk, ptr, mapelm);
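
To make the run layout above concrete: with reg_size = 32 and redzone_size = 16, reg_interval = 32 + (16 << 1) = 64, and region i starts at reg0_offset + i * reg_interval with a 16-byte redzone on each side (values assumed purely for illustration). A minimal standalone sketch of the pointer math, using hypothetical names rather than jemalloc's internals:

    #include <stddef.h>
    #include <stdint.h>

    /* Forward mapping, as in arena_run_reg_alloc(). */
    static inline void *
    region_addr(void *run, size_t reg0_offset, size_t reg_interval,
        unsigned regind)
    {

            return ((void *)((uintptr_t)run + reg0_offset +
                reg_interval * regind));
    }

    /* Inverse mapping; arena_run_regind() strength-reduces this
     * division via the interval_invs[] table when reg_interval is
     * small and not a power of two. */
    static inline unsigned
    region_index(void *run, size_t reg0_offset, size_t reg_interval,
        const void *ptr)
    {
            size_t diff = (size_t)((uintptr_t)ptr - ((uintptr_t)run +
                reg0_offset));

            return ((unsigned)(diff / reg_interval));
    }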
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 66dd357..a16e5e2 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -40,6 +40,11 @@
#include <sys/ktrace.h>
#endif
+#ifdef JEMALLOC_VALGRIND
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+#endif
+
#include "jemalloc/internal/private_namespace.h"
#ifdef JEMALLOC_CC_SILENCE
@@ -125,6 +130,13 @@ static const bool config_utrace =
false
#endif
;
+static const bool config_valgrind =
+#ifdef JEMALLOC_VALGRIND
+ true
+#else
+ false
+#endif
+ ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
@@ -281,6 +293,77 @@ static const bool config_ivsalloc =
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
+#ifdef JEMALLOC_VALGRIND
+/*
+ * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
+ * so that when Valgrind reports errors, there are no extra stack frames
+ * in the backtraces.
+ *
+ * The size that is reported to valgrind must be consistent through a chain of
+ * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
+ * jemalloc, so it is critical that all callers of these macros provide usize
+ * rather than request size. As a result, buffer overflow detection is
+ * technically weakened for the standard API, though it is generally accepted
+ * practice to consider any extra bytes reported by malloc_usable_size() as
+ * usable space.
+ */
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
+ if (config_valgrind && opt_valgrind && cond) \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
+} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
+ old_rzsize, zero) do { \
+ if (config_valgrind && opt_valgrind) { \
+ size_t rzsize = p2rz(ptr); \
+ \
+ if (ptr == old_ptr) { \
+ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
+ usize, rzsize); \
+ if (zero && old_usize < usize) { \
+ VALGRIND_MAKE_MEM_DEFINED( \
+ (void *)((uintptr_t)ptr + \
+ old_usize), usize - old_usize); \
+ } \
+ } else { \
+ if (old_ptr != NULL) { \
+ VALGRIND_FREELIKE_BLOCK(old_ptr, \
+ old_rzsize); \
+ } \
+ if (ptr != NULL) { \
+ size_t copy_size = (old_usize < usize) \
+ ? old_usize : usize; \
+ size_t tail_size = usize - copy_size; \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
+ rzsize, false); \
+ if (copy_size > 0) { \
+ VALGRIND_MAKE_MEM_DEFINED(ptr, \
+ copy_size); \
+ } \
+ if (zero && tail_size > 0) { \
+ VALGRIND_MAKE_MEM_DEFINED( \
+ (void *)((uintptr_t)ptr + \
+ copy_size), tail_size); \
+ } \
+ } \
+ } \
+ } \
+} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
+ if (config_valgrind && opt_valgrind) \
+ VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
+} while (0)
+#else
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
+#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
+ old_rzsize, zero)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
+#endif
+
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -300,6 +383,7 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
@@ -325,6 +409,7 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
typedef struct {
@@ -343,7 +428,10 @@ typedef struct {
extern bool opt_abort;
extern bool opt_junk;
+extern size_t opt_quarantine;
+extern bool opt_redzone;
extern bool opt_utrace;
+extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
@@ -385,6 +473,7 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_EXTERNS
@@ -550,14 +639,18 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
-size_t isalloc(const void *ptr);
-size_t ivsalloc(const void *ptr);
+size_t isalloc(const void *ptr, bool demote);
+size_t ivsalloc(const void *ptr, bool demote);
+size_t u2rz(size_t usize);
+size_t p2rz(const void *ptr);
void idalloc(void *ptr);
+void iqalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
@@ -621,21 +714,25 @@ ipalloc(size_t usize, size_t alignment, bool zero)
return (ret);
}
+/*
+ * Typical usage:
+ * void *ptr = [...]
+ * size_t sz = isalloc(ptr, config_prof);
+ */
JEMALLOC_INLINE size_t
-isalloc(const void *ptr)
+isalloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
+ /* Demotion only makes sense if config_prof is true. */
+ assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
- if (config_prof)
- ret = arena_salloc_demote(ptr);
- else
- ret = arena_salloc(ptr);
+ ret = arena_salloc(ptr, demote);
} else
ret = huge_salloc(ptr);
@@ -643,14 +740,36 @@ isalloc(const void *ptr)
}
JEMALLOC_INLINE size_t
-ivsalloc(const void *ptr)
+ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
return (0);
- return (isalloc(ptr));
+ return (isalloc(ptr, demote));
+}
+
+JEMALLOC_INLINE size_t
+u2rz(size_t usize)
+{
+ size_t ret;
+
+ if (usize <= SMALL_MAXCLASS) {
+ size_t binind = SMALL_SIZE2BIN(usize);
+ ret = arena_bin_info[binind].redzone_size;
+ } else
+ ret = 0;
+
+ return (ret);
+}
+
+JEMALLOC_INLINE size_t
+p2rz(const void *ptr)
+{
+ size_t usize = isalloc(ptr, false);
+
+ return (u2rz(usize));
}
JEMALLOC_INLINE void
@@ -667,6 +786,16 @@ idalloc(void *ptr)
huge_dalloc(ptr, true);
}
+JEMALLOC_INLINE void
+iqalloc(void *ptr)
+{
+
+ if (config_fill && opt_quarantine)
+ quarantine(ptr);
+ else
+ idalloc(ptr);
+}
+
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
@@ -677,14 +806,14 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
assert(ptr != NULL);
assert(size != 0);
- oldsize = isalloc(ptr);
+ oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
size_t usize, copysize;
/*
- * Existing object alignment is inadquate; allocate new space
+ * Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
@@ -711,7 +840,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
- idalloc(ptr);
+ iqalloc(ptr);
return (ret);
}
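
The JEMALLOC_VALGRIND_*() macros above wrap Valgrind's standard client requests. A toy-allocator sketch of the underlying protocol (slab_take()/slab_put() are hypothetical; the VALGRIND_* requests are the real valgrind.h/memcheck.h interface):

    #include <stddef.h>
    #include <valgrind/valgrind.h>
    #include <valgrind/memcheck.h>

    extern void *slab_take(size_t size);    /* hypothetical backing store */
    extern void slab_put(void *p);          /* hypothetical */

    void *
    toy_alloc(size_t size)
    {
            void *p = slab_take(size);

            /* Register the block: size usable bytes, 16-byte redzones
             * on either side, contents undefined (last arg 0 means not
             * zeroed). */
            VALGRIND_MALLOCLIKE_BLOCK(p, size, 16, 0);
            return (p);
    }

    void
    toy_free(void *p)
    {

            /* The redzone size must match the MALLOCLIKE request. */
            VALGRIND_FREELIKE_BLOCK(p, 16);
            slab_put(p);
    }

This consistency requirement is why the macros take usize and rzsize explicitly: the sizes reported to Valgrind must stay stable across a block's lifetime.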
diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
index de3042e..a962192 100644
--- a/include/jemalloc/internal/private_namespace.h
+++ b/include/jemalloc/internal/private_namespace.h
@@ -1,7 +1,9 @@
+#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
@@ -20,7 +22,6 @@
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
-#define arena_salloc_demote JEMALLOC_N(arena_salloc_demote)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
@@ -136,6 +137,7 @@
#define idalloc JEMALLOC_N(idalloc)
#define imalloc JEMALLOC_N(imalloc)
#define ipalloc JEMALLOC_N(ipalloc)
+#define iqalloc JEMALLOC_N(iqalloc)
#define iralloc JEMALLOC_N(iralloc)
#define isalloc JEMALLOC_N(isalloc)
#define ivsalloc JEMALLOC_N(ivsalloc)
@@ -176,6 +178,7 @@
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
+#define p2rz JEMALLOC_N(p2rz)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
@@ -195,6 +198,8 @@
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define pthread_create JEMALLOC_N(pthread_create)
+#define quarantine JEMALLOC_N(quarantine)
+#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
@@ -229,3 +234,4 @@
#define thread_allocated_get JEMALLOC_N(thread_allocated_get)
#define thread_allocated_get_hard JEMALLOC_N(thread_allocated_get_hard)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
+#define u2rz JEMALLOC_N(u2rz)
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 34929e7..a37bb44 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -378,7 +378,7 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
cassert(config_prof);
assert(ptr != NULL);
- assert(size == isalloc(ptr));
+ assert(size == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
if (prof_sample_accum_update(size)) {
@@ -427,7 +427,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
if (ptr != NULL) {
- assert(size == isalloc(ptr));
+ assert(size == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
if (prof_sample_accum_update(size)) {
/*
@@ -500,7 +500,7 @@ prof_free(const void *ptr, size_t size)
cassert(config_prof);
if ((uintptr_t)ctx > (uintptr_t)1) {
- assert(size == isalloc(ptr));
+ assert(size == isalloc(ptr, true));
prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
if (tcnt != NULL) {
diff --git a/include/jemalloc/internal/quarantine.h b/include/jemalloc/internal/quarantine.h
new file mode 100644
index 0000000..38f3d69
--- /dev/null
+++ b/include/jemalloc/internal/quarantine.h
@@ -0,0 +1,24 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/* Default per thread quarantine size if valgrind is enabled. */
+#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void quarantine(void *ptr);
+bool quarantine_boot(void);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 93e721d..9d8c992 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -340,17 +340,24 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
if (ret == NULL)
return (NULL);
}
- assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
+ assert(arena_salloc(ret, false) == arena_bin_info[binind].reg_size);
if (zero == false) {
if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
+ if (opt_junk) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (opt_zero)
memset(ret, 0, size);
}
- } else
+ } else {
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
+ }
if (config_stats)
tbin->tstats.nrequests++;
@@ -397,8 +404,10 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
else if (opt_zero)
memset(ret, 0, size);
}
- } else
+ } else {
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
+ }
if (config_stats)
tbin->tstats.nrequests++;
@@ -422,7 +431,7 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
size_t pageind, binind;
arena_chunk_map_t *mapelm;
- assert(arena_salloc(ptr) <= SMALL_MAXCLASS);
+ assert(arena_salloc(ptr, false) <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
@@ -436,7 +445,7 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
assert(binind < NBINS);
if (config_fill && opt_junk)
- memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
+ arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
@@ -459,8 +468,8 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
- assert(arena_salloc(ptr) > SMALL_MAXCLASS);
- assert(arena_salloc(ptr) <= tcache_maxclass);
+ assert(arena_salloc(ptr, false) > SMALL_MAXCLASS);
+ assert(arena_salloc(ptr, false) <= tcache_maxclass);
binind = NBINS + (size >> LG_PAGE) - 1;
diff --git a/include/jemalloc/internal/util.h b/include/jemalloc/internal/util.h
index 3d3ea3a..d360ae3 100644
--- a/include/jemalloc/internal/util.h
+++ b/include/jemalloc/internal/util.h
@@ -140,7 +140,6 @@ malloc_write(const char *s)
je_malloc_message(NULL, s);
}
-
#endif
#endif /* JEMALLOC_H_INLINES */
diff --git a/include/jemalloc/jemalloc_defs.h.in b/include/jemalloc/jemalloc_defs.h.in
index 8e7442d..7770a7a 100644
--- a/include/jemalloc/jemalloc_defs.h.in
+++ b/include/jemalloc/jemalloc_defs.h.in
@@ -148,7 +148,7 @@
*/
#undef JEMALLOC_DSS
-/* Support memory filling (junk/zero). */
+/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL
/* Support the experimental API. */
@@ -157,6 +157,9 @@
/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE
+/* Support Valgrind. */
+#undef JEMALLOC_VALGRIND
+
/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC
diff --git a/src/arena.c b/src/arena.c
index c84aaf4..1d4f61e 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -140,7 +140,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
- (uintptr_t)(bin_info->reg_size * regind));
+ (uintptr_t)(bin_info->reg_interval * regind));
run->nfree--;
if (regind == run->nextind)
run->nextind++;
@@ -161,8 +161,8 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size
- == 0);
+ (uintptr_t)bin_info->reg0_offset)) %
+ (uintptr_t)bin_info->reg_interval == 0);
assert((uintptr_t)ptr >= (uintptr_t)run +
(uintptr_t)bin_info->reg0_offset);
/* Freeing an unallocated pointer can cause assertion failure. */
@@ -260,10 +260,18 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
for (i = 0; i < need_pages; i++) {
if ((chunk->map[run_ind+i-map_bias].bits
& CHUNK_MAP_UNZEROED) != 0) {
+ VALGRIND_MAKE_MEM_UNDEFINED(
+ (void *)((uintptr_t)
+ chunk + ((run_ind+i) <<
+ LG_PAGE)), PAGE);
memset((void *)((uintptr_t)
chunk + ((run_ind+i) <<
LG_PAGE)), 0, PAGE);
} else if (config_debug) {
+ VALGRIND_MAKE_MEM_DEFINED(
+ (void *)((uintptr_t)
+ chunk + ((run_ind+i) <<
+ LG_PAGE)), PAGE);
arena_chunk_validate_zeroed(
chunk, run_ind+i);
}
@@ -273,6 +281,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* The run is dirty, so all pages must be
* zeroed.
*/
+ VALGRIND_MAKE_MEM_UNDEFINED((void
+ *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (need_pages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), 0, (need_pages << LG_PAGE));
}
@@ -1245,6 +1256,10 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
ptr = arena_bin_malloc_hard(arena, bin);
if (ptr == NULL)
break;
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+ true);
+ }
/* Insert such that low regions get used first. */
tbin->avail[nfill - 1 - i] = ptr;
}
@@ -1259,6 +1274,55 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
tbin->ncached = i;
}
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+{
+
+ if (zero) {
+ size_t redzone_size = bin_info->redzone_size;
+ memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+ redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+ redzone_size);
+ } else {
+ memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+ bin_info->reg_interval);
+ }
+}
+
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t size = bin_info->reg_size;
+ size_t redzone_size = bin_info->redzone_size;
+ size_t i;
+ bool error = false;
+
+ for (i = 1; i <= redzone_size; i++) {
+ unsigned byte;
+ if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+ error = true;
+ malloc_printf("<jemalloc>: Corrupt redzone "
+ "%zu byte%s before %p (size %zu), byte=%#x\n", i,
+ (i == 1) ? "" : "s", ptr, size, byte);
+ }
+ }
+ for (i = 0; i < redzone_size; i++) {
+ unsigned byte;
+ if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+ error = true;
+ malloc_printf("<jemalloc>: Corrupt redzone "
+ "%zu byte%s after end of %p (size %zu), byte=%#x\n",
+ i, (i == 1) ? "" : "s", ptr, size, byte);
+ }
+ }
+ if (opt_abort && error)
+ abort();
+
+ memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+ bin_info->reg_interval);
+}
+
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
@@ -1297,13 +1361,20 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
if (zero == false) {
if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
+ if (opt_junk) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (opt_zero)
memset(ret, 0, size);
}
- } else
+ } else {
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
+ }
return (ret);
}
@@ -1412,7 +1483,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
/* Return the size of the allocation pointed to by ptr. */
size_t
-arena_salloc(const void *ptr)
+arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
@@ -1431,12 +1502,19 @@ arena_salloc(const void *ptr)
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
- 0);
+ (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+ == 0);
ret = bin_info->reg_size;
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = mapbits & ~PAGE_MASK;
+ if (demote && prof_promote && ret == PAGE && (mapbits &
+ CHUNK_MAP_CLASS_MASK) != 0) {
+ size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
+ CHUNK_MAP_CLASS_SHIFT) - 1;
+ assert(binind < NBINS);
+ ret = arena_bin_info[binind].reg_size;
+ }
assert(ret != 0);
}
@@ -1449,9 +1527,11 @@ arena_prof_promoted(const void *ptr, size_t size)
arena_chunk_t *chunk;
size_t pageind, binind;
+ assert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr) == PAGE);
+ assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, true) == PAGE);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1460,45 +1540,9 @@ arena_prof_promoted(const void *ptr, size_t size)
assert(binind < NBINS);
chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
-}
-
-size_t
-arena_salloc_demote(const void *ptr)
-{
- size_t ret;
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = chunk->map[pageind-map_bias].bits;
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
- size_t binind = arena_bin_index(chunk->arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
- 0);
- ret = bin_info->reg_size;
- } else {
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
- ret = mapbits & ~PAGE_MASK;
- if (prof_promote && ret == PAGE && (mapbits &
- CHUNK_MAP_CLASS_MASK) != 0) {
- size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
- CHUNK_MAP_CLASS_SHIFT) - 1;
- assert(binind < NBINS);
- ret = arena_bin_info[binind].reg_size;
- }
- assert(ret != 0);
- }
- return (ret);
+ assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, true) == size);
}
static void
@@ -1545,7 +1589,8 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
past = (size_t)(PAGE_CEILING((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
- bin_info->reg_size) - (uintptr_t)chunk) >> LG_PAGE);
+ bin_info->reg_interval - bin_info->redzone_size) -
+ (uintptr_t)chunk) >> LG_PAGE);
malloc_mutex_lock(&arena->lock);
/*
@@ -1617,7 +1662,7 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size = bin_info->reg_size;
if (config_fill && opt_junk)
- memset(ptr, 0x5a, size);
+ arena_dalloc_junk_small(ptr, bin_info);
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
@@ -1936,7 +1981,7 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
- idalloc(ptr);
+ iqalloc(ptr);
return (ret);
}
@@ -2007,17 +2052,41 @@ arena_new(arena_t *arena, unsigned ind)
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
+ size_t pad_size;
size_t try_run_size, good_run_size;
uint32_t try_nregs, good_nregs;
uint32_t try_hdr_size, good_hdr_size;
uint32_t try_bitmap_offset, good_bitmap_offset;
uint32_t try_ctx0_offset, good_ctx0_offset;
- uint32_t try_reg0_offset, good_reg0_offset;
+ uint32_t try_redzone0_offset, good_redzone0_offset;
assert(min_run_size >= PAGE);
assert(min_run_size <= arena_maxclass);
/*
+ * Determine redzone size based on minimum alignment and minimum
+ * redzone size. Add padding to the end of the run if it is needed to
+ * align the regions. The padding allows each redzone to be half the
+ * minimum alignment; without the padding, each redzone would have to
+ * be twice as large in order to maintain alignment.
+ */
+ if (config_fill && opt_redzone) {
+ size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+ if (align_min <= REDZONE_MINSIZE) {
+ bin_info->redzone_size = REDZONE_MINSIZE;
+ pad_size = 0;
+ } else {
+ bin_info->redzone_size = align_min >> 1;
+ pad_size = bin_info->redzone_size;
+ }
+ } else {
+ bin_info->redzone_size = 0;
+ pad_size = 0;
+ }
+ bin_info->reg_interval = bin_info->reg_size +
+ (bin_info->redzone_size << 1);
+
+ /*
* Calculate known-valid settings before entering the run_size
* expansion loop, so that the first part of the loop always copies
* valid settings.
@@ -2028,7 +2097,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
* header's mask length and the number of regions.
*/
try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size)
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+ bin_info->reg_interval)
+ 1; /* Counter-act try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
@@ -2050,9 +2120,9 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
} else
try_ctx0_offset = 0;
- try_reg0_offset = try_run_size - (try_nregs *
- bin_info->reg_size);
- } while (try_hdr_size > try_reg0_offset);
+ try_redzone0_offset = try_run_size - (try_nregs *
+ bin_info->reg_interval) - pad_size;
+ } while (try_hdr_size > try_redzone0_offset);
/* run_size expansion loop. */
do {
@@ -2064,12 +2134,12 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
good_hdr_size = try_hdr_size;
good_bitmap_offset = try_bitmap_offset;
good_ctx0_offset = try_ctx0_offset;
- good_reg0_offset = try_reg0_offset;
+ good_redzone0_offset = try_redzone0_offset;
/* Try more aggressive settings. */
try_run_size += PAGE;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin_info->reg_size)
+ try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
+ bin_info->reg_interval)
+ 1; /* Counter-act try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
@@ -2093,23 +2163,27 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
try_hdr_size += try_nregs *
sizeof(prof_ctx_t *);
}
- try_reg0_offset = try_run_size - (try_nregs *
- bin_info->reg_size);
- } while (try_hdr_size > try_reg0_offset);
+ try_redzone0_offset = try_run_size - (try_nregs *
+ bin_info->reg_interval) - pad_size;
+ } while (try_hdr_size > try_redzone0_offset);
} while (try_run_size <= arena_maxclass
&& try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX
- && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
+ && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
+ RUN_MAX_OVRHD_RELAX
+ && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
&& try_nregs < RUN_MAXREGS);
- assert(good_hdr_size <= good_reg0_offset);
+ assert(good_hdr_size <= good_redzone0_offset);
/* Copy final settings. */
bin_info->run_size = good_run_size;
bin_info->nregs = good_nregs;
bin_info->bitmap_offset = good_bitmap_offset;
bin_info->ctx0_offset = good_ctx0_offset;
- bin_info->reg0_offset = good_reg0_offset;
+ bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+
+ assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+ * bin_info->reg_interval) + pad_size == bin_info->run_size);
return (good_run_size);
}
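
A worked example of the redzone sizing in bin_info_run_size_calc() above: for reg_size = 8, align_min = ZU(1) << (ffs(8) - 1) = 8 <= REDZONE_MINSIZE, so redzone_size = 16 and pad_size = 0, giving reg_interval = 8 + (16 << 1) = 40 (still a multiple of 8). For reg_size = 4096, align_min = 4096 > REDZONE_MINSIZE, so redzone_size = 2048 and pad_size = 2048, giving reg_interval = 8192; the trailing pad is what lets each redzone be half of align_min while region 0 stays 4096-byte aligned.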
diff --git a/src/ctl.c b/src/ctl.c
index a75ffef..6be4056 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -57,6 +57,7 @@ CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
+CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_lg_chunk)
@@ -65,7 +66,10 @@ CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
+CTL_PROTO(opt_quarantine)
+CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
+CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
@@ -179,6 +183,7 @@ static const ctl_node_t config_node[] = {
{NAME("tcache"), CTL(config_tcache)},
{NAME("tls"), CTL(config_tls)},
{NAME("utrace"), CTL(config_utrace)},
+ {NAME("valgrind"), CTL(config_valgrind)},
{NAME("xmalloc"), CTL(config_xmalloc)}
};
@@ -190,7 +195,10 @@ static const ctl_node_t opt_node[] = {
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
+ {NAME("quarantine"), CTL(opt_quarantine)},
+ {NAME("redzone"), CTL(opt_redzone)},
{NAME("utrace"), CTL(opt_utrace)},
+ {NAME("valgrind"), CTL(opt_valgrind)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
@@ -1050,7 +1058,8 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Set new arena association. */
if (config_tcache) {
tcache_t *tcache;
- if ((tcache = *tcache_tsd_get()) != NULL) {
+ if ((uintptr_t)(tcache = *tcache_tsd_get()) >
+ (uintptr_t)TCACHE_STATE_MAX) {
tcache_arena_dissociate(tcache);
tcache_arena_associate(tcache, arena);
}
@@ -1085,6 +1094,7 @@ CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
@@ -1096,7 +1106,10 @@ CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
diff --git a/src/huge.c b/src/huge.c
index 43c8f3b..daf0c62 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -174,7 +174,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
{
memcpy(ret, ptr, copysize);
- idalloc(ptr);
+ iqalloc(ptr);
}
return (ret);
}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index cde998c..237dd58 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -14,14 +14,19 @@ const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
+bool opt_redzone = true;
# else
bool opt_junk = false;
+bool opt_redzone = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
+bool opt_redzone = false;
#endif
+size_t opt_quarantine = ZU(0);
bool opt_utrace = false;
+bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;
@@ -419,7 +424,7 @@ malloc_conf_init(void)
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
-#define CONF_HANDLE_BOOL(o, n) \
+#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \
@@ -433,12 +438,19 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} \
+ hit = true; \
+ } else \
+ hit = false;
+#define CONF_HANDLE_BOOL(o, n) { \
+ bool hit; \
+ CONF_HANDLE_BOOL_HIT(o, n, hit); \
+ if (hit) \
continue; \
- }
+}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
- uintmax_t um; \
+ uintmax_t um; \
char *end; \
\
errno = 0; \
@@ -502,11 +514,30 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_stats_print, stats_print)
if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, junk)
+ CONF_HANDLE_SIZE_T(opt_quarantine, quarantine,
+ 0, SIZE_T_MAX)
+ CONF_HANDLE_BOOL(opt_redzone, redzone)
CONF_HANDLE_BOOL(opt_zero, zero)
}
if (config_utrace) {
CONF_HANDLE_BOOL(opt_utrace, utrace)
}
+ if (config_valgrind) {
+ bool hit;
+ CONF_HANDLE_BOOL_HIT(opt_valgrind,
+ valgrind, hit)
+ if (config_fill && opt_valgrind && hit) {
+ opt_junk = false;
+ opt_zero = false;
+ if (opt_quarantine == 0) {
+ opt_quarantine =
+ JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+ }
+ opt_redzone = true;
+ }
+ if (hit)
+ continue;
+ }
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
}
@@ -662,6 +693,11 @@ malloc_init_hard(void)
return (true);
}
+ if (config_fill && quarantine_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
if (config_prof && prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
@@ -763,7 +799,7 @@ je_malloc(size_t size)
} else
ret = imalloc(size);
} else {
- if (config_stats)
+ if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
ret = imalloc(size);
}
@@ -780,10 +816,11 @@ label_oom:
if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
- assert(usize == isalloc(ret));
+ assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
@@ -872,7 +909,7 @@ imemalign(void **memptr, size_t alignment, size_t size,
label_return:
if (config_stats && result != NULL) {
- assert(usize == isalloc(result));
+ assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
if (config_prof && opt_prof && result != NULL)
@@ -886,8 +923,10 @@ JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
-
- return imemalign(memptr, alignment, size, sizeof(void *));
+ int ret = imemalign(memptr, alignment, size, sizeof(void *));
+ JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
+ config_prof), false);
+ return (ret);
}
JEMALLOC_ATTR(malloc)
@@ -902,6 +941,8 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL;
errno = err;
}
+ JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
+ false);
return (ret);
}
@@ -956,7 +997,7 @@ je_calloc(size_t num, size_t size)
} else
ret = icalloc(num_size);
} else {
- if (config_stats)
+ if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(num_size);
ret = icalloc(num_size);
}
@@ -974,10 +1015,11 @@ label_return:
if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
- assert(usize == isalloc(ret));
+ assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
@@ -988,19 +1030,30 @@ je_realloc(void *ptr, size_t size)
void *ret;
size_t usize;
size_t old_size = 0;
+ size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
if (size == 0) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(p). */
- if (config_prof || config_stats)
- old_size = isalloc(ptr);
+ if (config_prof) {
+ old_size = isalloc(ptr, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(ptr);
+ } else if (config_stats) {
+ old_size = isalloc(ptr, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(ptr, false);
+ old_rzsize = u2rz(old_size);
+ }
if (config_prof && opt_prof) {
old_ctx = prof_ctx_get(ptr);
cnt = NULL;
}
- idalloc(ptr);
+ iqalloc(ptr);
ret = NULL;
goto label_return;
} else
@@ -1010,8 +1063,18 @@ je_realloc(void *ptr, size_t size)
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
- if (config_prof || config_stats)
- old_size = isalloc(ptr);
+ if (config_prof) {
+ old_size = isalloc(ptr, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(ptr);
+ } else if (config_stats) {
+ old_size = isalloc(ptr, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(ptr, false);
+ old_rzsize = u2rz(old_size);
+ }
if (config_prof && opt_prof) {
usize = s2u(size);
old_ctx = prof_ctx_get(ptr);
@@ -1035,7 +1098,7 @@ je_realloc(void *ptr, size_t size)
old_ctx = NULL;
}
} else {
- if (config_stats)
+ if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
ret = iralloc(ptr, size, 0, 0, false, false);
}
@@ -1076,7 +1139,8 @@ label_oom:
ret = imalloc(size);
}
} else {
- if (config_stats)
+ if (config_stats || (config_valgrind &&
+ opt_valgrind))
usize = s2u(size);
ret = imalloc(size);
}
@@ -1097,12 +1161,13 @@ label_return:
prof_realloc(ret, usize, cnt, old_size, old_ctx);
if (config_stats && ret != NULL) {
thread_allocated_t *ta;
- assert(usize == isalloc(ret));
+ assert(usize == isalloc(ret, config_prof));
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_size;
}
UTRACE(ptr, size, ret);
+ JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
return (ret);
}
@@ -1114,18 +1179,21 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (ptr != NULL) {
size_t usize;
+ size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(malloc_initialized || IS_INITIALIZER);
if (config_prof && opt_prof) {
- usize = isalloc(ptr);
+ usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
- } else if (config_stats) {
- usize = isalloc(ptr);
- }
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
- idalloc(ptr);
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
}
@@ -1145,6 +1213,7 @@ je_memalign(size_t alignment, size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
imemalign(&ret, alignment, size, 1);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -1157,6 +1226,7 @@ je_valloc(size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
imemalign(&ret, PAGE, size, 1);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -1209,9 +1279,9 @@ je_malloc_usable_size(const void *ptr)
assert(malloc_initialized || IS_INITIALIZER);
if (config_ivsalloc)
- ret = ivsalloc(ptr);
+ ret = ivsalloc(ptr, config_prof);
else
- ret = (ptr != NULL) ? isalloc(ptr) : 0;
+ ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
return (ret);
}
@@ -1336,10 +1406,11 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
*ptr = p;
if (config_stats) {
- assert(usize == isalloc(p));
+ assert(usize == isalloc(p, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
return (ALLOCM_SUCCESS);
label_oom:
if (config_xmalloc && opt_xmalloc) {
@@ -1360,6 +1431,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
void *p, *q;
size_t usize;
size_t old_size;
+ size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
@@ -1384,7 +1456,9 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment, NULL);
prof_ctx_t *old_ctx = prof_ctx_get(p);
- old_size = isalloc(p);
+ old_size = isalloc(p, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(p);
PROF_ALLOC_PREP(1, max_usize, cnt);
if (cnt == NULL)
goto label_oom;
@@ -1403,27 +1477,33 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
usize = max_usize;
arena_prof_promoted(q, usize);
} else
- usize = isalloc(q);
+ usize = isalloc(q, config_prof);
} else {
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto label_err;
- usize = isalloc(q);
+ usize = isalloc(q, config_prof);
}
prof_realloc(q, usize, cnt, old_size, old_ctx);
if (rsize != NULL)
*rsize = usize;
} else {
- if (config_stats)
- old_size = isalloc(p);
+ if (config_stats) {
+ old_size = isalloc(p, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(p, false);
+ old_rzsize = u2rz(old_size);
+ }
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto label_err;
if (config_stats)
- usize = isalloc(q);
+ usize = isalloc(q, config_prof);
if (rsize != NULL) {
if (config_stats == false)
- usize = isalloc(q);
+ usize = isalloc(q, config_prof);
*rsize = usize;
}
}
@@ -1436,6 +1516,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
ta->deallocated += old_size;
}
UTRACE(p, size, q);
+ JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
return (ALLOCM_SUCCESS);
label_err:
if (no_move) {
@@ -1462,10 +1543,10 @@ je_sallocm(const void *ptr, size_t *rsize, int flags)
assert(malloc_initialized || IS_INITIALIZER);
if (config_ivsalloc)
- sz = ivsalloc(ptr);
+ sz = ivsalloc(ptr, config_prof);
else {
assert(ptr != NULL);
- sz = isalloc(ptr);
+ sz = isalloc(ptr, config_prof);
}
assert(rsize != NULL);
*rsize = sz;
@@ -1479,21 +1560,25 @@ int
je_dallocm(void *ptr, int flags)
{
size_t usize;
+ size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
UTRACE(ptr, 0, 0);
- if (config_stats)
- usize = isalloc(ptr);
+ if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
if (config_prof && opt_prof) {
- if (config_stats == false)
- usize = isalloc(ptr);
+ if (config_stats == false && config_valgrind == false)
+ usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
}
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
- idalloc(ptr);
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
return (ALLOCM_SUCCESS);
}
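
A note on the old_rzsize/rzsize bookkeeping threaded through this file: Valgrind requires the redzone size passed to VALGRIND_FREELIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() to match what was originally reported via VALGRIND_MALLOCLIKE_BLOCK(), so each deallocation and reallocation path captures p2rz(ptr) (or the cheaper u2rz(usize) when the usable size is already in hand) before the pointer is handed to iqalloc()/iralloc().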
diff --git a/src/quarantine.c b/src/quarantine.c
new file mode 100644
index 0000000..89a25c6
--- /dev/null
+++ b/src/quarantine.c
@@ -0,0 +1,163 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+typedef struct quarantine_s quarantine_t;
+
+struct quarantine_s {
+ size_t curbytes;
+ size_t curobjs;
+ size_t first;
+#define LG_MAXOBJS_INIT 10
+ size_t lg_maxobjs;
+ void *objs[1]; /* Dynamically sized ring buffer. */
+};
+
+static void quarantine_cleanup(void *arg);
+
+malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
+malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
+ quarantine_cleanup)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static quarantine_t *quarantine_init(size_t lg_maxobjs);
+static quarantine_t *quarantine_grow(quarantine_t *quarantine);
+static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
+
+/******************************************************************************/
+
+static quarantine_t *
+quarantine_init(size_t lg_maxobjs)
+{
+ quarantine_t *quarantine;
+
+ quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
+ ((ZU(1) << lg_maxobjs) * sizeof(void *)));
+ if (quarantine == NULL)
+ return (NULL);
+ quarantine->curbytes = 0;
+ quarantine->curobjs = 0;
+ quarantine->first = 0;
+ quarantine->lg_maxobjs = lg_maxobjs;
+
+ quarantine_tsd_set(&quarantine);
+
+ return (quarantine);
+}
+
+static quarantine_t *
+quarantine_grow(quarantine_t *quarantine)
+{
+ quarantine_t *ret;
+
+ ret = quarantine_init(quarantine->lg_maxobjs + 1);
+ if (ret == NULL)
+ return (quarantine);
+
+ ret->curbytes = quarantine->curbytes;
+ if (quarantine->first + quarantine->curobjs < (ZU(1) <<
+ quarantine->lg_maxobjs)) {
+ /* objs ring buffer data are contiguous. */
+ memcpy(ret->objs, &quarantine->objs[quarantine->first],
+ quarantine->curobjs * sizeof(void *));
+ ret->curobjs = quarantine->curobjs;
+ } else {
+ /* objs ring buffer data wrap around. */
+ size_t ncopy = (ZU(1) << quarantine->lg_maxobjs) -
+ quarantine->first;
+ memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy *
+ sizeof(void *));
+ ret->curobjs = ncopy;
+ if (quarantine->curobjs != ncopy) {
+ memcpy(&ret->objs[ret->curobjs], quarantine->objs,
+ (quarantine->curobjs - ncopy) * sizeof(void *));
+ ret->curobjs = quarantine->curobjs;
+ }
+ }
+
+ return (ret);
+}
+
+static void
+quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+{
+
+ while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
+ void *ptr = quarantine->objs[quarantine->first];
+ size_t usize = isalloc(ptr, config_prof);
+ idalloc(ptr);
+ quarantine->curbytes -= usize;
+ quarantine->curobjs--;
+ quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+ quarantine->lg_maxobjs) - 1);
+ }
+}
+
+void
+quarantine(void *ptr)
+{
+ quarantine_t *quarantine;
+ size_t usize = isalloc(ptr, config_prof);
+
+ assert(config_fill);
+ assert(opt_quarantine);
+
+ quarantine = *quarantine_tsd_get();
+ if (quarantine == NULL && (quarantine =
+ quarantine_init(LG_MAXOBJS_INIT)) == NULL) {
+ idalloc(ptr);
+ return;
+ }
+ /*
+ * Drain one or more objects if the quarantine size limit would be
+ * exceeded by appending ptr.
+ */
+ if (quarantine->curbytes + usize > opt_quarantine) {
+ size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
+ - usize : 0;
+ quarantine_drain(quarantine, upper_bound);
+ }
+ /* Grow the quarantine ring buffer if it's full. */
+ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
+ quarantine = quarantine_grow(quarantine);
+ /* quarantine_grow() must free a slot if it fails to grow. */
+ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
+ /* Append ptr if its size doesn't exceed the quarantine size. */
+ if (quarantine->curbytes + usize <= opt_quarantine) {
+ size_t offset = (quarantine->first + quarantine->curobjs) &
+ ((ZU(1) << quarantine->lg_maxobjs) - 1);
+ quarantine->objs[offset] = ptr;
+ quarantine->curbytes += usize;
+ quarantine->curobjs++;
+ if (opt_junk)
+ memset(ptr, 0x5a, usize);
+ } else {
+ assert(quarantine->curbytes == 0);
+ idalloc(ptr);
+ }
+}
+
+static void
+quarantine_cleanup(void *arg)
+{
+ quarantine_t *quarantine = *(quarantine_t **)arg;
+
+ if (quarantine != NULL) {
+ quarantine_drain(quarantine, 0);
+ idalloc(quarantine);
+ }
+}
+
+bool
+quarantine_boot(void)
+{
+
+ assert(config_fill);
+
+ if (quarantine_tsd_boot())
+ return (true);
+
+ return (false);
+}
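
To illustrate what the quarantine buys under Valgrind (a hypothetical snippet; the exact report wording will vary):

    /* Run as: MALLOC_CONF="valgrind:true" valgrind ./a.out */
    #include <stdlib.h>

    int
    main(void)
    {
            char *p = malloc(64);

            p[0] = 'x';
            free(p);        /* Enters the per-thread FIFO quarantine. */
            return (p[0]);  /* Reported as an invalid read: the object
                             * is still quarantined, not recycled. */
    }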
diff --git a/src/stats.c b/src/stats.c
index 0cd70b0..4cad214 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -382,8 +382,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
+ OPT_WRITE_SIZE_T(quarantine)
+ OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(utrace)
+ OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_max)
diff --git a/src/tcache.c b/src/tcache.c
index 99a657b..be26b59 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -75,6 +75,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
&chunk->map[pageind-map_bias];
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ptr,
+ &arena_bin_info[binind], true);
+ }
arena_dalloc_bin(arena, chunk, ptr, mapelm);
} else {
/*
@@ -298,7 +302,7 @@ tcache_destroy(tcache_t *tcache)
malloc_mutex_unlock(&tcache->arena->lock);
}
- tcache_size = arena_salloc(tcache);
+ tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
diff --git a/src/zone.c b/src/zone.c
index a50c129..cde5d49 100644
--- a/src/zone.c
+++ b/src/zone.c
@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
- return (ivsalloc(ptr));
+ return (ivsalloc(ptr, config_prof));
}
static void *
@@ -87,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
- if (ivsalloc(ptr) != 0) {
+ if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
@@ -99,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
- if (ivsalloc(ptr) != 0)
+ if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
@@ -122,8 +122,8 @@ static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
- if (ivsalloc(ptr) != 0) {
- assert(ivsalloc(ptr) == size);
+ if (ivsalloc(ptr, config_prof) != 0) {
+ assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}