From 7372b15a31c63ac5cb9ed8aeabc2a0a3c005e8bf Mon Sep 17 00:00:00 2001
From: Jason Evans <je@fb.com>
Date: Fri, 10 Feb 2012 20:22:09 -0800
Subject: Reduce cpp conditional logic complexity.

Convert configuration-related cpp conditional logic into ordinary C
conditionals on static const boolean variables, e.g.:

  #ifdef JEMALLOC_DEBUG
    [...]
  #endif

becomes:

  if (config_debug) {
    [...]
  }
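
Because each config_* flag is a static const bool, the compiler can
fold the constant and strip the dead branch, while the code inside the
branch is still compiled and type-checked even when the feature is
disabled.  The flags are defined once, e.g.:

  static const bool config_debug =
  #ifdef JEMALLOC_DEBUG
      true
  #else
      false
  #endif
      ;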

The advantage is clearer, more concise code.  The main disadvantage is
that data structures no longer have conditionally defined fields, so
they pay the cost of all fields regardless of whether they are used.  In
practice, this is only a minor concern; config_stats will go away in an
upcoming change, and config_prof is the only other major feature that
depends on more than a few special-purpose fields.
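
Where a conditionally defined field would have real cost, a union
overlay keeps the unconditional definition free: arena_chunk_map_s
overlays prof_ctx with the linkage union, since chunk map overhead
scales with total memory rather than being a fixed per-arena cost.  A
condensed sketch (the rb_link/ql_link members are stand-ins for the
real run tree/list linkage):

  struct arena_chunk_map_s {
  #ifndef JEMALLOC_PROF
          /* Overlay prof_ctx so that dead code can still reference it. */
          union {
  #endif
          union {
                  void    *rb_link;       /* Stand-in: run tree linkage. */
                  void    *ql_link;       /* Stand-in: dirty run list. */
          } u;

          /* Profile counters, used for large object runs. */
          prof_ctx_t      *prof_ctx;
  #ifndef JEMALLOC_PROF
          }; /* union { ... }; */
  #endif
          size_t          bits;
  };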
---
 configure.ac                                     |   4 +-
 include/jemalloc/internal/arena.h                | 112 ++--
 include/jemalloc/internal/chunk.h                |   6 -
 include/jemalloc/internal/chunk_dss.h            |   2 -
 include/jemalloc/internal/chunk_swap.h           |   4 -
 include/jemalloc/internal/ckh.h                  |   2 -
 include/jemalloc/internal/ctl.h                  |   6 -
 include/jemalloc/internal/extent.h               |   6 -
 include/jemalloc/internal/huge.h                 |   4 -
 include/jemalloc/internal/jemalloc_internal.h.in | 169 ++++--
 include/jemalloc/internal/mutex.h                |  12 +-
 include/jemalloc/internal/prof.h                 |  15 +-
 include/jemalloc/internal/stats.h                |  21 -
 include/jemalloc/internal/tcache.h               | 114 ++--
 include/jemalloc/jemalloc_defs.h.in              |   6 +-
 src/arena.c                                      | 738 +++++++++--------------
 src/chunk.c                                      | 104 ++--
 src/chunk_dss.c                                  |  14 +-
 src/chunk_swap.c                                 |  43 +-
 src/ckh.c                                        |  17 +-
 src/ctl.c                                        | 598 +++++++-----------
 src/extent.c                                     |   2 -
 src/huge.c                                       |  80 +--
 src/jemalloc.c                                   | 608 ++++++-------------
 src/prof.c                                       |  75 ++-
 src/stats.c                                      |  13 +-
 src/tcache.c                                     | 130 ++--
 27 files changed, 1187 insertions(+), 1718 deletions(-)

diff --git a/configure.ac b/configure.ac
index 699f931..9617a5e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -174,6 +174,9 @@ AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])
 LD_PRELOAD_VAR="LD_PRELOAD"
 so="so"
 
+dnl Heap profiling uses the log(3) function.
+LIBS="$LIBS -lm"
+
 dnl Platform-specific settings.  abi and RPATH can probably be determined
 dnl programmatically, but doing so is error-prone, which makes it generally
 dnl not worth the trouble.
@@ -553,7 +556,6 @@ fi
 AC_MSG_CHECKING([configured backtracing method])
 AC_MSG_RESULT([$backtrace_method])
 if test "x$enable_prof" = "x1" ; then
-  LIBS="$LIBS -lm"
   AC_DEFINE([JEMALLOC_PROF], [ ])
 fi
 AC_SUBST([enable_prof])
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index b80c118..b6a5c23 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -16,11 +16,9 @@
 #define	SUBPAGE_CEILING(s)						\
 	(((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)
 
-#ifdef JEMALLOC_TINY
-   /* Smallest size class to support. */
-#  define LG_TINY_MIN		LG_SIZEOF_PTR
-#  define TINY_MIN		(1U << LG_TINY_MIN)
-#endif
+/* Smallest size class to support. */
+#define	LG_TINY_MIN		LG_SIZEOF_PTR
+#define	TINY_MIN		(1U << LG_TINY_MIN)
 
 /*
  * Maximum size class that is a multiple of the quantum, but not (necessarily)
@@ -85,6 +83,15 @@ typedef struct arena_s arena_t;
 
 /* Each element of the chunk map corresponds to one page within the chunk. */
 struct arena_chunk_map_s {
+#ifndef JEMALLOC_PROF
+	/*
+	 * Overlay prof_ctx in order to allow it to be referenced by dead code.
+	 * Such antics aren't warranted for per arena data structures, but
+	 * chunk map overhead accounts for a percentage of memory, rather than
+	 * being just a fixed cost.
+	 */
+	union {
+#endif
 	union {
 		/*
 		 * Linkage for run trees.  There are two disjoint uses:
@@ -103,9 +110,10 @@ struct arena_chunk_map_s {
 		ql_elm(arena_chunk_map_t)	ql_link;
 	}				u;
 
-#ifdef JEMALLOC_PROF
 	/* Profile counters, used for large object runs. */
 	prof_ctx_t			*prof_ctx;
+#ifndef JEMALLOC_PROF
+	}; /* union { ... }; */
 #endif
 
 	/*
@@ -162,10 +170,8 @@ struct arena_chunk_map_s {
 	 *     ssssssss ssssssss ssss---- ----D-LA
 	 */
 	size_t				bits;
-#ifdef JEMALLOC_PROF
 #define	CHUNK_MAP_CLASS_SHIFT	4
 #define	CHUNK_MAP_CLASS_MASK	((size_t)0xff0U)
-#endif
 #define	CHUNK_MAP_FLAGS_MASK	((size_t)0xfU)
 #define	CHUNK_MAP_DIRTY		((size_t)0x8U)
 #define	CHUNK_MAP_UNZEROED	((size_t)0x4U)
@@ -205,10 +211,8 @@ struct arena_chunk_s {
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
 struct arena_run_s {
-#ifdef JEMALLOC_DEBUG
 	uint32_t	magic;
 #  define ARENA_RUN_MAGIC 0x384adf93
-#endif
 
 	/* Bin this run is associated with. */
 	arena_bin_t	*bin;
@@ -247,13 +251,11 @@ struct arena_bin_info_s {
 	 */
 	bitmap_info_t	bitmap_info;
 
-#ifdef JEMALLOC_PROF
 	/*
 	 * Offset of first (prof_ctx_t *) in a run header for this bin's size
-	 * class, or 0 if (opt_prof == false).
+	 * class, or 0 if (config_prof == false || opt_prof == false).
 	 */
 	uint32_t	ctx0_offset;
-#endif
 
 	/* Offset of first region in a run for this bin's size class. */
 	uint32_t	reg0_offset;
@@ -283,17 +285,13 @@ struct arena_bin_s {
 	 */
 	arena_run_tree_t runs;
 
-#ifdef JEMALLOC_STATS
 	/* Bin statistics. */
 	malloc_bin_stats_t stats;
-#endif
 };
 
 struct arena_s {
-#ifdef JEMALLOC_DEBUG
 	uint32_t		magic;
 #  define ARENA_MAGIC 0x947d3d24
-#endif
 
 	/* This arena's index within the arenas array. */
 	unsigned		ind;
@@ -314,20 +312,14 @@ struct arena_s {
 	 */
 	malloc_mutex_t		lock;
 
-#ifdef JEMALLOC_STATS
 	arena_stats_t		stats;
-#  ifdef JEMALLOC_TCACHE
 	/*
 	 * List of tcaches for extant threads associated with this arena.
 	 * Stats from these are merged incrementally, and at exit.
 	 */
 	ql_head(tcache_t)	tcache_ql;
-#  endif
-#endif
 
-#ifdef JEMALLOC_PROF
 	uint64_t		prof_accumbytes;
-#endif
 
 	/* List of dirty-page-containing chunks this arena manages. */
 	ql_head(arena_chunk_t)	chunks_dirty;
@@ -455,35 +447,23 @@ extern size_t		sspace_max;
 #define			nlclasses (chunk_npages - map_bias)
 
 void	arena_purge_all(arena_t *arena);
-#ifdef JEMALLOC_PROF
 void	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-#endif
-#ifdef JEMALLOC_TCACHE
 void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
-    size_t binind
-#  ifdef JEMALLOC_PROF
-    , uint64_t prof_accumbytes
-#  endif
-    );
-#endif
+    size_t binind, uint64_t prof_accumbytes);
 void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
 void	*arena_malloc(size_t size, bool zero);
 void	*arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
     size_t alignment, bool zero);
 size_t	arena_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
 void	arena_prof_promoted(const void *ptr, size_t size);
 size_t	arena_salloc_demote(const void *ptr);
-#endif
 void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm);
 void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-#ifdef JEMALLOC_STATS
 void	arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats);
-#endif
 void	*arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void	*arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
@@ -499,10 +479,8 @@ bool	arena_boot(void);
 size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
-#  ifdef JEMALLOC_PROF
 prof_ctx_t	*arena_prof_ctx_get(const void *ptr);
 void	arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-#  endif
 void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 #endif
 
@@ -521,7 +499,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 	unsigned shift, diff, regind;
 	size_t size;
 
-	dassert(run->magic == ARENA_RUN_MAGIC);
+	assert(run->magic == ARENA_RUN_MAGIC);
 	/*
 	 * Freeing a pointer lower than region zero can cause assertion
 	 * failure.
@@ -586,7 +564,6 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 	return (regind);
 }
 
-#ifdef JEMALLOC_PROF
 JEMALLOC_INLINE prof_ctx_t *
 arena_prof_ctx_get(const void *ptr)
 {
@@ -594,6 +571,7 @@ arena_prof_ctx_get(const void *ptr)
 	arena_chunk_t *chunk;
 	size_t pageind, mapbits;
 
+	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
@@ -612,7 +590,7 @@ arena_prof_ctx_get(const void *ptr)
 			arena_bin_info_t *bin_info = &arena_bin_info[binind];
 			unsigned regind;
 
-			dassert(run->magic == ARENA_RUN_MAGIC);
+			assert(run->magic == ARENA_RUN_MAGIC);
 			regind = arena_run_regind(run, bin_info, ptr);
 			ret = *(prof_ctx_t **)((uintptr_t)run +
 			    bin_info->ctx0_offset + (regind *
@@ -630,6 +608,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 	arena_chunk_t *chunk;
 	size_t pageind, mapbits;
 
+	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
@@ -647,7 +626,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 			arena_bin_info_t *bin_info;
 			unsigned regind;
 
-			dassert(run->magic == ARENA_RUN_MAGIC);
+			assert(run->magic == ARENA_RUN_MAGIC);
 			binind = arena_bin_index(chunk->arena, bin);
 			bin_info = &arena_bin_info[binind];
 			regind = arena_run_regind(run, bin_info, ptr);
@@ -659,7 +638,6 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 	} else
 		chunk->map[pageind-map_bias].prof_ctx = ctx;
 }
-#endif
 
 JEMALLOC_INLINE void
 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@@ -668,7 +646,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 	arena_chunk_map_t *mapelm;
 
 	assert(arena != NULL);
-	dassert(arena->magic == ARENA_MAGIC);
+	assert(arena->magic == ARENA_MAGIC);
 	assert(chunk->arena == arena);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
@@ -678,63 +656,57 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
-#ifdef JEMALLOC_TCACHE
 		tcache_t *tcache;
 
-		if ((tcache = tcache_get()) != NULL)
+		if (config_tcache && (tcache = tcache_get()) != NULL)
 			tcache_dalloc_small(tcache, ptr);
 		else {
-#endif
 			arena_run_t *run;
 			arena_bin_t *bin;
 
 			run = (arena_run_t *)((uintptr_t)chunk +
 			    (uintptr_t)((pageind - (mapelm->bits >>
 			    PAGE_SHIFT)) << PAGE_SHIFT));
-			dassert(run->magic == ARENA_RUN_MAGIC);
+			assert(run->magic == ARENA_RUN_MAGIC);
 			bin = run->bin;
-#ifdef JEMALLOC_DEBUG
-			{
+			if (config_debug) {
 				size_t binind = arena_bin_index(arena, bin);
-				arena_bin_info_t *bin_info =
+				UNUSED arena_bin_info_t *bin_info =
 				    &arena_bin_info[binind];
 				assert(((uintptr_t)ptr - ((uintptr_t)run +
 				    (uintptr_t)bin_info->reg0_offset)) %
 				    bin_info->reg_size == 0);
 			}
-#endif
 			malloc_mutex_lock(&bin->lock);
 			arena_dalloc_bin(arena, chunk, ptr, mapelm);
 			malloc_mutex_unlock(&bin->lock);
-#ifdef JEMALLOC_TCACHE
 		}
-#endif
 	} else {
-#ifdef JEMALLOC_TCACHE
-		size_t size = mapelm->bits & ~PAGE_MASK;
-
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-		if (size <= tcache_maxclass) {
-			tcache_t *tcache;
-
-			if ((tcache = tcache_get()) != NULL)
-				tcache_dalloc_large(tcache, ptr, size);
-			else {
+		if (config_tcache) {
+			size_t size = mapelm->bits & ~PAGE_MASK;
+
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+			if (size <= tcache_maxclass) {
+				tcache_t *tcache;
+
+				if ((tcache = tcache_get()) != NULL)
+					tcache_dalloc_large(tcache, ptr, size);
+				else {
+					malloc_mutex_lock(&arena->lock);
+					arena_dalloc_large(arena, chunk, ptr);
+					malloc_mutex_unlock(&arena->lock);
+				}
+			} else {
 				malloc_mutex_lock(&arena->lock);
 				arena_dalloc_large(arena, chunk, ptr);
 				malloc_mutex_unlock(&arena->lock);
 			}
 		} else {
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
 			malloc_mutex_lock(&arena->lock);
 			arena_dalloc_large(arena, chunk, ptr);
 			malloc_mutex_unlock(&arena->lock);
 		}
-#else
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-		malloc_mutex_lock(&arena->lock);
-		arena_dalloc_large(arena, chunk, ptr);
-		malloc_mutex_unlock(&arena->lock);
-#endif
 	}
 }
 #endif
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 54b6a3e..4cc1e80 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -28,20 +28,14 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 extern size_t		opt_lg_chunk;
-#ifdef JEMALLOC_SWAP
 extern bool		opt_overcommit;
-#endif
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 /* Protects stats_chunks; currently not used for any other purpose. */
 extern malloc_mutex_t	chunks_mtx;
 /* Chunk statistics. */
 extern chunk_stats_t	stats_chunks;
-#endif
 
-#ifdef JEMALLOC_IVSALLOC
 extern rtree_t		*chunks_rtree;
-#endif
 
 extern size_t		chunksize;
 extern size_t		chunksize_mask; /* (chunksize - 1). */
diff --git a/include/jemalloc/internal/chunk_dss.h b/include/jemalloc/internal/chunk_dss.h
index 6f00522..35cd461 100644
--- a/include/jemalloc/internal/chunk_dss.h
+++ b/include/jemalloc/internal/chunk_dss.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_DSS
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -27,4 +26,3 @@ bool	chunk_dss_boot(void);
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_DSS */
diff --git a/include/jemalloc/internal/chunk_swap.h b/include/jemalloc/internal/chunk_swap.h
index 9faa739..99a079e 100644
--- a/include/jemalloc/internal/chunk_swap.h
+++ b/include/jemalloc/internal/chunk_swap.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_SWAP
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -15,9 +14,7 @@ extern bool		swap_enabled;
 extern bool		swap_prezeroed;
 extern size_t		swap_nfds;
 extern int		*swap_fds;
-#ifdef JEMALLOC_STATS
 extern size_t		swap_avail;
-#endif
 
 void	*chunk_alloc_swap(size_t size, bool *zero);
 bool	chunk_in_swap(void *chunk);
@@ -31,4 +28,3 @@ bool	chunk_swap_boot(void);
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_SWAP */
diff --git a/include/jemalloc/internal/ckh.h b/include/jemalloc/internal/ckh.h
index 3e4ad4c..28f171c 100644
--- a/include/jemalloc/internal/ckh.h
+++ b/include/jemalloc/internal/ckh.h
@@ -30,10 +30,8 @@ struct ckhc_s {
 };
 
 struct ckh_s {
-#ifdef JEMALLOC_DEBUG
 #define	CKH_MAGIC	0x3af2489d
 	uint32_t	magic;
-#endif
 
 #ifdef CKH_COUNT
 	/* Counters used to get an idea of performance. */
diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index f1f5eb7..31f9d99 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -32,7 +32,6 @@ struct ctl_arena_stats_s {
 	unsigned		nthreads;
 	size_t			pactive;
 	size_t			pdirty;
-#ifdef JEMALLOC_STATS
 	arena_stats_t		astats;
 
 	/* Aggregate stats for small size classes, based on bin stats. */
@@ -43,11 +42,9 @@ struct ctl_arena_stats_s {
 
 	malloc_bin_stats_t	*bstats;	/* nbins elements. */
 	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
-#endif
 };
 
 struct ctl_stats_s {
-#ifdef JEMALLOC_STATS
 	size_t			allocated;
 	size_t			active;
 	size_t			mapped;
@@ -61,11 +58,8 @@ struct ctl_stats_s {
 		uint64_t	nmalloc;	/* huge_nmalloc */
 		uint64_t	ndalloc;	/* huge_ndalloc */
 	} huge;
-#endif
 	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
-#ifdef JEMALLOC_SWAP
 	size_t			swap_avail;
-#endif
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 6fe9702..36af8be 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -9,18 +9,14 @@ typedef struct extent_node_s extent_node_t;
 
 /* Tree of extents. */
 struct extent_node_s {
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 	/* Linkage for the size/address-ordered tree. */
 	rb_node(extent_node_t)	link_szad;
-#endif
 
 	/* Linkage for the address-ordered tree. */
 	rb_node(extent_node_t)	link_ad;
 
-#ifdef JEMALLOC_PROF
 	/* Profile counters, used for huge objects. */
 	prof_ctx_t		*prof_ctx;
-#endif
 
 	/* Pointer to the extent that this tree node is responsible for. */
 	void			*addr;
@@ -34,9 +30,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
-#endif
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 66544cf..3a6b0b8 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -9,12 +9,10 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#ifdef JEMALLOC_STATS
 /* Huge allocation statistics. */
 extern uint64_t		huge_nmalloc;
 extern uint64_t		huge_ndalloc;
 extern size_t		huge_allocated;
-#endif
 
 /* Protects chunk-related data structures. */
 extern malloc_mutex_t	huge_mtx;
@@ -27,10 +25,8 @@ void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
     size_t alignment, bool zero);
 void	huge_dalloc(void *ptr, bool unmap);
 size_t	huge_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
 prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
 void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-#endif
 bool	huge_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index a44f097..8842e4b 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -35,6 +35,125 @@
 
 #include "jemalloc/internal/private_namespace.h"
 
+#ifdef JEMALLOC_CC_SILENCE
+#define	UNUSED JEMALLOC_ATTR(unused)
+#else
+#define	UNUSED
+#endif
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_dss =
+#ifdef JEMALLOC_DSS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_dynamic_page_shift =
+#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_swap =
+#ifdef JEMALLOC_SWAP
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_sysv =
+#ifdef JEMALLOC_SYSV
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tiny =
+#ifdef JEMALLOC_TINY
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+    true
+#else
+    false
+#endif
+    ;
+
 #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
 #include <libkern/OSAtomic.h>
 #endif
@@ -82,11 +201,11 @@ extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 #  endif
 #endif
 
-#ifdef JEMALLOC_DEBUG
-#  define dassert(e) assert(e)
-#else
-#  define dassert(e)
-#endif
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define	cassert(c) do {							\
+	if ((c) == false)						\
+		assert(false);						\
+} while (0)
 
 /*
  * jemalloc can conceptually be broken into components (arena, tcache, etc.),
@@ -265,30 +384,20 @@ extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 #endif
 #include "jemalloc/internal/prof.h"
 
-#ifdef JEMALLOC_STATS
 typedef struct {
 	uint64_t	allocated;
 	uint64_t	deallocated;
 } thread_allocated_t;
-#endif
 
 #undef JEMALLOC_H_STRUCTS
 /******************************************************************************/
 #define JEMALLOC_H_EXTERNS
 
 extern bool	opt_abort;
-#ifdef JEMALLOC_FILL
 extern bool	opt_junk;
-#endif
-#ifdef JEMALLOC_SYSV
 extern bool	opt_sysv;
-#endif
-#ifdef JEMALLOC_XMALLOC
 extern bool	opt_xmalloc;
-#endif
-#ifdef JEMALLOC_FILL
 extern bool	opt_zero;
-#endif
 extern size_t	opt_narenas;
 
 #ifdef DYNAMIC_PAGE_SHIFT
@@ -327,8 +436,7 @@ extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
 extern arena_t		**arenas;
 extern unsigned		narenas;
 
-#ifdef JEMALLOC_STATS
-#  ifndef NO_TLS
+#ifndef NO_TLS
 extern __thread thread_allocated_t	thread_allocated_tls;
 #    define ALLOCATED_GET() (thread_allocated_tls.allocated)
 #    define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
@@ -338,10 +446,7 @@ extern __thread thread_allocated_t	thread_allocated_tls;
 	thread_allocated_tls.allocated += a;				\
 	thread_allocated_tls.deallocated += d;				\
 } while (0)
-#  else
-extern pthread_key_t	thread_allocated_tsd;
-thread_allocated_t	*thread_allocated_get_hard(void);
-
+#else
 #    define ALLOCATED_GET() (thread_allocated_get()->allocated)
 #    define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
 #    define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
@@ -351,8 +456,9 @@ thread_allocated_t	*thread_allocated_get_hard(void);
 	thread_allocated->allocated += (a);				\
 	thread_allocated->deallocated += (d);				\
 } while (0)
-#  endif
 #endif
+extern pthread_key_t	thread_allocated_tsd;
+thread_allocated_t	*thread_allocated_get_hard(void);
 
 arena_t	*arenas_extend(unsigned ind);
 arena_t	*choose_arena_hard(void);
@@ -403,9 +509,7 @@ size_t	s2u(size_t size);
 size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
 void	malloc_write(const char *s);
 arena_t	*choose_arena(void);
-#  if (defined(JEMALLOC_STATS) && defined(NO_TLS))
 thread_allocated_t	*thread_allocated_get(void);
-#  endif
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -565,7 +669,6 @@ choose_arena(void)
 	return (ret);
 }
 
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
 JEMALLOC_INLINE thread_allocated_t *
 thread_allocated_get(void)
 {
@@ -577,7 +680,6 @@ thread_allocated_get(void)
 	return (thread_allocated);
 }
 #endif
-#endif
 
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/rtree.h"
@@ -593,9 +695,7 @@ void	*imalloc(size_t size);
 void	*icalloc(size_t size);
 void	*ipalloc(size_t usize, size_t alignment, bool zero);
 size_t	isalloc(const void *ptr);
-#  ifdef JEMALLOC_IVSALLOC
 size_t	ivsalloc(const void *ptr);
-#  endif
 void	idalloc(void *ptr);
 void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
     bool zero, bool no_move);
@@ -674,20 +774,18 @@ isalloc(const void *ptr)
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr) {
 		/* Region. */
-		dassert(chunk->arena->magic == ARENA_MAGIC);
+		assert(chunk->arena->magic == ARENA_MAGIC);
 
-#ifdef JEMALLOC_PROF
-		ret = arena_salloc_demote(ptr);
-#else
-		ret = arena_salloc(ptr);
-#endif
+		if (config_prof)
+			ret = arena_salloc_demote(ptr);
+		else
+			ret = arena_salloc(ptr);
 	} else
 		ret = huge_salloc(ptr);
 
 	return (ret);
 }
 
-#ifdef JEMALLOC_IVSALLOC
 JEMALLOC_INLINE size_t
 ivsalloc(const void *ptr)
 {
@@ -698,7 +796,6 @@ ivsalloc(const void *ptr)
 
 	return (isalloc(ptr));
 }
-#endif
 
 JEMALLOC_INLINE void
 idalloc(void *ptr)
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
index 62947ce..6a7b4fc 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
@@ -3,14 +3,14 @@
 
 #ifdef JEMALLOC_OSSPIN
 typedef OSSpinLock malloc_mutex_t;
+#define	MALLOC_MUTEX_INITIALIZER 0
 #else
 typedef pthread_mutex_t malloc_mutex_t;
-#endif
-
-#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-#  define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-#else
-#  define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#  ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#    define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#  else
+#    define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#  endif
 #endif
 
 #endif /* JEMALLOC_H_TYPES */
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index e9064ba..d470080 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_PROF
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -297,6 +296,8 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
 	uint64_t r;
 	double u;
 
+	cassert(config_prof);
+
 	/*
 	 * Compute sample threshold as a geometrically distributed random
 	 * variable with mean (2^opt_lg_prof_sample).
@@ -329,12 +330,13 @@ prof_ctx_get(const void *ptr)
 	prof_ctx_t *ret;
 	arena_chunk_t *chunk;
 
+	cassert(config_prof);
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr) {
 		/* Region. */
-		dassert(chunk->arena->magic == ARENA_MAGIC);
+		assert(chunk->arena->magic == ARENA_MAGIC);
 
 		ret = arena_prof_ctx_get(ptr);
 	} else
@@ -348,12 +350,13 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 {
 	arena_chunk_t *chunk;
 
+	cassert(config_prof);
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr) {
 		/* Region. */
-		dassert(chunk->arena->magic == ARENA_MAGIC);
+		assert(chunk->arena->magic == ARENA_MAGIC);
 
 		arena_prof_ctx_set(ptr, ctx);
 	} else
@@ -365,6 +368,7 @@ prof_sample_accum_update(size_t size)
 {
 	prof_tdata_t *prof_tdata;
 
+	cassert(config_prof);
 	/* Sampling logic is unnecessary if the interval is 1. */
 	assert(opt_lg_prof_sample != 0);
 
@@ -391,6 +395,7 @@ JEMALLOC_INLINE void
 prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
 {
 
+	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(size == isalloc(ptr));
 
@@ -437,6 +442,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
 {
 	prof_thr_cnt_t *told_cnt;
 
+	cassert(config_prof);
 	assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
 
 	if (ptr != NULL) {
@@ -510,6 +516,8 @@ prof_free(const void *ptr, size_t size)
 {
 	prof_ctx_t *ctx = prof_ctx_get(ptr);
 
+	cassert(config_prof);
+
 	if ((uintptr_t)ctx > (uintptr_t)1) {
 		assert(size == isalloc(ptr));
 		prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
@@ -544,4 +552,3 @@ prof_free(const void *ptr, size_t size)
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_PROF */
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index 2a9b31d..64ba4bd 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -3,23 +3,16 @@
 
 #define	UMAX2S_BUFSIZE	65
 
-#ifdef JEMALLOC_STATS
 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
 typedef struct malloc_large_stats_s malloc_large_stats_t;
 typedef struct arena_stats_s arena_stats_t;
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 typedef struct chunk_stats_s chunk_stats_t;
-#endif
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-#ifdef JEMALLOC_STATS
-
-#ifdef JEMALLOC_TCACHE
 struct tcache_bin_stats_s {
 	/*
 	 * Number of allocation requests that corresponded to the size of this
@@ -27,7 +20,6 @@ struct tcache_bin_stats_s {
 	 */
 	uint64_t	nrequests;
 };
-#endif
 
 struct malloc_bin_stats_s {
 	/*
@@ -52,13 +44,11 @@ struct malloc_bin_stats_s {
 	 */
 	uint64_t	nrequests;
 
-#ifdef JEMALLOC_TCACHE
 	/* Number of tcache fills from this bin. */
 	uint64_t	nfills;
 
 	/* Number of tcache flushes to this bin. */
 	uint64_t	nflushes;
-#endif
 
 	/* Total number of runs created for this bin's size class. */
 	uint64_t	nruns;
@@ -127,14 +117,10 @@ struct arena_stats_s {
 	 */
 	malloc_large_stats_t	*lstats;
 };
-#endif /* JEMALLOC_STATS */
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 struct chunk_stats_s {
-#  ifdef JEMALLOC_STATS
 	/* Number of chunks that were allocated. */
 	uint64_t	nchunks;
-#  endif
 
 	/* High-water mark for number of chunks allocated. */
 	size_t		highchunks;
@@ -146,7 +132,6 @@ struct chunk_stats_s {
 	 */
 	size_t		curchunks;
 };
-#endif /* JEMALLOC_STATS */
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
@@ -154,24 +139,19 @@ struct chunk_stats_s {
 
 extern bool	opt_stats_print;
 
-#ifdef JEMALLOC_STATS
 extern size_t	stats_cactive;
-#endif
 
 char	*u2s(uint64_t x, unsigned base, char *s);
-#ifdef JEMALLOC_STATS
 void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
     const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
 void	malloc_printf(const char *format, ...)
     JEMALLOC_ATTR(format(printf, 1, 2));
-#endif
 void	stats_print(void (*write)(void *, const char *), void *cbopaque,
     const char *opts);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
-#ifdef JEMALLOC_STATS
 
 #ifndef JEMALLOC_ENABLE_INLINE
 size_t	stats_cactive_get(void);
@@ -202,6 +182,5 @@ stats_cactive_sub(size_t size)
 }
 #endif
 
-#endif /* JEMALLOC_STATS */
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index da3c68c..0855d32 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -42,9 +42,7 @@ struct tcache_bin_info_s {
 };
 
 struct tcache_bin_s {
-#  ifdef JEMALLOC_STATS
 	tcache_bin_stats_t tstats;
-#  endif
 	int		low_water;	/* Min # cached since last GC. */
 	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
 	unsigned	ncached;	/* # of cached objects. */
@@ -52,12 +50,8 @@ struct tcache_bin_s {
 };
 
 struct tcache_s {
-#  ifdef JEMALLOC_STATS
 	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
-#  endif
-#  ifdef JEMALLOC_PROF
 	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
-#  endif
 	arena_t		*arena;		/* This thread's arena. */
 	unsigned	ev_cnt;		/* Event count since incremental GC. */
 	unsigned	next_gc_bin;	/* Next bin to GC. */
@@ -109,23 +103,15 @@ extern size_t			tcache_maxclass;
 /* Number of tcache allocation/deallocation events between incremental GCs. */
 extern unsigned			tcache_gc_incr;
 
-void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
-void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
+void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
+void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
 tcache_t *tcache_create(arena_t *arena);
 void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
     size_t binind);
 void	tcache_destroy(tcache_t *tcache);
-#ifdef JEMALLOC_STATS
 void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-#endif
 bool	tcache_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
@@ -195,19 +181,11 @@ tcache_event(tcache_t *tcache)
 			if (binind < nbins) {
 				tcache_bin_flush_small(tbin, binind,
 				    tbin->ncached - tbin->low_water +
-				    (tbin->low_water >> 2)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-				    , tcache
-#endif
-				    );
+				    (tbin->low_water >> 2), tcache);
 			} else {
 				tcache_bin_flush_large(tbin, binind,
 				    tbin->ncached - tbin->low_water +
-				    (tbin->low_water >> 2)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-				    , tcache
-#endif
-				    );
+				    (tbin->low_water >> 2), tcache);
 			}
 			/*
 			 * Reduce fill count by 2X.  Limit lg_fill_div such that
@@ -268,21 +246,19 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
 	assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
 
 	if (zero == false) {
-#ifdef JEMALLOC_FILL
-		if (opt_junk)
-			memset(ret, 0xa5, size);
-		else if (opt_zero)
-			memset(ret, 0, size);
-#endif
+		if (config_fill) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		}
 	} else
 		memset(ret, 0, size);
 
-#ifdef JEMALLOC_STATS
-	tbin->tstats.nrequests++;
-#endif
-#ifdef JEMALLOC_PROF
-	tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
-#endif
+	if (config_stats)
+		tbin->tstats.nrequests++;
+	if (config_prof)
+		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
 	tcache_event(tcache);
 	return (ret);
 }
@@ -309,28 +285,28 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 		if (ret == NULL)
 			return (NULL);
 	} else {
-#ifdef JEMALLOC_PROF
-		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
-		size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
-		    PAGE_SHIFT);
-		chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
-#endif
+		if (config_prof) {
+			arena_chunk_t *chunk =
+			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+			    PAGE_SHIFT);
+			chunk->map[pageind-map_bias].bits &=
+			    ~CHUNK_MAP_CLASS_MASK;
+		}
 		if (zero == false) {
-#ifdef JEMALLOC_FILL
-			if (opt_junk)
-				memset(ret, 0xa5, size);
-			else if (opt_zero)
-				memset(ret, 0, size);
-#endif
+			if (config_fill) {
+				if (opt_junk)
+					memset(ret, 0xa5, size);
+				else if (opt_zero)
+					memset(ret, 0, size);
+			}
 		} else
 			memset(ret, 0, size);
 
-#ifdef JEMALLOC_STATS
-		tbin->tstats.nrequests++;
-#endif
-#ifdef JEMALLOC_PROF
-		tcache->prof_accumbytes += size;
-#endif
+		if (config_stats)
+			tbin->tstats.nrequests++;
+		if (config_prof)
+			tcache->prof_accumbytes += size;
 	}
 
 	tcache_event(tcache);
@@ -357,26 +333,20 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
 	mapelm = &chunk->map[pageind-map_bias];
 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
 	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
-	dassert(run->magic == ARENA_RUN_MAGIC);
+	assert(run->magic == ARENA_RUN_MAGIC);
 	bin = run->bin;
 	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
 	    sizeof(arena_bin_t);
 	assert(binind < nbins);
 
-#ifdef JEMALLOC_FILL
-	if (opt_junk)
+	if (config_fill && opt_junk)
 		memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
-#endif
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
 	if (tbin->ncached == tbin_info->ncached_max) {
 		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
-		    1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		    1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;
@@ -403,20 +373,14 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
 	binind = nbins + (size >> PAGE_SHIFT) - 1;
 
-#ifdef JEMALLOC_FILL
-	if (opt_junk)
+	if (config_fill && opt_junk)
 		memset(ptr, 0x5a, size);
-#endif
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
 	if (tbin->ncached == tbin_info->ncached_max) {
 		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
-		    1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		    1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;
diff --git a/include/jemalloc/jemalloc_defs.h.in b/include/jemalloc/jemalloc_defs.h.in
index 9ac7e1c..d8052e2 100644
--- a/include/jemalloc/jemalloc_defs.h.in
+++ b/include/jemalloc/jemalloc_defs.h.in
@@ -48,9 +48,11 @@
 /* Defined if __attribute__((...)) syntax is supported. */
 #undef JEMALLOC_HAVE_ATTR
 #ifdef JEMALLOC_HAVE_ATTR
-#  define JEMALLOC_ATTR(s) __attribute__((s))
+#  define JEMALLOC_CATTR(s, a) __attribute__((s))
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
 #else
-#  define JEMALLOC_ATTR(s)
+#  define JEMALLOC_CATTR(s, a) a
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
 #endif
 
 /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
diff --git a/src/arena.c b/src/arena.c
index d166ca1..356b628 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -188,9 +188,7 @@ static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
 static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 static bool	small_size2bin_init(void);
-#ifdef JEMALLOC_DEBUG
 static void	small_size2bin_validate(void);
-#endif
 static bool	small_size2bin_init_hard(void);
 static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
     size_t min_run_size);
@@ -211,8 +209,8 @@ arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 }
 
 /* Generate red-black tree functions. */
-rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
-    arena_chunk_map_t, u.rb_link, arena_run_comp)
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
+    u.rb_link, arena_run_comp)
 
 static inline int
 arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
@@ -246,8 +244,8 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
 }
 
 /* Generate red-black tree functions. */
-rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
-    arena_chunk_map_t, u.rb_link, arena_avail_comp)
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
+    u.rb_link, arena_avail_comp)
 
 static inline void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
@@ -257,7 +255,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
 	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
 	    (uintptr_t)bin_info->bitmap_offset);
 
-	dassert(run->magic == ARENA_RUN_MAGIC);
+	assert(run->magic == ARENA_RUN_MAGIC);
 	assert(run->nfree > 0);
 	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
 
@@ -295,17 +293,16 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
 	run->nfree++;
 }
 
-#ifdef JEMALLOC_DEBUG
 static inline void
 arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
 	size_t i;
-	size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));
+	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind <<
+	    PAGE_SHIFT));
 
 	for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
 		assert(p[i] == 0);
 }
-#endif
 
 static void
 arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
@@ -315,9 +312,6 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 	size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
 	size_t flag_dirty;
 	arena_avail_tree_t *runs_avail;
-#ifdef JEMALLOC_STATS
-	size_t cactive_diff;
-#endif
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	old_ndirty = chunk->ndirty;
@@ -336,13 +330,17 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 	rem_pages = total_pages - need_pages;
 
 	arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
-#ifdef JEMALLOC_STATS
-	/* Update stats_cactive if nactive is crossing a chunk multiple. */
-	cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
-	    PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
-	if (cactive_diff != 0)
-		stats_cactive_add(cactive_diff);
-#endif
+	if (config_stats) {
+		/*
+		 * Update stats_cactive if nactive is crossing a chunk
+		 * multiple.
+		 */
+		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
+		    need_pages) << PAGE_SHIFT) - CHUNK_CEILING(arena->nactive <<
+		    PAGE_SHIFT);
+		if (cactive_diff != 0)
+			stats_cactive_add(cactive_diff);
+	}
 	arena->nactive += need_pages;
 
 	/* Keep track of trailing unused pages for later use. */
@@ -390,13 +388,10 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 						    chunk + ((run_ind+i) <<
 						    PAGE_SHIFT)), 0,
 						    PAGE_SIZE);
-					}
-#ifdef JEMALLOC_DEBUG
-					else {
+					} else if (config_debug) {
 						arena_chunk_validate_zeroed(
 						    chunk, run_ind+i);
 					}
-#endif
 				}
 			} else {
 				/*
@@ -427,40 +422,34 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 		chunk->map[run_ind-map_bias].bits =
 		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
 		    CHUNK_MAP_ALLOCATED | flag_dirty;
-#ifdef JEMALLOC_DEBUG
 		/*
 		 * The first page will always be dirtied during small run
 		 * initialization, so a validation failure here would not
 		 * actually cause an observable failure.
 		 */
-		if (flag_dirty == 0 &&
+		if (config_debug && flag_dirty == 0 &&
 		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
 		    == 0)
 			arena_chunk_validate_zeroed(chunk, run_ind);
-#endif
 		for (i = 1; i < need_pages - 1; i++) {
 			chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
 			    | (chunk->map[run_ind+i-map_bias].bits &
 			    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
-#ifdef JEMALLOC_DEBUG
-			if (flag_dirty == 0 &&
+			if (config_debug && flag_dirty == 0 &&
 			    (chunk->map[run_ind+i-map_bias].bits &
 			    CHUNK_MAP_UNZEROED) == 0)
 				arena_chunk_validate_zeroed(chunk, run_ind+i);
-#endif
 		}
 		chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
 		    - 1) << PAGE_SHIFT) |
 		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
 		    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
-#ifdef JEMALLOC_DEBUG
-		if (flag_dirty == 0 &&
+		if (config_debug && flag_dirty == 0 &&
 		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
 		    CHUNK_MAP_UNZEROED) == 0) {
 			arena_chunk_validate_zeroed(chunk,
 			    run_ind+need_pages-1);
 		}
-#endif
 	}
 }
 
@@ -498,9 +487,8 @@ arena_chunk_alloc(arena_t *arena)
 		malloc_mutex_lock(&arena->lock);
 		if (chunk == NULL)
 			return (NULL);
-#ifdef JEMALLOC_STATS
-		arena->stats.mapped += chunksize;
-#endif
+		if (config_stats)
+			arena->stats.mapped += chunksize;
 
 		chunk->arena = arena;
 		ql_elm_new(chunk, link_dirty);
@@ -526,13 +514,10 @@ arena_chunk_alloc(arena_t *arena)
 		if (zero == false) {
 			for (i = map_bias+1; i < chunk_npages-1; i++)
 				chunk->map[i-map_bias].bits = unzeroed;
-		}
-#ifdef JEMALLOC_DEBUG
-		else {
+		} else if (config_debug) {
 			for (i = map_bias+1; i < chunk_npages-1; i++)
 				assert(chunk->map[i-map_bias].bits == unzeroed);
 		}
-#endif
 		chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
 		    unzeroed;
 
@@ -571,9 +556,8 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 		malloc_mutex_unlock(&arena->lock);
 		chunk_dealloc((void *)spare, chunksize, true);
 		malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
-		arena->stats.mapped -= chunksize;
-#endif
+		if (config_stats)
+			arena->stats.mapped -= chunksize;
 	} else
 		arena->spare = chunk;
 }
@@ -677,12 +661,8 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 	ql_head(arena_chunk_map_t) mapelms;
 	arena_chunk_map_t *mapelm;
 	size_t pageind, flag_unzeroed;
-#ifdef JEMALLOC_DEBUG
 	size_t ndirty;
-#endif
-#ifdef JEMALLOC_STATS
 	size_t nmadvise;
-#endif
 
 	ql_new(&mapelms);
 
@@ -692,10 +672,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
     * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
     * mappings, but not for file-backed mappings.
     */
-#  ifdef JEMALLOC_SWAP
-	    swap_enabled ? CHUNK_MAP_UNZEROED :
-#  endif
-	    0;
+	    (config_swap && swap_enabled) ? CHUNK_MAP_UNZEROED : 0;
 #else
 	    CHUNK_MAP_UNZEROED;
 #endif
@@ -730,9 +707,6 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 			assert(pageind + npages <= chunk_npages);
 			if (mapelm->bits & CHUNK_MAP_DIRTY) {
 				size_t i;
-#ifdef JEMALLOC_STATS
-				size_t cactive_diff;
-#endif
 
 				arena_avail_tree_remove(
 				    &arena->runs_avail_dirty, mapelm);
@@ -755,17 +729,19 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 					    CHUNK_MAP_ALLOCATED;
 				}
 
-#ifdef JEMALLOC_STATS
-				/*
-				 * Update stats_cactive if nactive is crossing a
-				 * chunk multiple.
-				 */
-				cactive_diff = CHUNK_CEILING((arena->nactive +
-				    npages) << PAGE_SHIFT) -
-				    CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
-				if (cactive_diff != 0)
-					stats_cactive_add(cactive_diff);
-#endif
+				if (config_stats) {
+					/*
+					 * Update stats_cactive if nactive is
+					 * crossing a chunk multiple.
+					 */
+					size_t cactive_diff =
+					    CHUNK_CEILING((arena->nactive +
+					    npages) << PAGE_SHIFT) -
+					    CHUNK_CEILING(arena->nactive <<
+					    PAGE_SHIFT);
+					if (cactive_diff != 0)
+						stats_cactive_add(cactive_diff);
+				}
 				arena->nactive += npages;
 				/* Append to list for later processing. */
 				ql_elm_new(mapelm, u.ql_link);
@@ -782,7 +758,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 				    chunk + (uintptr_t)(pageind << PAGE_SHIFT));
 
 				assert((mapelm->bits >> PAGE_SHIFT) == 0);
-				dassert(run->magic == ARENA_RUN_MAGIC);
+				assert(run->magic == ARENA_RUN_MAGIC);
 				size_t binind = arena_bin_index(arena,
 				    run->bin);
 				arena_bin_info_t *bin_info =
@@ -793,53 +769,45 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 	}
 	assert(pageind == chunk_npages);
 
-#ifdef JEMALLOC_DEBUG
-	ndirty = chunk->ndirty;
-#endif
-#ifdef JEMALLOC_STATS
-	arena->stats.purged += chunk->ndirty;
-#endif
+	if (config_debug)
+		ndirty = chunk->ndirty;
+	if (config_stats)
+		arena->stats.purged += chunk->ndirty;
 	arena->ndirty -= chunk->ndirty;
 	chunk->ndirty = 0;
 	ql_remove(&arena->chunks_dirty, chunk, link_dirty);
 	chunk->dirtied = false;
 
 	malloc_mutex_unlock(&arena->lock);
-#ifdef JEMALLOC_STATS
-	nmadvise = 0;
-#endif
+	if (config_stats)
+		nmadvise = 0;
 	ql_foreach(mapelm, &mapelms, u.ql_link) {
 		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
 		    sizeof(arena_chunk_map_t)) + map_bias;
 		size_t npages = mapelm->bits >> PAGE_SHIFT;
 
 		assert(pageind + npages <= chunk_npages);
-#ifdef JEMALLOC_DEBUG
 		assert(ndirty >= npages);
-		ndirty -= npages;
-#endif
+		if (config_debug)
+			ndirty -= npages;
 
 #ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-		madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
-		    (npages << PAGE_SHIFT), MADV_DONTNEED);
+#  define MADV_PURGE MADV_DONTNEED
 #elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-		madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
-		    (npages << PAGE_SHIFT), MADV_FREE);
+#  define MADV_PURGE MADV_FREE
 #else
 #  error "No method defined for purging unused dirty pages."
 #endif
-
-#ifdef JEMALLOC_STATS
-		nmadvise++;
-#endif
+		madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
+		    (npages << PAGE_SHIFT), MADV_PURGE);
+#undef MADV_PURGE
+		if (config_stats)
+			nmadvise++;
 	}
-#ifdef JEMALLOC_DEBUG
 	assert(ndirty == 0);
-#endif
 	malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
-	arena->stats.nmadvise += nmadvise;
-#endif
+	if (config_stats)
+		arena->stats.nmadvise += nmadvise;
 
 	/* Deallocate runs. */
 	for (mapelm = ql_first(&mapelms); mapelm != NULL;
@@ -859,23 +827,22 @@ arena_purge(arena_t *arena, bool all)
 {
 	arena_chunk_t *chunk;
 	size_t npurgatory;
-#ifdef JEMALLOC_DEBUG
-	size_t ndirty = 0;
+	if (config_debug) {
+		size_t ndirty = 0;
 
-	ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
-	    assert(chunk->dirtied);
-	    ndirty += chunk->ndirty;
+		ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
+		    assert(chunk->dirtied);
+		    ndirty += chunk->ndirty;
+		}
+		assert(ndirty == arena->ndirty);
 	}
-	assert(ndirty == arena->ndirty);
-#endif
 	assert(arena->ndirty > arena->npurgatory || all);
 	assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
 	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
 	    arena->npurgatory) || all);
 
-#ifdef JEMALLOC_STATS
-	arena->stats.npurge++;
-#endif
+	if (config_stats)
+		arena->stats.npurge++;
 
 	/*
 	 * Compute the minimum number of pages that this thread should try to
@@ -957,9 +924,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	arena_chunk_t *chunk;
 	size_t size, run_ind, run_pages, flag_dirty;
 	arena_avail_tree_t *runs_avail;
-#ifdef JEMALLOC_STATS
-	size_t cactive_diff;
-#endif
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
@@ -981,13 +945,17 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 		size = bin_info->run_size;
 	}
 	run_pages = (size >> PAGE_SHIFT);
-#ifdef JEMALLOC_STATS
-	/* Update stats_cactive if nactive is crossing a chunk multiple. */
-	cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
-	    CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
-	if (cactive_diff != 0)
-		stats_cactive_sub(cactive_diff);
-#endif
+	if (config_stats) {
+		/*
+		 * Update stats_cactive if nactive is crossing a chunk
+		 * multiple.
+		 */
+		size_t cactive_diff = CHUNK_CEILING(arena->nactive <<
+		    PAGE_SHIFT) - CHUNK_CEILING((arena->nactive - run_pages) <<
+		    PAGE_SHIFT);
+		if (cactive_diff != 0)
+			stats_cactive_sub(cactive_diff);
+	}
 	arena->nactive -= run_pages;
 
 	/*
@@ -1144,9 +1112,8 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	    | flag_dirty | (chunk->map[pageind-map_bias].bits &
 	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
 
-#ifdef JEMALLOC_DEBUG
-	{
-		size_t tail_npages = newsize >> PAGE_SHIFT;
+	if (config_debug) {
+		UNUSED size_t tail_npages = newsize >> PAGE_SHIFT;
 		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
 		    .bits & ~PAGE_MASK) == 0);
 		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
@@ -1156,7 +1123,6 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
 		    .bits & CHUNK_MAP_ALLOCATED) != 0);
 	}
-#endif
 	chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
 	    (chunk->map[pageind+head_npages-map_bias].bits &
 	    CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
@@ -1231,9 +1197,8 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
 		    (mapelm->bits >> PAGE_SHIFT))
 		    << PAGE_SHIFT));
-#ifdef JEMALLOC_STATS
-		bin->stats.reruns++;
-#endif
+		if (config_stats)
+			bin->stats.reruns++;
 		return (run);
 	}
 	/* No existing runs have any space available. */
@@ -1255,20 +1220,19 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 		run->nextind = 0;
 		run->nfree = bin_info->nregs;
 		bitmap_init(bitmap, &bin_info->bitmap_info);
-#ifdef JEMALLOC_DEBUG
-		run->magic = ARENA_RUN_MAGIC;
-#endif
+		if (config_debug)
+			run->magic = ARENA_RUN_MAGIC;
 	}
 	malloc_mutex_unlock(&arena->lock);
 	/********************************/
 	malloc_mutex_lock(&bin->lock);
 	if (run != NULL) {
-#ifdef JEMALLOC_STATS
-		bin->stats.nruns++;
-		bin->stats.curruns++;
-		if (bin->stats.curruns > bin->stats.highruns)
-			bin->stats.highruns = bin->stats.curruns;
-#endif
+		if (config_stats) {
+			bin->stats.nruns++;
+			bin->stats.curruns++;
+			if (bin->stats.curruns > bin->stats.highruns)
+				bin->stats.highruns = bin->stats.curruns;
+		}
 		return (run);
 	}
 
@@ -1291,9 +1255,8 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
 		    (mapelm->bits >> PAGE_SHIFT))
 		    << PAGE_SHIFT));
-#ifdef JEMALLOC_STATS
-		bin->stats.reruns++;
-#endif
+		if (config_stats)
+			bin->stats.reruns++;
 		return (run);
 	}
 
@@ -1318,7 +1281,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 		 * Another thread updated runcur while this one ran without the
 		 * bin lock in arena_bin_nonfull_run_get().
 		 */
-		dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
+		assert(bin->runcur->magic == ARENA_RUN_MAGIC);
 		assert(bin->runcur->nfree > 0);
 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
 		if (run != NULL) {
@@ -1346,13 +1309,12 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 
 	bin->runcur = run;
 
-	dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
+	assert(bin->runcur->magic == ARENA_RUN_MAGIC);
 	assert(bin->runcur->nfree > 0);
 
 	return (arena_run_reg_alloc(bin->runcur, bin_info));
 }
 
-#ifdef JEMALLOC_PROF
 void
 arena_prof_accum(arena_t *arena, uint64_t accumbytes)
 {
@@ -1365,15 +1327,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
 		}
 	}
 }
-#endif
 
-#ifdef JEMALLOC_TCACHE
 void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
-#  ifdef JEMALLOC_PROF
-    , uint64_t prof_accumbytes
-#  endif
-    )
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
+    uint64_t prof_accumbytes)
 {
 	unsigned i, nfill;
 	arena_bin_t *bin;
@@ -1382,11 +1339,11 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
 
 	assert(tbin->ncached == 0);
 
-#ifdef JEMALLOC_PROF
-	malloc_mutex_lock(&arena->lock);
-	arena_prof_accum(arena, prof_accumbytes);
-	malloc_mutex_unlock(&arena->lock);
-#endif
+	if (config_prof) {
+		malloc_mutex_lock(&arena->lock);
+		arena_prof_accum(arena, prof_accumbytes);
+		malloc_mutex_unlock(&arena->lock);
+	}
 	bin = &arena->bins[binind];
 	malloc_mutex_lock(&bin->lock);
 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1400,17 +1357,16 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
 		/* Insert such that low regions get used first. */
 		tbin->avail[nfill - 1 - i] = ptr;
 	}
-#ifdef JEMALLOC_STATS
-	bin->stats.allocated += i * arena_bin_info[binind].reg_size;
-	bin->stats.nmalloc += i;
-	bin->stats.nrequests += tbin->tstats.nrequests;
-	bin->stats.nfills++;
-	tbin->tstats.nrequests = 0;
-#endif
+	if (config_stats) {
+		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
+		bin->stats.nmalloc += i;
+		bin->stats.nrequests += tbin->tstats.nrequests;
+		bin->stats.nfills++;
+		tbin->tstats.nrequests = 0;
+	}
 	malloc_mutex_unlock(&bin->lock);
 	tbin->ncached = i;
 }
-#endif
 
 void *
 arena_malloc_small(arena_t *arena, size_t size, bool zero)
@@ -1436,27 +1392,25 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 		return (NULL);
 	}
 
-#ifdef JEMALLOC_STATS
-	bin->stats.allocated += size;
-	bin->stats.nmalloc++;
-	bin->stats.nrequests++;
-#endif
+	if (config_stats) {
+		bin->stats.allocated += size;
+		bin->stats.nmalloc++;
+		bin->stats.nrequests++;
+	}
 	malloc_mutex_unlock(&bin->lock);
-#ifdef JEMALLOC_PROF
-	if (isthreaded == false) {
+	if (config_prof && isthreaded == false) {
 		malloc_mutex_lock(&arena->lock);
 		arena_prof_accum(arena, size);
 		malloc_mutex_unlock(&arena->lock);
 	}
-#endif
 
 	if (zero == false) {
-#ifdef JEMALLOC_FILL
-		if (opt_junk)
-			memset(ret, 0xa5, size);
-		else if (opt_zero)
-			memset(ret, 0, size);
-#endif
+		if (config_fill) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		}
 	} else
 		memset(ret, 0, size);
 
@@ -1476,31 +1430,31 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 		malloc_mutex_unlock(&arena->lock);
 		return (NULL);
 	}
-#ifdef JEMALLOC_STATS
-	arena->stats.nmalloc_large++;
-	arena->stats.nrequests_large++;
-	arena->stats.allocated_large += size;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
-	if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
-	    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
-		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
+	if (config_stats) {
+		arena->stats.nmalloc_large++;
+		arena->stats.nrequests_large++;
+		arena->stats.allocated_large += size;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+		if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
+		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
+			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
+			    arena->stats.lstats[(size >> PAGE_SHIFT)
+			    - 1].curruns;
+		}
 	}
-#endif
-#ifdef JEMALLOC_PROF
-	arena_prof_accum(arena, size);
-#endif
+	if (config_prof)
+		arena_prof_accum(arena, size);
 	malloc_mutex_unlock(&arena->lock);
 
 	if (zero == false) {
-#ifdef JEMALLOC_FILL
-		if (opt_junk)
-			memset(ret, 0xa5, size);
-		else if (opt_zero)
-			memset(ret, 0, size);
-#endif
+		if (config_fill) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		}
 	}
 
 	return (ret);
@@ -1514,18 +1468,14 @@ arena_malloc(size_t size, bool zero)
 	assert(QUANTUM_CEILING(size) <= arena_maxclass);
 
 	if (size <= small_maxclass) {
-#ifdef JEMALLOC_TCACHE
 		tcache_t *tcache;
 
-		if ((tcache = tcache_get()) != NULL)
+		if (config_tcache && (tcache = tcache_get()) != NULL)
 			return (tcache_alloc_small(tcache, size, zero));
 		else
-
-#endif
 			return (arena_malloc_small(choose_arena(), size, zero));
 	} else {
-#ifdef JEMALLOC_TCACHE
-		if (size <= tcache_maxclass) {
+		if (config_tcache && size <= tcache_maxclass) {
 			tcache_t *tcache;
 
 			if ((tcache = tcache_get()) != NULL)
@@ -1535,7 +1485,6 @@ arena_malloc(size_t size, bool zero)
 				    size, zero));
 			}
 		} else
-#endif
 			return (arena_malloc_large(choose_arena(), size, zero));
 	}
 }
@@ -1586,29 +1535,28 @@ arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
 		}
 	}
 
-#ifdef JEMALLOC_STATS
-	arena->stats.nmalloc_large++;
-	arena->stats.nrequests_large++;
-	arena->stats.allocated_large += size;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
-	if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
-	    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
-		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
+	if (config_stats) {
+		arena->stats.nmalloc_large++;
+		arena->stats.nrequests_large++;
+		arena->stats.allocated_large += size;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+		if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
+		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
+			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
+			    arena->stats.lstats[(size >> PAGE_SHIFT)
+			    - 1].curruns;
+		}
 	}
-#endif
 	malloc_mutex_unlock(&arena->lock);
 
-#ifdef JEMALLOC_FILL
-	if (zero == false) {
+	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 	}
-#endif
 	return (ret);
 }
 
@@ -1631,7 +1579,7 @@ arena_salloc(const void *ptr)
 		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
 		    (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
 		    PAGE_SHIFT));
-		dassert(run->magic == ARENA_RUN_MAGIC);
+		assert(run->magic == ARENA_RUN_MAGIC);
 		size_t binind = arena_bin_index(chunk->arena, run->bin);
 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
 		assert(((uintptr_t)ptr - ((uintptr_t)run +
@@ -1647,7 +1595,6 @@ arena_salloc(const void *ptr)
 	return (ret);
 }
 
-#ifdef JEMALLOC_PROF
 void
 arena_prof_promoted(const void *ptr, size_t size)
 {
@@ -1685,7 +1632,7 @@ arena_salloc_demote(const void *ptr)
 		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
 		    (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
 		    PAGE_SHIFT));
-		dassert(run->magic == ARENA_RUN_MAGIC);
+		assert(run->magic == ARENA_RUN_MAGIC);
 		size_t binind = arena_bin_index(chunk->arena, run->bin);
 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
 		assert(((uintptr_t)ptr - ((uintptr_t)run +
@@ -1707,7 +1654,6 @@ arena_salloc_demote(const void *ptr)
 
 	return (ret);
 }
-#endif
 
 static void
 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
@@ -1781,16 +1727,14 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 		    ((past - run_ind) << PAGE_SHIFT), false);
 		/* npages = past - run_ind; */
 	}
-#ifdef JEMALLOC_DEBUG
-	run->magic = 0;
-#endif
+	if (config_debug)
+		run->magic = 0;
 	arena_run_dalloc(arena, run, true);
 	malloc_mutex_unlock(&arena->lock);
 	/****************************/
 	malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
-	bin->stats.curruns--;
-#endif
+	if (config_stats)
+		bin->stats.curruns--;
 }
 
 static void
@@ -1836,25 +1780,20 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	size_t pageind;
 	arena_run_t *run;
 	arena_bin_t *bin;
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
 	size_t size;
-#endif
 
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
 	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
-	dassert(run->magic == ARENA_RUN_MAGIC);
+	assert(run->magic == ARENA_RUN_MAGIC);
 	bin = run->bin;
 	size_t binind = arena_bin_index(arena, bin);
 	arena_bin_info_t *bin_info = &arena_bin_info[binind];
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
-	size = bin_info->reg_size;
-#endif
+	if (config_fill || config_stats)
+		size = bin_info->reg_size;
 
-#ifdef JEMALLOC_FILL
-	if (opt_junk)
+	if (config_fill && opt_junk)
 		memset(ptr, 0x5a, size);
-#endif
 
 	arena_run_reg_dalloc(run, ptr);
 	if (run->nfree == bin_info->nregs) {
@@ -1863,13 +1802,12 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	} else if (run->nfree == 1 && run != bin->runcur)
 		arena_bin_lower_run(arena, chunk, run, bin);
 
-#ifdef JEMALLOC_STATS
-	bin->stats.allocated -= size;
-	bin->stats.ndalloc++;
-#endif
+	if (config_stats) {
+		bin->stats.allocated -= size;
+		bin->stats.ndalloc++;
+	}
 }
 
-#ifdef JEMALLOC_STATS
 void
 arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
@@ -1907,10 +1845,10 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
 		bstats[i].nmalloc += bin->stats.nmalloc;
 		bstats[i].ndalloc += bin->stats.ndalloc;
 		bstats[i].nrequests += bin->stats.nrequests;
-#ifdef JEMALLOC_TCACHE
-		bstats[i].nfills += bin->stats.nfills;
-		bstats[i].nflushes += bin->stats.nflushes;
-#endif
+		if (config_tcache) {
+			bstats[i].nfills += bin->stats.nfills;
+			bstats[i].nflushes += bin->stats.nflushes;
+		}
 		bstats[i].nruns += bin->stats.nruns;
 		bstats[i].reruns += bin->stats.reruns;
 		bstats[i].highruns += bin->stats.highruns;
@@ -1918,37 +1856,24 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
 		malloc_mutex_unlock(&bin->lock);
 	}
 }
-#endif
 
 void
 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
 
-	/* Large allocation. */
-#ifdef JEMALLOC_FILL
-#  ifndef JEMALLOC_STATS
-	if (opt_junk)
-#  endif
-#endif
-	{
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
+	if (config_fill || config_stats) {
 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 		    PAGE_SHIFT;
 		size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
-#endif
 
-#ifdef JEMALLOC_FILL
-#  ifdef JEMALLOC_STATS
-		if (opt_junk)
-#  endif
+		if (config_fill && opt_junk)
 			memset(ptr, 0x5a, size);
-#endif
-#ifdef JEMALLOC_STATS
-		arena->stats.ndalloc_large++;
-		arena->stats.allocated_large -= size;
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
-#endif
+		if (config_stats) {
+			arena->stats.ndalloc_large++;
+			arena->stats.allocated_large -= size;
+			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
+			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
+		}
 	}
 
 	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
@@ -1968,24 +1893,25 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	malloc_mutex_lock(&arena->lock);
 	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
 	    true);
-#ifdef JEMALLOC_STATS
-	arena->stats.ndalloc_large++;
-	arena->stats.allocated_large -= oldsize;
-	arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
-	arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
-
-	arena->stats.nmalloc_large++;
-	arena->stats.nrequests_large++;
-	arena->stats.allocated_large += size;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
-	arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
-	if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
-	    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
-		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
+	if (config_stats) {
+		arena->stats.ndalloc_large++;
+		arena->stats.allocated_large -= oldsize;
+		arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
+		arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
+
+		arena->stats.nmalloc_large++;
+		arena->stats.nrequests_large++;
+		arena->stats.allocated_large += size;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
+		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+		if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
+		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
+			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
+			    arena->stats.lstats[(size >> PAGE_SHIFT)
+			    - 1].curruns;
+		}
 	}
-#endif
 	malloc_mutex_unlock(&arena->lock);
 }
 
@@ -2038,25 +1964,29 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
 		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
 
-#ifdef JEMALLOC_STATS
-		arena->stats.ndalloc_large++;
-		arena->stats.allocated_large -= oldsize;
-		arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
-		arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
-
-		arena->stats.nmalloc_large++;
-		arena->stats.nrequests_large++;
-		arena->stats.allocated_large += size;
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
-		arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
-		if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
-		    arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
-			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
-			    arena->stats.lstats[(size >> PAGE_SHIFT) -
-			    1].curruns;
+		if (config_stats) {
+			arena->stats.ndalloc_large++;
+			arena->stats.allocated_large -= oldsize;
+			arena->stats.lstats[(oldsize >> PAGE_SHIFT)
+			    - 1].ndalloc++;
+			arena->stats.lstats[(oldsize >> PAGE_SHIFT)
+			    - 1].curruns--;
+
+			arena->stats.nmalloc_large++;
+			arena->stats.nrequests_large++;
+			arena->stats.allocated_large += size;
+			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+			arena->stats.lstats[(size >> PAGE_SHIFT)
+			    - 1].nrequests++;
+			arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+			if (arena->stats.lstats[(size >> PAGE_SHIFT)
+			    - 1].curruns > arena->stats.lstats[(size >>
+			    PAGE_SHIFT) - 1].highruns) {
+				arena->stats.lstats[(size >> PAGE_SHIFT)
+				    - 1].highruns = arena->stats.lstats[(size >>
+				    PAGE_SHIFT) - 1].curruns;
+			}
 		}
-#endif
 		malloc_mutex_unlock(&arena->lock);
 		return (false);
 	}
@@ -2078,12 +2008,10 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
 	psize = PAGE_CEILING(size + extra);
 	if (psize == oldsize) {
 		/* Same size class. */
-#ifdef JEMALLOC_FILL
-		if (opt_junk && size < oldsize) {
+		if (config_fill && opt_junk && size < oldsize) {
 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
 			    size);
 		}
-#endif
 		return (false);
 	} else {
 		arena_chunk_t *chunk;
@@ -2091,16 +2019,14 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
 
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
-		dassert(arena->magic == ARENA_MAGIC);
+		assert(arena->magic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
-#ifdef JEMALLOC_FILL
-			/* Fill before shrinking in order avoid a race. */
-			if (opt_junk) {
+			/* Fill before shrinking in order to avoid a race. */
+			if (config_fill && opt_junk) {
 				memset((void *)((uintptr_t)ptr + size), 0x5a,
 				    oldsize - size);
 			}
-#endif
 			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
 			    psize);
 			return (false);
@@ -2108,12 +2034,11 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
 			    oldsize, PAGE_CEILING(size),
 			    psize - PAGE_CEILING(size), zero);
-#ifdef JEMALLOC_FILL
-			if (ret == false && zero == false && opt_zero) {
+			if (config_fill && ret == false && zero == false &&
+			    opt_zero) {
 				memset((void *)((uintptr_t)ptr + oldsize), 0,
 				    size - oldsize);
 			}
-#endif
 			return (ret);
 		}
 	}
@@ -2135,12 +2060,10 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 			    SMALL_SIZE2BIN(size + extra) ==
 			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
 			    size + extra >= oldsize)) {
-#ifdef JEMALLOC_FILL
-				if (opt_junk && size < oldsize) {
+				if (config_fill && opt_junk && size < oldsize) {
 					memset((void *)((uintptr_t)ptr + size),
 					    0x5a, oldsize - size);
 				}
-#endif
 				return (ptr);
 			}
 		} else {
@@ -2222,22 +2145,21 @@ arena_new(arena_t *arena, unsigned ind)
 	if (malloc_mutex_init(&arena->lock))
 		return (true);
 
-#ifdef JEMALLOC_STATS
-	memset(&arena->stats, 0, sizeof(arena_stats_t));
-	arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
-	    sizeof(malloc_large_stats_t));
-	if (arena->stats.lstats == NULL)
-		return (true);
-	memset(arena->stats.lstats, 0, nlclasses *
-	    sizeof(malloc_large_stats_t));
-#  ifdef JEMALLOC_TCACHE
-	ql_new(&arena->tcache_ql);
-#  endif
-#endif
+	if (config_stats) {
+		memset(&arena->stats, 0, sizeof(arena_stats_t));
+		arena->stats.lstats =
+		    (malloc_large_stats_t *)base_alloc(nlclasses *
+		    sizeof(malloc_large_stats_t));
+		if (arena->stats.lstats == NULL)
+			return (true);
+		memset(arena->stats.lstats, 0, nlclasses *
+		    sizeof(malloc_large_stats_t));
+		if (config_tcache)
+			ql_new(&arena->tcache_ql);
+	}
 
-#ifdef JEMALLOC_PROF
-	arena->prof_accumbytes = 0;
-#endif
+	if (config_prof)
+		arena->prof_accumbytes = 0;
 
 	/* Initialize chunks. */
 	ql_new(&arena->chunks_dirty);
@@ -2251,84 +2173,41 @@ arena_new(arena_t *arena, unsigned ind)
 	arena_avail_tree_new(&arena->runs_avail_dirty);
 
 	/* Initialize bins. */
-	i = 0;
-#ifdef JEMALLOC_TINY
-	/* (2^n)-spaced tiny bins. */
-	for (; i < ntbins; i++) {
-		bin = &arena->bins[i];
-		if (malloc_mutex_init(&bin->lock))
-			return (true);
-		bin->runcur = NULL;
-		arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
-	}
-#endif
-
-	/* Quantum-spaced bins. */
-	for (; i < ntbins + nqbins; i++) {
-		bin = &arena->bins[i];
-		if (malloc_mutex_init(&bin->lock))
-			return (true);
-		bin->runcur = NULL;
-		arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
-	}
-
-	/* Cacheline-spaced bins. */
-	for (; i < ntbins + nqbins + ncbins; i++) {
-		bin = &arena->bins[i];
-		if (malloc_mutex_init(&bin->lock))
-			return (true);
-		bin->runcur = NULL;
-		arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
-	}
-
-	/* Subpage-spaced bins. */
-	for (; i < nbins; i++) {
+	for (i = 0; i < nbins; i++) {
 		bin = &arena->bins[i];
 		if (malloc_mutex_init(&bin->lock))
 			return (true);
 		bin->runcur = NULL;
 		arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
+		if (config_stats)
+			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
 
-#ifdef JEMALLOC_DEBUG
-	arena->magic = ARENA_MAGIC;
-#endif
+	if (config_debug)
+		arena->magic = ARENA_MAGIC;
 
 	return (false);
 }
 
-#ifdef JEMALLOC_DEBUG
 static void
 small_size2bin_validate(void)
 {
 	size_t i, size, binind;
 
 	i = 1;
-#  ifdef JEMALLOC_TINY
 	/* Tiny. */
-	for (; i < (1U << LG_TINY_MIN); i++) {
-		size = pow2_ceil(1U << LG_TINY_MIN);
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		assert(SMALL_SIZE2BIN(i) == binind);
-	}
-	for (; i < qspace_min; i++) {
-		size = pow2_ceil(i);
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		assert(SMALL_SIZE2BIN(i) == binind);
+	if (config_tiny) {
+		for (; i < (1U << LG_TINY_MIN); i++) {
+			size = pow2_ceil(1U << LG_TINY_MIN);
+			binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+			assert(SMALL_SIZE2BIN(i) == binind);
+		}
+		for (; i < qspace_min; i++) {
+			size = pow2_ceil(i);
+			binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+			assert(SMALL_SIZE2BIN(i) == binind);
+		}
 	}
-#  endif
 	/* Quantum-spaced. */
 	for (; i <= qspace_max; i++) {
 		size = QUANTUM_CEILING(i);
@@ -2350,7 +2229,6 @@ small_size2bin_validate(void)
 		assert(SMALL_SIZE2BIN(i) == binind);
 	}
 }
-#endif
 
 static bool
 small_size2bin_init(void)
@@ -2363,9 +2241,8 @@ small_size2bin_init(void)
 		return (small_size2bin_init_hard());
 
 	small_size2bin = const_small_size2bin;
-#ifdef JEMALLOC_DEBUG
-	small_size2bin_validate();
-#endif
+	if (config_debug)
+		small_size2bin_validate();
 	return (false);
 }
 
@@ -2388,19 +2265,19 @@ small_size2bin_init_hard(void)
 		return (true);
 
 	i = 1;
-#ifdef JEMALLOC_TINY
 	/* Tiny. */
-	for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
-		size = pow2_ceil(1U << LG_TINY_MIN);
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		CUSTOM_SMALL_SIZE2BIN(i) = binind;
-	}
-	for (; i < qspace_min; i += TINY_MIN) {
-		size = pow2_ceil(i);
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		CUSTOM_SMALL_SIZE2BIN(i) = binind;
+	if (config_tiny) {
+		for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
+			size = pow2_ceil(1U << LG_TINY_MIN);
+			binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+			CUSTOM_SMALL_SIZE2BIN(i) = binind;
+		}
+		for (; i < qspace_min; i += TINY_MIN) {
+			size = pow2_ceil(i);
+			binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+			CUSTOM_SMALL_SIZE2BIN(i) = binind;
+		}
 	}
-#endif
 	/* Quantum-spaced. */
 	for (; i <= qspace_max; i += TINY_MIN) {
 		size = QUANTUM_CEILING(i);
@@ -2423,9 +2300,8 @@ small_size2bin_init_hard(void)
 	}
 
 	small_size2bin = custom_small_size2bin;
-#ifdef JEMALLOC_DEBUG
-	small_size2bin_validate();
-#endif
+	if (config_debug)
+		small_size2bin_validate();
 	return (false);
 #undef CUSTOM_SMALL_SIZE2BIN
 }
@@ -2448,9 +2324,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 	uint32_t try_nregs, good_nregs;
 	uint32_t try_hdr_size, good_hdr_size;
 	uint32_t try_bitmap_offset, good_bitmap_offset;
-#ifdef JEMALLOC_PROF
 	uint32_t try_ctx0_offset, good_ctx0_offset;
-#endif
 	uint32_t try_reg0_offset, good_reg0_offset;
 
 	assert(min_run_size >= PAGE_SIZE);
@@ -2481,8 +2355,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 		try_bitmap_offset = try_hdr_size;
 		/* Add space for bitmap. */
 		try_hdr_size += bitmap_size(try_nregs);
-#ifdef JEMALLOC_PROF
-		if (opt_prof && prof_promote == false) {
+		if (config_prof && opt_prof && prof_promote == false) {
 			/* Pad to a quantum boundary. */
 			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
 			try_ctx0_offset = try_hdr_size;
@@ -2490,7 +2363,6 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
 		} else
 			try_ctx0_offset = 0;
-#endif
 		try_reg0_offset = try_run_size - (try_nregs *
 		    bin_info->reg_size);
 	} while (try_hdr_size > try_reg0_offset);
@@ -2504,9 +2376,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 		good_nregs = try_nregs;
 		good_hdr_size = try_hdr_size;
 		good_bitmap_offset = try_bitmap_offset;
-#ifdef JEMALLOC_PROF
 		good_ctx0_offset = try_ctx0_offset;
-#endif
 		good_reg0_offset = try_reg0_offset;
 
 		/* Try more aggressive settings. */
@@ -2526,8 +2396,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 			try_bitmap_offset = try_hdr_size;
 			/* Add space for bitmap. */
 			try_hdr_size += bitmap_size(try_nregs);
-#ifdef JEMALLOC_PROF
-			if (opt_prof && prof_promote == false) {
+			if (config_prof && opt_prof && prof_promote == false) {
 				/* Pad to a quantum boundary. */
 				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
 				try_ctx0_offset = try_hdr_size;
@@ -2537,7 +2406,6 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 				try_hdr_size += try_nregs *
 				    sizeof(prof_ctx_t *);
 			}
-#endif
 			try_reg0_offset = try_run_size - (try_nregs *
 			    bin_info->reg_size);
 		} while (try_hdr_size > try_reg0_offset);
@@ -2553,9 +2421,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 	bin_info->run_size = good_run_size;
 	bin_info->nregs = good_nregs;
 	bin_info->bitmap_offset = good_bitmap_offset;
-#ifdef JEMALLOC_PROF
 	bin_info->ctx0_offset = good_ctx0_offset;
-#endif
 	bin_info->reg0_offset = good_reg0_offset;
 
 	return (good_run_size);
@@ -2574,15 +2440,17 @@ bin_info_init(void)
 
 	prev_run_size = PAGE_SIZE;
 	i = 0;
-#ifdef JEMALLOC_TINY
 	/* (2^n)-spaced tiny bins. */
-	for (; i < ntbins; i++) {
-		bin_info = &arena_bin_info[i];
-		bin_info->reg_size = (1U << (LG_TINY_MIN + i));
-		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
-		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+	if (config_tiny) {
+		for (; i < ntbins; i++) {
+			bin_info = &arena_bin_info[i];
+			bin_info->reg_size = (1U << (LG_TINY_MIN + i));
+			prev_run_size = bin_info_run_size_calc(bin_info,
+			    prev_run_size);
+			bitmap_info_init(&bin_info->bitmap_info,
+			    bin_info->nregs);
+		}
 	}
-#endif
 
 	/* Quantum-spaced bins. */
 	for (; i < ntbins + nqbins; i++) {
@@ -2631,9 +2499,8 @@ arena_boot(void)
 	assert(sspace_min < PAGE_SIZE);
 	sspace_max = PAGE_SIZE - SUBPAGE;
 
-#ifdef JEMALLOC_TINY
-	assert(LG_QUANTUM >= LG_TINY_MIN);
-#endif
+	if (config_tiny)
+		assert(LG_QUANTUM >= LG_TINY_MIN);
 	assert(ntbins <= LG_QUANTUM);
 	nqbins = qspace_max >> LG_QUANTUM;
 	ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
@@ -2652,23 +2519,18 @@ arena_boot(void)
 	 * small size classes, plus a "not small" size class must be stored in
 	 * 8 bits of arena_chunk_map_t's bits field.
 	 */
-#ifdef JEMALLOC_PROF
-	if (opt_prof && prof_promote) {
-		if (nbins > 255) {
-		    char line_buf[UMAX2S_BUFSIZE];
-		    malloc_write("<jemalloc>: Too many small size classes (");
-		    malloc_write(u2s(nbins, 10, line_buf));
-		    malloc_write(" > max 255)\n");
-		    abort();
-		}
-	} else
-#endif
-	if (nbins > 256) {
-	    char line_buf[UMAX2S_BUFSIZE];
-	    malloc_write("<jemalloc>: Too many small size classes (");
-	    malloc_write(u2s(nbins, 10, line_buf));
-	    malloc_write(" > max 256)\n");
-	    abort();
+	if (config_prof && opt_prof && prof_promote && nbins > 255) {
+		char line_buf[UMAX2S_BUFSIZE];
+		malloc_write("<jemalloc>: Too many small size classes (");
+		malloc_write(u2s(nbins, 10, line_buf));
+		malloc_write(" > max 255)\n");
+		abort();
+	} else if (nbins > 256) {
+		char line_buf[UMAX2S_BUFSIZE];
+		malloc_write("<jemalloc>: Too many small size classes (");
+		malloc_write(u2s(nbins, 10, line_buf));
+		malloc_write(" > max 256)\n");
+		abort();
 	}
 
 	/*
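
The arena.c conversions above depend on the config_* constants this patch
adds to include/jemalloc/internal/jemalloc_internal.h.in (see the
diffstat).  A minimal sketch of that pattern, using the JEMALLOC_STATS cpp
symbol that configure already defines:

  static const bool config_stats =
  #ifdef JEMALLOC_STATS
      true
  #else
      false
  #endif
      ;

  	if (config_stats)
  		bin->stats.nmalloc++;	/* Discarded unless --enable-stats. */

Because config_stats is a compile-time constant, any reasonable optimizer
removes the guarded statements entirely, so the generated code is
equivalent to the old cpp-conditional version.
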
diff --git a/src/chunk.c b/src/chunk.c
index d190c6f..57ab20d 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -5,18 +5,12 @@
 /* Data. */
 
 size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;
-#ifdef JEMALLOC_SWAP
 bool	opt_overcommit = true;
-#endif
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 malloc_mutex_t	chunks_mtx;
 chunk_stats_t	stats_chunks;
-#endif
 
-#ifdef JEMALLOC_IVSALLOC
 rtree_t		*chunks_rtree;
-#endif
 
 /* Various chunk-related settings. */
 size_t		chunksize;
@@ -41,67 +35,50 @@ chunk_alloc(size_t size, bool base, bool *zero)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-#ifdef JEMALLOC_SWAP
-	if (swap_enabled) {
+	if (config_swap && swap_enabled) {
 		ret = chunk_alloc_swap(size, zero);
 		if (ret != NULL)
 			goto RETURN;
 	}
 
 	if (swap_enabled == false || opt_overcommit) {
-#endif
-#ifdef JEMALLOC_DSS
-		ret = chunk_alloc_dss(size, zero);
-		if (ret != NULL)
-			goto RETURN;
-#endif
+		if (config_dss) {
+			ret = chunk_alloc_dss(size, zero);
+			if (ret != NULL)
+				goto RETURN;
+		}
 		ret = chunk_alloc_mmap(size);
 		if (ret != NULL) {
 			*zero = true;
 			goto RETURN;
 		}
-#ifdef JEMALLOC_SWAP
 	}
-#endif
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
 RETURN:
-#ifdef JEMALLOC_IVSALLOC
-	if (base == false && ret != NULL) {
+	if (config_ivsalloc && base == false && ret != NULL) {
 		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
 			chunk_dealloc(ret, size, true);
 			return (NULL);
 		}
 	}
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	if (ret != NULL) {
-#  ifdef JEMALLOC_PROF
+	if ((config_stats || config_prof) && ret != NULL) {
 		bool gdump;
-#  endif
 		malloc_mutex_lock(&chunks_mtx);
-#  ifdef JEMALLOC_STATS
-		stats_chunks.nchunks += (size / chunksize);
-#  endif
+		if (config_stats)
+			stats_chunks.nchunks += (size / chunksize);
 		stats_chunks.curchunks += (size / chunksize);
 		if (stats_chunks.curchunks > stats_chunks.highchunks) {
 			stats_chunks.highchunks = stats_chunks.curchunks;
-#  ifdef JEMALLOC_PROF
-			gdump = true;
-#  endif
-		}
-#  ifdef JEMALLOC_PROF
-		else
+			if (config_prof)
+				gdump = true;
+		} else if (config_prof)
 			gdump = false;
-#  endif
 		malloc_mutex_unlock(&chunks_mtx);
-#  ifdef JEMALLOC_PROF
-		if (opt_prof && opt_prof_gdump && gdump)
+		if (config_prof && opt_prof && opt_prof_gdump && gdump)
 			prof_gdump();
-#  endif
 	}
-#endif
 
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
@@ -116,24 +93,20 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-#ifdef JEMALLOC_IVSALLOC
-	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	malloc_mutex_lock(&chunks_mtx);
-	stats_chunks.curchunks -= (size / chunksize);
-	malloc_mutex_unlock(&chunks_mtx);
-#endif
+	if (config_ivsalloc)
+		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+	if (config_stats || config_prof) {
+		malloc_mutex_lock(&chunks_mtx);
+		stats_chunks.curchunks -= (size / chunksize);
+		malloc_mutex_unlock(&chunks_mtx);
+	}
 
 	if (unmap) {
-#ifdef JEMALLOC_SWAP
-		if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
+		if (config_swap && swap_enabled && chunk_dealloc_swap(chunk,
+		    size) == false)
 			return;
-#endif
-#ifdef JEMALLOC_DSS
-		if (chunk_dealloc_dss(chunk, size) == false)
+		if (config_dss && chunk_dealloc_dss(chunk, size) == false)
 			return;
-#endif
 		chunk_dealloc_mmap(chunk, size);
 	}
 }
@@ -148,26 +121,23 @@ chunk_boot(void)
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> PAGE_SHIFT);
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	if (malloc_mutex_init(&chunks_mtx))
-		return (true);
-	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-#endif
-#ifdef JEMALLOC_SWAP
-	if (chunk_swap_boot())
+	if (config_stats || config_prof) {
+		if (malloc_mutex_init(&chunks_mtx))
+			return (true);
+		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+	}
+	if (config_swap && chunk_swap_boot())
 		return (true);
-#endif
 	if (chunk_mmap_boot())
 		return (true);
-#ifdef JEMALLOC_DSS
-	if (chunk_dss_boot())
+	if (config_dss && chunk_dss_boot())
 		return (true);
-#endif
-#ifdef JEMALLOC_IVSALLOC
-	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
-	if (chunks_rtree == NULL)
-		return (true);
-#endif
+	if (config_ivsalloc) {
+		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+		    opt_lg_chunk);
+		if (chunks_rtree == NULL)
+			return (true);
+	}
 
 	return (false);
 }
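
For context on the chunks_rtree registration in chunk_alloc() above: the
rtree backs ivsalloc(), the validity-checking size query compiled in under
--enable-ivsalloc.  A hedged sketch of that consumer (the actual
definition lives in jemalloc_internal.h.in, outside this section):

  JEMALLOC_INLINE size_t
  ivsalloc(const void *ptr)
  {

  	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
  	if (rtree_get(chunks_rtree, (uintptr_t)ptr) == NULL)
  		return (0);
  	return (isalloc(ptr));
  }
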
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 5c0e290..c25baea 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -1,6 +1,5 @@
 #define	JEMALLOC_CHUNK_DSS_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_DSS
 /******************************************************************************/
 /* Data. */
 
@@ -35,6 +34,8 @@ chunk_recycle_dss(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
 
+	cassert(config_dss);
+
 	key.addr = NULL;
 	key.size = size;
 	malloc_mutex_lock(&dss_mtx);
@@ -74,6 +75,8 @@ chunk_alloc_dss(size_t size, bool *zero)
 {
 	void *ret;
 
+	cassert(config_dss);
+
 	ret = chunk_recycle_dss(size, zero);
 	if (ret != NULL)
 		return (ret);
@@ -131,6 +134,8 @@ chunk_dealloc_dss_record(void *chunk, size_t size)
 {
 	extent_node_t *xnode, *node, *prev, key;
 
+	cassert(config_dss);
+
 	xnode = NULL;
 	while (true) {
 		key.addr = (void *)((uintptr_t)chunk + size);
@@ -204,6 +209,8 @@ chunk_in_dss(void *chunk)
 {
 	bool ret;
 
+	cassert(config_dss);
+
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max)
@@ -220,6 +227,8 @@ chunk_dealloc_dss(void *chunk, size_t size)
 {
 	bool ret;
 
+	cassert(config_dss);
+
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
@@ -269,6 +278,8 @@ bool
 chunk_dss_boot(void)
 {
 
+	cassert(config_dss);
+
 	if (malloc_mutex_init(&dss_mtx))
 		return (true);
 	dss_base = sbrk(0);
@@ -281,4 +292,3 @@ chunk_dss_boot(void)
 }
 
 /******************************************************************************/
-#endif /* JEMALLOC_DSS */
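
chunk_dss.c is now compiled unconditionally, so the cassert() calls added
above document (and, in debug builds, enforce) that these functions are
unreachable unless --enable-dss was configured.  A plausible definition,
assuming it sits next to the config_* constants in jemalloc_internal.h.in:

  /* Assert that a particular configuration is in effect. */
  #define	cassert(c) do {						\
  	if ((c) == false)						\
  		assert(false);						\
  } while (0)
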
diff --git a/src/chunk_swap.c b/src/chunk_swap.c
index cb25ae0..fe9ca30 100644
--- a/src/chunk_swap.c
+++ b/src/chunk_swap.c
@@ -1,6 +1,6 @@
 #define	JEMALLOC_CHUNK_SWAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SWAP
+
 /******************************************************************************/
 /* Data. */
 
@@ -9,9 +9,7 @@ bool		swap_enabled;
 bool		swap_prezeroed;
 size_t		swap_nfds;
 int		*swap_fds;
-#ifdef JEMALLOC_STATS
 size_t		swap_avail;
-#endif
 
 /* Base address of the mmap()ed file(s). */
 static void	*swap_base;
@@ -42,6 +40,8 @@ chunk_recycle_swap(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
 
+	cassert(config_swap);
+
 	key.addr = NULL;
 	key.size = size;
 	malloc_mutex_lock(&swap_mtx);
@@ -65,9 +65,8 @@ chunk_recycle_swap(size_t size, bool *zero)
 			node->size -= size;
 			extent_tree_szad_insert(&swap_chunks_szad, node);
 		}
-#ifdef JEMALLOC_STATS
-		swap_avail -= size;
-#endif
+		if (config_stats)
+			swap_avail -= size;
 		malloc_mutex_unlock(&swap_mtx);
 
 		if (*zero)
@@ -84,6 +83,7 @@ chunk_alloc_swap(size_t size, bool *zero)
 {
 	void *ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	ret = chunk_recycle_swap(size, zero);
@@ -94,9 +94,8 @@ chunk_alloc_swap(size_t size, bool *zero)
 	if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
 		ret = swap_end;
 		swap_end = (void *)((uintptr_t)swap_end + size);
-#ifdef JEMALLOC_STATS
-		swap_avail -= size;
-#endif
+		if (config_stats)
+			swap_avail -= size;
 		malloc_mutex_unlock(&swap_mtx);
 
 		if (swap_prezeroed)
@@ -116,6 +115,8 @@ chunk_dealloc_swap_record(void *chunk, size_t size)
 {
 	extent_node_t *xnode, *node, *prev, key;
 
+	cassert(config_swap);
+
 	xnode = NULL;
 	while (true) {
 		key.addr = (void *)((uintptr_t)chunk + size);
@@ -189,6 +190,7 @@ chunk_in_swap(void *chunk)
 {
 	bool ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	malloc_mutex_lock(&swap_mtx);
@@ -207,6 +209,7 @@ chunk_dealloc_swap(void *chunk, size_t size)
 {
 	bool ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	malloc_mutex_lock(&swap_mtx);
@@ -237,9 +240,8 @@ chunk_dealloc_swap(void *chunk, size_t size)
 		} else
 			madvise(chunk, size, MADV_DONTNEED);
 
-#ifdef JEMALLOC_STATS
-		swap_avail += size;
-#endif
+		if (config_stats)
+			swap_avail += size;
 		ret = false;
 		goto RETURN;
 	}
@@ -260,6 +262,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
 	size_t cumsize, voff;
 	size_t sizes[nfds];
 
+	cassert(config_swap);
+
 	malloc_mutex_lock(&swap_mtx);
 
 	/* Get file sizes. */
@@ -362,9 +366,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
 	memcpy(swap_fds, fds, nfds * sizeof(int));
 	swap_nfds = nfds;
 
-#ifdef JEMALLOC_STATS
-	swap_avail = cumsize;
-#endif
+	if (config_stats)
+		swap_avail = cumsize;
 
 	swap_enabled = true;
 
@@ -378,6 +381,8 @@ bool
 chunk_swap_boot(void)
 {
 
+	cassert(config_swap);
+
 	if (malloc_mutex_init(&swap_mtx))
 		return (true);
 
@@ -385,9 +390,8 @@ chunk_swap_boot(void)
 	swap_prezeroed = false; /* swap.* mallctl's depend on this. */
 	swap_nfds = 0;
 	swap_fds = NULL;
-#ifdef JEMALLOC_STATS
-	swap_avail = 0;
-#endif
+	if (config_stats)
+		swap_avail = 0;
 	swap_base = NULL;
 	swap_end = NULL;
 	swap_max = NULL;
@@ -397,6 +401,3 @@ chunk_swap_boot(void)
 
 	return (false);
 }
-
-/******************************************************************************/
-#endif /* JEMALLOC_SWAP */
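
swap_avail illustrates the tradeoff named in the commit message: the
variable now always exists, but it is only maintained when config_stats is
true.  Applications observe it through the "swap.avail" mallctl, which
(given the CTL_RO_CGEN(config_stats, ...) generator added to ctl.c below)
is expected to fail with ENOENT when stats support is compiled out.  A
usage sketch from the application side:

  size_t avail, sz = sizeof(avail);

  /* Fails with ENOENT unless jemalloc was built with --enable-stats. */
  if (mallctl("swap.avail", &avail, &sz, NULL, 0) == 0)
  	printf("swap avail: %zu\n", avail);
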
diff --git a/src/ckh.c b/src/ckh.c
index 43fcc25..f7eaa78 100644
--- a/src/ckh.c
+++ b/src/ckh.c
@@ -73,7 +73,7 @@ ckh_isearch(ckh_t *ckh, const void *key)
 	size_t hash1, hash2, bucket, cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
 
@@ -394,9 +394,8 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 		goto RETURN;
 	}
 
-#ifdef JEMALLOC_DEBUG
-	ckh->magic = CKH_MAGIC;
-#endif
+	if (config_debug)
+		ckh->magic = CKH_MAGIC;
 
 	ret = false;
 RETURN:
@@ -408,7 +407,7 @@ ckh_delete(ckh_t *ckh)
 {
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 #ifdef CKH_VERBOSE
 	malloc_printf(
@@ -433,7 +432,7 @@ ckh_count(ckh_t *ckh)
 {
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	return (ckh->count);
 }
@@ -464,7 +463,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
 	bool ret;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 	assert(ckh_search(ckh, key, NULL, NULL));
 
 #ifdef CKH_COUNT
@@ -489,7 +488,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	size_t cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	cell = ckh_isearch(ckh, searchkey);
 	if (cell != SIZE_T_MAX) {
@@ -521,7 +520,7 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	size_t cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	cell = ckh_isearch(ckh, searchkey);
 	if (cell != SIZE_T_MAX) {
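
The dassert() to assert() substitutions in ckh.c (and in arena.c above)
follow from magic fields now being unconditionally defined: they are
written only when config_debug is true, and jemalloc's assert() already
expands to nothing in non-debug builds, so the separate debug-only
dassert() wrapper no longer earns its keep.  The resulting idiom:

  	if (config_debug)
  		ckh->magic = CKH_MAGIC;
  	/* ... */
  	assert(ckh->magic == CKH_MAGIC);  /* No-op without --enable-debug. */
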
diff --git a/src/ctl.c b/src/ctl.c
index e5336d3..05be431 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -27,16 +27,12 @@ static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
 const ctl_node_t	*n##_index(const size_t *mib, size_t miblen,	\
     size_t i);
 
-#ifdef JEMALLOC_STATS
 static bool	ctl_arena_init(ctl_arena_stats_t *astats);
-#endif
 static void	ctl_arena_clear(ctl_arena_stats_t *astats);
-#ifdef JEMALLOC_STATS
 static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
     arena_t *arena);
 static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
     ctl_arena_stats_t *astats);
-#endif
 static void	ctl_arena_refresh(arena_t *arena, unsigned i);
 static void	ctl_refresh(void);
 static bool	ctl_init(void);
@@ -45,16 +41,12 @@ static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
 
 CTL_PROTO(version)
 CTL_PROTO(epoch)
-#ifdef JEMALLOC_TCACHE
 CTL_PROTO(tcache_flush)
-#endif
 CTL_PROTO(thread_arena)
-#ifdef JEMALLOC_STATS
 CTL_PROTO(thread_allocated)
 CTL_PROTO(thread_allocatedp)
 CTL_PROTO(thread_deallocated)
 CTL_PROTO(thread_deallocatedp)
-#endif
 CTL_PROTO(config_debug)
 CTL_PROTO(config_dss)
 CTL_PROTO(config_dynamic_page_shift)
@@ -77,21 +69,12 @@ CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_lg_dirty_mult)
 CTL_PROTO(opt_stats_print)
-#ifdef JEMALLOC_FILL
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
-#endif
-#ifdef JEMALLOC_SYSV
 CTL_PROTO(opt_sysv)
-#endif
-#ifdef JEMALLOC_XMALLOC
 CTL_PROTO(opt_xmalloc)
-#endif
-#ifdef JEMALLOC_TCACHE
 CTL_PROTO(opt_tcache)
 CTL_PROTO(opt_lg_tcache_gc_sweep)
-#endif
-#ifdef JEMALLOC_PROF
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)
 CTL_PROTO(opt_prof_active)
@@ -102,10 +85,7 @@ CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
 CTL_PROTO(opt_lg_prof_tcmax)
-#endif
-#ifdef JEMALLOC_SWAP
 CTL_PROTO(opt_overcommit)
-#endif
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
 CTL_PROTO(arenas_bin_i_run_size)
@@ -119,35 +99,26 @@ CTL_PROTO(arenas_cacheline)
 CTL_PROTO(arenas_subpage)
 CTL_PROTO(arenas_pagesize)
 CTL_PROTO(arenas_chunksize)
-#ifdef JEMALLOC_TINY
 CTL_PROTO(arenas_tspace_min)
 CTL_PROTO(arenas_tspace_max)
-#endif
 CTL_PROTO(arenas_qspace_min)
 CTL_PROTO(arenas_qspace_max)
 CTL_PROTO(arenas_cspace_min)
 CTL_PROTO(arenas_cspace_max)
 CTL_PROTO(arenas_sspace_min)
 CTL_PROTO(arenas_sspace_max)
-#ifdef JEMALLOC_TCACHE
 CTL_PROTO(arenas_tcache_max)
-#endif
 CTL_PROTO(arenas_ntbins)
 CTL_PROTO(arenas_nqbins)
 CTL_PROTO(arenas_ncbins)
 CTL_PROTO(arenas_nsbins)
 CTL_PROTO(arenas_nbins)
-#ifdef JEMALLOC_TCACHE
 CTL_PROTO(arenas_nhbins)
-#endif
 CTL_PROTO(arenas_nlruns)
 CTL_PROTO(arenas_purge)
-#ifdef JEMALLOC_PROF
 CTL_PROTO(prof_active)
 CTL_PROTO(prof_dump)
 CTL_PROTO(prof_interval)
-#endif
-#ifdef JEMALLOC_STATS
 CTL_PROTO(stats_chunks_current)
 CTL_PROTO(stats_chunks_total)
 CTL_PROTO(stats_chunks_high)
@@ -166,10 +137,8 @@ CTL_PROTO(stats_arenas_i_bins_j_allocated)
 CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
 CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
 CTL_PROTO(stats_arenas_i_bins_j_nrequests)
-#ifdef JEMALLOC_TCACHE
 CTL_PROTO(stats_arenas_i_bins_j_nfills)
 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-#endif
 CTL_PROTO(stats_arenas_i_bins_j_nruns)
 CTL_PROTO(stats_arenas_i_bins_j_nreruns)
 CTL_PROTO(stats_arenas_i_bins_j_highruns)
@@ -181,31 +150,22 @@ CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
 CTL_PROTO(stats_arenas_i_lruns_j_highruns)
 CTL_PROTO(stats_arenas_i_lruns_j_curruns)
 INDEX_PROTO(stats_arenas_i_lruns_j)
-#endif
 CTL_PROTO(stats_arenas_i_nthreads)
 CTL_PROTO(stats_arenas_i_pactive)
 CTL_PROTO(stats_arenas_i_pdirty)
-#ifdef JEMALLOC_STATS
 CTL_PROTO(stats_arenas_i_mapped)
 CTL_PROTO(stats_arenas_i_npurge)
 CTL_PROTO(stats_arenas_i_nmadvise)
 CTL_PROTO(stats_arenas_i_purged)
-#endif
 INDEX_PROTO(stats_arenas_i)
-#ifdef JEMALLOC_STATS
 CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
 CTL_PROTO(stats_mapped)
-#endif
-#ifdef JEMALLOC_SWAP
-#  ifdef JEMALLOC_STATS
 CTL_PROTO(swap_avail)
-#  endif
 CTL_PROTO(swap_prezeroed)
 CTL_PROTO(swap_nfds)
 CTL_PROTO(swap_fds)
-#endif
 
 /******************************************************************************/
 /* mallctl tree. */
@@ -223,21 +183,16 @@ CTL_PROTO(swap_fds)
  */
 #define	INDEX(i)	false,	{.indexed = {i##_index}},		NULL
 
-#ifdef JEMALLOC_TCACHE
 static const ctl_node_t	tcache_node[] = {
 	{NAME("flush"),		CTL(tcache_flush)}
 };
-#endif
 
 static const ctl_node_t	thread_node[] = {
-	{NAME("arena"),		CTL(thread_arena)}
-#ifdef JEMALLOC_STATS
-	,
+	{NAME("arena"),		CTL(thread_arena)},
 	{NAME("allocated"),	CTL(thread_allocated)},
 	{NAME("allocatedp"),	CTL(thread_allocatedp)},
 	{NAME("deallocated"),	CTL(thread_deallocated)},
 	{NAME("deallocatedp"),	CTL(thread_deallocatedp)}
-#endif
 };
 
 static const ctl_node_t	config_node[] = {
@@ -265,27 +220,13 @@ static const ctl_node_t opt_node[] = {
 	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
 	{NAME("narenas"),		CTL(opt_narenas)},
 	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
-	{NAME("stats_print"),		CTL(opt_stats_print)}
-#ifdef JEMALLOC_FILL
-	,
+	{NAME("stats_print"),		CTL(opt_stats_print)},
 	{NAME("junk"),			CTL(opt_junk)},
-	{NAME("zero"),			CTL(opt_zero)}
-#endif
-#ifdef JEMALLOC_SYSV
-	,
-	{NAME("sysv"),			CTL(opt_sysv)}
-#endif
-#ifdef JEMALLOC_XMALLOC
-	,
-	{NAME("xmalloc"),		CTL(opt_xmalloc)}
-#endif
-#ifdef JEMALLOC_TCACHE
-	,
+	{NAME("zero"),			CTL(opt_zero)},
+	{NAME("sysv"),			CTL(opt_sysv)},
+	{NAME("xmalloc"),		CTL(opt_xmalloc)},
 	{NAME("tcache"),		CTL(opt_tcache)},
-	{NAME("lg_tcache_gc_sweep"),	CTL(opt_lg_tcache_gc_sweep)}
-#endif
-#ifdef JEMALLOC_PROF
-	,
+	{NAME("lg_tcache_gc_sweep"),	CTL(opt_lg_tcache_gc_sweep)},
 	{NAME("prof"),			CTL(opt_prof)},
 	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
 	{NAME("prof_active"),		CTL(opt_prof_active)},
@@ -295,12 +236,8 @@ static const ctl_node_t opt_node[] = {
 	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
 	{NAME("prof_leak"),		CTL(opt_prof_leak)},
 	{NAME("prof_accum"),		CTL(opt_prof_accum)},
-	{NAME("lg_prof_tcmax"),		CTL(opt_lg_prof_tcmax)}
-#endif
-#ifdef JEMALLOC_SWAP
-	,
+	{NAME("lg_prof_tcmax"),		CTL(opt_lg_prof_tcmax)},
 	{NAME("overcommit"),		CTL(opt_overcommit)}
-#endif
 };
 
 static const ctl_node_t arenas_bin_i_node[] = {
@@ -335,42 +272,33 @@ static const ctl_node_t arenas_node[] = {
 	{NAME("subpage"),		CTL(arenas_subpage)},
 	{NAME("pagesize"),		CTL(arenas_pagesize)},
 	{NAME("chunksize"),		CTL(arenas_chunksize)},
-#ifdef JEMALLOC_TINY
 	{NAME("tspace_min"),		CTL(arenas_tspace_min)},
 	{NAME("tspace_max"),		CTL(arenas_tspace_max)},
-#endif
 	{NAME("qspace_min"),		CTL(arenas_qspace_min)},
 	{NAME("qspace_max"),		CTL(arenas_qspace_max)},
 	{NAME("cspace_min"),		CTL(arenas_cspace_min)},
 	{NAME("cspace_max"),		CTL(arenas_cspace_max)},
 	{NAME("sspace_min"),		CTL(arenas_sspace_min)},
 	{NAME("sspace_max"),		CTL(arenas_sspace_max)},
-#ifdef JEMALLOC_TCACHE
 	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
-#endif
 	{NAME("ntbins"),		CTL(arenas_ntbins)},
 	{NAME("nqbins"),		CTL(arenas_nqbins)},
 	{NAME("ncbins"),		CTL(arenas_ncbins)},
 	{NAME("nsbins"),		CTL(arenas_nsbins)},
 	{NAME("nbins"),			CTL(arenas_nbins)},
-#ifdef JEMALLOC_TCACHE
 	{NAME("nhbins"),		CTL(arenas_nhbins)},
-#endif
 	{NAME("bin"),			CHILD(arenas_bin)},
 	{NAME("nlruns"),		CTL(arenas_nlruns)},
 	{NAME("lrun"),			CHILD(arenas_lrun)},
 	{NAME("purge"),			CTL(arenas_purge)}
 };
 
-#ifdef JEMALLOC_PROF
 static const ctl_node_t	prof_node[] = {
 	{NAME("active"),	CTL(prof_active)},
 	{NAME("dump"),		CTL(prof_dump)},
 	{NAME("interval"),	CTL(prof_interval)}
 };
-#endif
 
-#ifdef JEMALLOC_STATS
 static const ctl_node_t stats_chunks_node[] = {
 	{NAME("current"),		CTL(stats_chunks_current)},
 	{NAME("total"),			CTL(stats_chunks_total)},
@@ -402,10 +330,8 @@ static const ctl_node_t stats_arenas_i_bins_j_node[] = {
 	{NAME("nmalloc"),		CTL(stats_arenas_i_bins_j_nmalloc)},
 	{NAME("ndalloc"),		CTL(stats_arenas_i_bins_j_ndalloc)},
 	{NAME("nrequests"),		CTL(stats_arenas_i_bins_j_nrequests)},
-#ifdef JEMALLOC_TCACHE
 	{NAME("nfills"),		CTL(stats_arenas_i_bins_j_nfills)},
 	{NAME("nflushes"),		CTL(stats_arenas_i_bins_j_nflushes)},
-#endif
 	{NAME("nruns"),			CTL(stats_arenas_i_bins_j_nruns)},
 	{NAME("nreruns"),		CTL(stats_arenas_i_bins_j_nreruns)},
 	{NAME("highruns"),		CTL(stats_arenas_i_bins_j_highruns)},
@@ -433,14 +359,11 @@ static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = {
 static const ctl_node_t stats_arenas_i_lruns_node[] = {
 	{INDEX(stats_arenas_i_lruns_j)}
 };
-#endif
 
 static const ctl_node_t stats_arenas_i_node[] = {
 	{NAME("nthreads"),		CTL(stats_arenas_i_nthreads)},
 	{NAME("pactive"),		CTL(stats_arenas_i_pactive)},
-	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)}
-#ifdef JEMALLOC_STATS
-	,
+	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)},
 	{NAME("mapped"),		CTL(stats_arenas_i_mapped)},
 	{NAME("npurge"),		CTL(stats_arenas_i_npurge)},
 	{NAME("nmadvise"),		CTL(stats_arenas_i_nmadvise)},
@@ -449,7 +372,6 @@ static const ctl_node_t stats_arenas_i_node[] = {
 	{NAME("large"),			CHILD(stats_arenas_i_large)},
 	{NAME("bins"),			CHILD(stats_arenas_i_bins)},
 	{NAME("lruns"),		CHILD(stats_arenas_i_lruns)}
-#endif
 };
 static const ctl_node_t super_stats_arenas_i_node[] = {
 	{NAME(""),			CHILD(stats_arenas_i)}
@@ -460,46 +382,33 @@ static const ctl_node_t stats_arenas_node[] = {
 };
 
 static const ctl_node_t stats_node[] = {
-#ifdef JEMALLOC_STATS
 	{NAME("cactive"),		CTL(stats_cactive)},
 	{NAME("allocated"),		CTL(stats_allocated)},
 	{NAME("active"),		CTL(stats_active)},
 	{NAME("mapped"),		CTL(stats_mapped)},
 	{NAME("chunks"),		CHILD(stats_chunks)},
 	{NAME("huge"),			CHILD(stats_huge)},
-#endif
 	{NAME("arenas"),		CHILD(stats_arenas)}
 };
 
-#ifdef JEMALLOC_SWAP
 static const ctl_node_t swap_node[] = {
-#  ifdef JEMALLOC_STATS
 	{NAME("avail"),			CTL(swap_avail)},
-#  endif
 	{NAME("prezeroed"),		CTL(swap_prezeroed)},
 	{NAME("nfds"),			CTL(swap_nfds)},
 	{NAME("fds"),			CTL(swap_fds)}
 };
-#endif
 
 static const ctl_node_t	root_node[] = {
 	{NAME("version"),	CTL(version)},
 	{NAME("epoch"),		CTL(epoch)},
-#ifdef JEMALLOC_TCACHE
 	{NAME("tcache"),	CHILD(tcache)},
-#endif
 	{NAME("thread"),	CHILD(thread)},
 	{NAME("config"),	CHILD(config)},
 	{NAME("opt"),		CHILD(opt)},
 	{NAME("arenas"),	CHILD(arenas)},
-#ifdef JEMALLOC_PROF
 	{NAME("prof"),		CHILD(prof)},
-#endif
 	{NAME("stats"),		CHILD(stats)}
-#ifdef JEMALLOC_SWAP
 	,
 	{NAME("swap"),		CHILD(swap)}
-#endif
 };
 static const ctl_node_t super_root_node[] = {
 	{NAME(""),		CHILD(root)}
@@ -512,7 +422,6 @@ static const ctl_node_t super_root_node[] = {
 
 /******************************************************************************/
 
-#ifdef JEMALLOC_STATS
 static bool
 ctl_arena_init(ctl_arena_stats_t *astats)
 {
@@ -532,7 +441,6 @@ ctl_arena_init(ctl_arena_stats_t *astats)
 
 	return (false);
 }
-#endif
 
 static void
 ctl_arena_clear(ctl_arena_stats_t *astats)
@@ -540,18 +448,18 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
 
 	astats->pactive = 0;
 	astats->pdirty = 0;
-#ifdef JEMALLOC_STATS
-	memset(&astats->astats, 0, sizeof(arena_stats_t));
-	astats->allocated_small = 0;
-	astats->nmalloc_small = 0;
-	astats->ndalloc_small = 0;
-	astats->nrequests_small = 0;
-	memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
-	memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t));
-#endif
+	if (config_stats) {
+		memset(&astats->astats, 0, sizeof(arena_stats_t));
+		astats->allocated_small = 0;
+		astats->nmalloc_small = 0;
+		astats->ndalloc_small = 0;
+		astats->nrequests_small = 0;
+		memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
+		memset(astats->lstats, 0, nlclasses *
+		    sizeof(malloc_large_stats_t));
+	}
 }
 
-#ifdef JEMALLOC_STATS
 static void
 ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
 {
@@ -604,17 +512,17 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
 		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
 		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
-#ifdef JEMALLOC_TCACHE
-		sstats->bstats[i].nfills += astats->bstats[i].nfills;
-		sstats->bstats[i].nflushes += astats->bstats[i].nflushes;
-#endif
+		if (config_tcache) {
+			sstats->bstats[i].nfills += astats->bstats[i].nfills;
+			sstats->bstats[i].nflushes +=
+			    astats->bstats[i].nflushes;
+		}
 		sstats->bstats[i].nruns += astats->bstats[i].nruns;
 		sstats->bstats[i].reruns += astats->bstats[i].reruns;
 		sstats->bstats[i].highruns += astats->bstats[i].highruns;
 		sstats->bstats[i].curruns += astats->bstats[i].curruns;
 	}
 }
-#endif
 
 static void
 ctl_arena_refresh(arena_t *arena, unsigned i)
@@ -625,17 +533,17 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
 	ctl_arena_clear(astats);
 
 	sstats->nthreads += astats->nthreads;
-#ifdef JEMALLOC_STATS
-	ctl_arena_stats_amerge(astats, arena);
-	/* Merge into sum stats as well. */
-	ctl_arena_stats_smerge(sstats, astats);
-#else
-	astats->pactive += arena->nactive;
-	astats->pdirty += arena->ndirty;
-	/* Merge into sum stats as well. */
-	sstats->pactive += arena->nactive;
-	sstats->pdirty += arena->ndirty;
-#endif
+	if (config_stats) {
+		ctl_arena_stats_amerge(astats, arena);
+		/* Merge into sum stats as well. */
+		ctl_arena_stats_smerge(sstats, astats);
+	} else {
+		astats->pactive += arena->nactive;
+		astats->pdirty += arena->ndirty;
+		/* Merge into sum stats as well. */
+		sstats->pactive += arena->nactive;
+		sstats->pdirty += arena->ndirty;
+	}
 }
 
 static void
@@ -644,19 +552,19 @@ ctl_refresh(void)
 	unsigned i;
 	arena_t *tarenas[narenas];
 
-#ifdef JEMALLOC_STATS
-	malloc_mutex_lock(&chunks_mtx);
-	ctl_stats.chunks.current = stats_chunks.curchunks;
-	ctl_stats.chunks.total = stats_chunks.nchunks;
-	ctl_stats.chunks.high = stats_chunks.highchunks;
-	malloc_mutex_unlock(&chunks_mtx);
-
-	malloc_mutex_lock(&huge_mtx);
-	ctl_stats.huge.allocated = huge_allocated;
-	ctl_stats.huge.nmalloc = huge_nmalloc;
-	ctl_stats.huge.ndalloc = huge_ndalloc;
-	malloc_mutex_unlock(&huge_mtx);
-#endif
+	if (config_stats) {
+		malloc_mutex_lock(&chunks_mtx);
+		ctl_stats.chunks.current = stats_chunks.curchunks;
+		ctl_stats.chunks.total = stats_chunks.nchunks;
+		ctl_stats.chunks.high = stats_chunks.highchunks;
+		malloc_mutex_unlock(&chunks_mtx);
+
+		malloc_mutex_lock(&huge_mtx);
+		ctl_stats.huge.allocated = huge_allocated;
+		ctl_stats.huge.nmalloc = huge_nmalloc;
+		ctl_stats.huge.ndalloc = huge_ndalloc;
+		malloc_mutex_unlock(&huge_mtx);
+	}
 
 	/*
 	 * Clear sum stats, since they will be merged into by
@@ -682,20 +590,20 @@ ctl_refresh(void)
 			ctl_arena_refresh(tarenas[i], i);
 	}
 
-#ifdef JEMALLOC_STATS
-	ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
-	    + ctl_stats.arenas[narenas].astats.allocated_large
-	    + ctl_stats.huge.allocated;
-	ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT)
-	    + ctl_stats.huge.allocated;
-	ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
-
-#  ifdef JEMALLOC_SWAP
-	malloc_mutex_lock(&swap_mtx);
-	ctl_stats.swap_avail = swap_avail;
-	malloc_mutex_unlock(&swap_mtx);
-#  endif
-#endif
+	if (config_stats) {
+		ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
+		    + ctl_stats.arenas[narenas].astats.allocated_large
+		    + ctl_stats.huge.allocated;
+		ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
+		    PAGE_SHIFT) + ctl_stats.huge.allocated;
+		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+
+		if (config_swap) {
+			malloc_mutex_lock(&swap_mtx);
+			ctl_stats.swap_avail = swap_avail;
+			malloc_mutex_unlock(&swap_mtx);
+		}
+	}
 
 	ctl_epoch++;
 }
@@ -707,10 +615,6 @@ ctl_init(void)
 
 	malloc_mutex_lock(&ctl_mtx);
 	if (ctl_initialized == false) {
-#ifdef JEMALLOC_STATS
-		unsigned i;
-#endif
-
 		/*
 		 * Allocate space for one extra arena stats element, which
 		 * contains summed stats across all arenas.
@@ -729,14 +633,15 @@ ctl_init(void)
 		 * ever get used.  Lazy initialization would allow errors to
 		 * cause inconsistent state to be viewable by the application.
 		 */
-#ifdef JEMALLOC_STATS
-		for (i = 0; i <= narenas; i++) {
-			if (ctl_arena_init(&ctl_stats.arenas[i])) {
-				ret = true;
-				goto RETURN;
+		if (config_stats) {
+			unsigned i;
+			for (i = 0; i <= narenas; i++) {
+				if (ctl_arena_init(&ctl_stats.arenas[i])) {
+					ret = true;
+					goto RETURN;
+				}
 			}
 		}
-#endif
 		ctl_stats.arenas[narenas].initialized = true;
 
 		ctl_epoch = 0;
@@ -998,6 +903,54 @@ ctl_boot(void)
 	}								\
 } while (0)
 
+/*
+ * There's a lot of code duplication in the following macros due to limitations
+ * in how nested cpp macros are expanded.
+ */
+#define	CTL_RO_CLGEN(c, l, n, v, t)					\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
+	if ((c) == false)						\
+		return (ENOENT);					\
+	if (l)								\
+		malloc_mutex_lock(&ctl_mtx);				\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+RETURN:									\
+	if (l)								\
+		malloc_mutex_unlock(&ctl_mtx);				\
+	return (ret);							\
+}
+
+#define	CTL_RO_CGEN(c, n, v, t)						\
+static int								\
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
+    void *newp, size_t newlen)						\
+{									\
+	int ret;							\
+	t oldval;							\
+									\
+	if ((c) == false)						\
+		return (ENOENT);					\
+	malloc_mutex_lock(&ctl_mtx);					\
+	READONLY();							\
+	oldval = v;							\
+	READ(oldval, t);						\
+									\
+	ret = 0;							\
+RETURN:									\
+	malloc_mutex_unlock(&ctl_mtx);					\
+	return (ret);							\
+}
+
 #define	CTL_RO_GEN(n, v, t)						\
 static int								\
 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
@@ -1021,7 +974,7 @@ RETURN:									\
  * ctl_mtx is not acquired, under the assumption that no pertinent data will
  * mutate during the call.
  */
-#define	CTL_RO_NL_GEN(n, v, t)					\
+#define	CTL_RO_NL_CGEN(c, n, v, t)					\
 static int								\
 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
     void *newp, size_t newlen)						\
@@ -1029,6 +982,8 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
 	int ret;							\
 	t oldval;							\
 									\
+	if ((c) == false)						\
+		return (ENOENT);					\
 	READONLY();							\
 	oldval = v;							\
 	READ(oldval, t);						\
@@ -1038,24 +993,24 @@ RETURN:									\
 	return (ret);							\
 }
 
-#define	CTL_RO_TRUE_GEN(n)						\
+#define	CTL_RO_NL_GEN(n, v, t)						\
 static int								\
 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
     void *newp, size_t newlen)						\
 {									\
 	int ret;							\
-	bool oldval;							\
+	t oldval;							\
 									\
 	READONLY();							\
-	oldval = true;							\
-	READ(oldval, bool);						\
+	oldval = v;							\
+	READ(oldval, t);						\
 									\
 	ret = 0;							\
 RETURN:									\
 	return (ret);							\
 }
 
-#define	CTL_RO_FALSE_GEN(n)						\
+#define	CTL_RO_BOOL_CONFIG_GEN(n)					\
 static int								\
 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
     void *newp, size_t newlen)						\
@@ -1063,8 +1018,8 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
 	int ret;							\
 	bool oldval;							\
 									\
 	READONLY();							\
-	oldval = false;							\
+	oldval = n;							\
 	READ(oldval, bool);						\
 									\
 	ret = 0;							\
@@ -1094,7 +1051,6 @@ RETURN:
 	return (ret);
 }
 
-#ifdef JEMALLOC_TCACHE
 static int
 tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
@@ -1102,6 +1058,9 @@ tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	int ret;
 	tcache_t *tcache;
 
+	if (config_tcache == false)
+		return (ENOENT);
+
 	VOID();
 
 	tcache = TCACHE_GET();
@@ -1116,7 +1075,6 @@ tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 RETURN:
 	return (ret);
 }
-#endif
 
 static int
 thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1151,13 +1109,11 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 
 		/* Set new arena association. */
 		ARENA_SET(arena);
-#ifdef JEMALLOC_TCACHE
-		{
+		if (config_tcache) {
 			tcache_t *tcache = TCACHE_GET();
 			if (tcache != NULL)
 				tcache->arena = arena;
 		}
-#endif
 	}
 
 	ret = 0;
@@ -1165,104 +1121,29 @@ RETURN:
 	return (ret);
 }
 
-#ifdef JEMALLOC_STATS
-CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t);
-CTL_RO_NL_GEN(thread_allocatedp, ALLOCATEDP_GET(), uint64_t *);
-CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t);
-CTL_RO_NL_GEN(thread_deallocatedp, DEALLOCATEDP_GET(), uint64_t *);
-#endif
+CTL_RO_NL_CGEN(config_stats, thread_allocated, ALLOCATED_GET(), uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_allocatedp, ALLOCATEDP_GET(), uint64_t *)
+CTL_RO_NL_CGEN(config_stats, thread_deallocated, DEALLOCATED_GET(), uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, DEALLOCATEDP_GET(),
+    uint64_t *)
 
 /******************************************************************************/
 
-#ifdef JEMALLOC_DEBUG
-CTL_RO_TRUE_GEN(config_debug)
-#else
-CTL_RO_FALSE_GEN(config_debug)
-#endif
-
-#ifdef JEMALLOC_DSS
-CTL_RO_TRUE_GEN(config_dss)
-#else
-CTL_RO_FALSE_GEN(config_dss)
-#endif
-
-#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
-CTL_RO_TRUE_GEN(config_dynamic_page_shift)
-#else
-CTL_RO_FALSE_GEN(config_dynamic_page_shift)
-#endif
-
-#ifdef JEMALLOC_FILL
-CTL_RO_TRUE_GEN(config_fill)
-#else
-CTL_RO_FALSE_GEN(config_fill)
-#endif
-
-#ifdef JEMALLOC_LAZY_LOCK
-CTL_RO_TRUE_GEN(config_lazy_lock)
-#else
-CTL_RO_FALSE_GEN(config_lazy_lock)
-#endif
-
-#ifdef JEMALLOC_PROF
-CTL_RO_TRUE_GEN(config_prof)
-#else
-CTL_RO_FALSE_GEN(config_prof)
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
-CTL_RO_TRUE_GEN(config_prof_libgcc)
-#else
-CTL_RO_FALSE_GEN(config_prof_libgcc)
-#endif
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-CTL_RO_TRUE_GEN(config_prof_libunwind)
-#else
-CTL_RO_FALSE_GEN(config_prof_libunwind)
-#endif
-
-#ifdef JEMALLOC_STATS
-CTL_RO_TRUE_GEN(config_stats)
-#else
-CTL_RO_FALSE_GEN(config_stats)
-#endif
-
-#ifdef JEMALLOC_SWAP
-CTL_RO_TRUE_GEN(config_swap)
-#else
-CTL_RO_FALSE_GEN(config_swap)
-#endif
-
-#ifdef JEMALLOC_SYSV
-CTL_RO_TRUE_GEN(config_sysv)
-#else
-CTL_RO_FALSE_GEN(config_sysv)
-#endif
-
-#ifdef JEMALLOC_TCACHE
-CTL_RO_TRUE_GEN(config_tcache)
-#else
-CTL_RO_FALSE_GEN(config_tcache)
-#endif
-
-#ifdef JEMALLOC_TINY
-CTL_RO_TRUE_GEN(config_tiny)
-#else
-CTL_RO_FALSE_GEN(config_tiny)
-#endif
-
-#ifdef JEMALLOC_TLS
-CTL_RO_TRUE_GEN(config_tls)
-#else
-CTL_RO_FALSE_GEN(config_tls)
-#endif
-
-#ifdef JEMALLOC_XMALLOC
-CTL_RO_TRUE_GEN(config_xmalloc)
-#else
-CTL_RO_FALSE_GEN(config_xmalloc)
-#endif
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_dynamic_page_shift)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_swap)
+CTL_RO_BOOL_CONFIG_GEN(config_sysv)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tiny)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
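
Spelled out, an invocation such as CTL_RO_BOOL_CONFIG_GEN(config_tcache)
expands to approximately the following handler (READONLY() and READ() are the
existing ctl helper macros):

  static int
  config_tcache_ctl(const size_t *mib, size_t miblen, void *oldp,
      size_t *oldlenp, void *newp, size_t newlen)
  {
      int ret;
      bool oldval;

      if (config_tcache == false)
          return (ENOENT);
      READONLY();
      oldval = config_tcache;
      READ(oldval, bool);

      ret = 0;
  RETURN:
      return (ret);
  }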
 
 /******************************************************************************/
 
@@ -1273,35 +1154,24 @@ CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
 CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-#ifdef JEMALLOC_FILL
-CTL_RO_NL_GEN(opt_junk, opt_junk, bool)
-CTL_RO_NL_GEN(opt_zero, opt_zero, bool)
-#endif
-#ifdef JEMALLOC_SYSV
-CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool)
-#endif
-#ifdef JEMALLOC_XMALLOC
-CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool)
-#endif
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
-CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
-#endif
-#ifdef JEMALLOC_PROF
-CTL_RO_NL_GEN(opt_prof, opt_prof, bool)
-CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
-CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
-#endif
-#ifdef JEMALLOC_SWAP
-CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool)
-#endif
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_sysv, opt_sysv, opt_sysv, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep,
+    ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
+CTL_RO_NL_CGEN(config_swap, opt_overcommit, opt_overcommit, bool)
 
 /******************************************************************************/
 
@@ -1360,27 +1230,21 @@ CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t)
 CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
 CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
 CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
-#ifdef JEMALLOC_TINY
-CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
-CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
-#endif
+CTL_RO_NL_CGEN(config_tiny, arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
+CTL_RO_NL_CGEN(config_tiny, arenas_tspace_max, (qspace_min >> 1), size_t)
 CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
 CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
 CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
 CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
 CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
 CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
-#endif
+CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
 CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
 CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
 CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
 CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
 CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
-#endif
+CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
 CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
 
 static int
@@ -1423,7 +1287,6 @@ RETURN:
 
 /******************************************************************************/
 
-#ifdef JEMALLOC_PROF
 static int
 prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
@@ -1431,6 +1294,9 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	int ret;
 	bool oldval;
 
+	if (config_prof == false)
+		return (ENOENT);
+
 	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
 	oldval = opt_prof_active;
 	if (newp != NULL) {
@@ -1457,6 +1323,9 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	int ret;
 	const char *filename = NULL;
 
+	if (config_prof == false)
+		return (ENOENT);
+
 	WRITEONLY();
 	WRITE(filename, const char *);
 
@@ -1470,56 +1339,53 @@ RETURN:
 	return (ret);
 }
 
-CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t)
-#endif
+CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
 
 /******************************************************************************/
 
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_chunks_current, ctl_stats.chunks.current, size_t)
-CTL_RO_GEN(stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_GEN(stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_GEN(stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_GEN(stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_GEN(stats_huge_ndalloc, huge_ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_allocated,
+CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
+    size_t)
+CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
+CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
     ctl_stats.arenas[mib[2]].allocated_small, size_t)
-CTL_RO_GEN(stats_arenas_i_small_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
     ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
     ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
     ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_allocated,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
     ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
-CTL_RO_GEN(stats_arenas_i_large_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
     ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
     ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
     ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
 
-CTL_RO_GEN(stats_arenas_i_bins_j_allocated,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_GEN(stats_arenas_i_bins_j_nfills,
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nflushes,
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-#endif
-CTL_RO_GEN(stats_arenas_i_bins_j_nruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nreruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_highruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_highruns,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].highruns, size_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_curruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
 
 const ctl_node_t *
@@ -1531,15 +1397,15 @@ stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
 	return (super_stats_arenas_i_bins_j_node);
 }
 
-CTL_RO_GEN(stats_arenas_i_lruns_j_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
     ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
     ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
     ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_curruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
     ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_highruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_highruns,
     ctl_stats.arenas[mib[2]].lstats[mib[4]].highruns, size_t)
 
 const ctl_node_t *
@@ -1551,20 +1417,17 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
 	return (super_stats_arenas_i_lruns_j_node);
 }
 
-#endif
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
 CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
 CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped,
-    size_t)
-CTL_RO_GEN(stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge,
-    uint64_t)
-CTL_RO_GEN(stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise,
-    uint64_t)
-CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged,
-    uint64_t)
-#endif
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
 
 const ctl_node_t *
 stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
@@ -1583,19 +1446,15 @@ RETURN:
 	return (ret);
 }
 
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_cactive, &stats_cactive, size_t *)
-CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
-CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)
-#endif
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
 
 /******************************************************************************/
 
-#ifdef JEMALLOC_SWAP
-#  ifdef JEMALLOC_STATS
-CTL_RO_GEN(swap_avail, ctl_stats.swap_avail, size_t)
-#  endif
+CTL_RO_CGEN(config_swap && config_stats, swap_avail, ctl_stats.swap_avail,
+    size_t)
 
 static int
 swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1603,6 +1462,9 @@ swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
 {
 	int ret;
 
+	if (config_swap == false)
+		return (ENOENT);
+
 	malloc_mutex_lock(&ctl_mtx);
 	if (swap_enabled) {
 		READONLY();
@@ -1625,7 +1487,7 @@ RETURN:
 	return (ret);
 }
 
-CTL_RO_GEN(swap_nfds, swap_nfds, size_t)
+CTL_RO_CGEN(config_swap, swap_nfds, swap_nfds, size_t)
 
 static int
 swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1633,6 +1495,9 @@ swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 {
 	int ret;
 
+	if (config_swap == false)
+		return (ENOENT);
+
 	malloc_mutex_lock(&ctl_mtx);
 	if (swap_enabled) {
 		READONLY();
@@ -1667,4 +1532,3 @@ RETURN:
 	malloc_mutex_unlock(&ctl_mtx);
 	return (ret);
 }
-#endif
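
The ENOENT early returns work because the config_* globals are compile-time
constants: an optimizing compiler removes the guarded code when the feature is
off, yet the code must still type-check in every configuration.  A tiny
self-contained demonstration of that property (stand-in constant name):

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-in for a config_* constant; normally derived from the
   * build configuration rather than hard-coded. */
  static const bool config_debug_ = false;

  static void
  expensive_check(void)
  {
      printf("checking\n");
  }

  int
  main(void)
  {
      /* With config_debug_ false, any optimizing compiler folds this
       * branch away; unlike #ifdef, the call must still compile. */
      if (config_debug_)
          expensive_check();
      return (0);
  }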
diff --git a/src/extent.c b/src/extent.c
index 3c04d3a..8c09b48 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -3,7 +3,6 @@
 
 /******************************************************************************/
 
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 static inline int
 extent_szad_comp(extent_node_t *a, extent_node_t *b)
 {
@@ -25,7 +24,6 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
 /* Generate red-black tree functions. */
 rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
     extent_szad_comp)
-#endif
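
The szad tree orders extents by size, breaking ties by address, which is what
best-fit reuse of recycled chunks wants.  The hunk truncates extent_szad_comp's
body; its comparison logic is, in sketch form (hypothetical local type standing
in for extent_node_t):

  #include <stddef.h>
  #include <stdint.h>

  typedef struct { void *addr; size_t size; } extent_sk_t;

  /* Size-then-address comparator, suitable for rb_gen(). */
  static inline int
  szad_comp_sketch(const extent_sk_t *a, const extent_sk_t *b)
  {
      int ret = (a->size > b->size) - (a->size < b->size);

      if (ret == 0) {
          /* Break size ties by address. */
          uintptr_t a_addr = (uintptr_t)a->addr;
          uintptr_t b_addr = (uintptr_t)b->addr;
          ret = (a_addr > b_addr) - (a_addr < b_addr);
      }
      return (ret);
  }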
 
 static inline int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
diff --git a/src/huge.c b/src/huge.c
index a4f9b05..1eee436 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -4,11 +4,9 @@
 /******************************************************************************/
 /* Data. */
 
-#ifdef JEMALLOC_STATS
 uint64_t	huge_nmalloc;
 uint64_t	huge_ndalloc;
 size_t		huge_allocated;
-#endif
 
 malloc_mutex_t	huge_mtx;
 
@@ -49,21 +47,19 @@ huge_malloc(size_t size, bool zero)
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
-	stats_cactive_add(csize);
-	huge_nmalloc++;
-	huge_allocated += csize;
-#endif
+	if (config_stats) {
+		stats_cactive_add(csize);
+		huge_nmalloc++;
+		huge_allocated += csize;
+	}
 	malloc_mutex_unlock(&huge_mtx);
 
-#ifdef JEMALLOC_FILL
-	if (zero == false) {
+	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, csize);
 		else if (opt_zero)
 			memset(ret, 0, csize);
 	}
-#endif
 
 	return (ret);
 }
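
The fill policy shown above is jemalloc's usual convention: new memory is
junk-filled with 0xa5 under opt_junk or zeroed under opt_zero, unless the
caller asked for zeroed memory anyway.  As a standalone sketch:

  #include <stdbool.h>
  #include <stddef.h>
  #include <string.h>

  /* Sketch of the allocation-side fill policy; the flag arguments
   * stand in for the opt_junk/opt_zero runtime options. */
  static void
  fill_new(void *p, size_t sz, bool zero, bool opt_junk, bool opt_zero)
  {
      if (zero == false) {
          if (opt_junk)
              memset(p, 0xa5, sz); /* poison fresh memory */
          else if (opt_zero)
              memset(p, 0, sz);
      }
  }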
@@ -134,21 +130,19 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
-	stats_cactive_add(chunk_size);
-	huge_nmalloc++;
-	huge_allocated += chunk_size;
-#endif
+	if (config_stats) {
+		stats_cactive_add(chunk_size);
+		huge_nmalloc++;
+		huge_allocated += chunk_size;
+	}
 	malloc_mutex_unlock(&huge_mtx);
 
-#ifdef JEMALLOC_FILL
-	if (zero == false) {
+	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, chunk_size);
 		else if (opt_zero)
 			memset(ret, 0, chunk_size);
 	}
-#endif
 
 	return (ret);
 }
@@ -164,12 +158,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
 	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
 		assert(CHUNK_CEILING(oldsize) == oldsize);
-#ifdef JEMALLOC_FILL
-		if (opt_junk && size < oldsize) {
+		if (config_fill && opt_junk && size < oldsize) {
 			memset((void *)((uintptr_t)ptr + size), 0x5a,
 			    oldsize - size);
 		}
-#endif
 		return (ptr);
 	}
 
@@ -223,15 +215,10 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * source nor the destination are in swap or dss.
 	 */
 #ifdef JEMALLOC_MREMAP_FIXED
-	if (oldsize >= chunksize
-#  ifdef JEMALLOC_SWAP
-	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
-	    chunk_in_swap(ret) == false))
-#  endif
-#  ifdef JEMALLOC_DSS
-	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
-#  endif
-	    ) {
+	if (oldsize >= chunksize && (config_swap == false || swap_enabled ==
+	    false || (chunk_in_swap(ptr) == false && chunk_in_swap(ret) ==
+	    false)) && (config_dss == false || (chunk_in_dss(ptr) == false &&
+	    chunk_in_dss(ret) == false))) {
 		size_t newsize = huge_salloc(ret);
 
 		/*
@@ -285,23 +272,16 @@ huge_dalloc(void *ptr, bool unmap)
 	assert(node->addr == ptr);
 	extent_tree_ad_remove(&huge, node);
 
-#ifdef JEMALLOC_STATS
-	stats_cactive_sub(node->size);
-	huge_ndalloc++;
-	huge_allocated -= node->size;
-#endif
+	if (config_stats) {
+		stats_cactive_sub(node->size);
+		huge_ndalloc++;
+		huge_allocated -= node->size;
+	}
 
 	malloc_mutex_unlock(&huge_mtx);
 
-	if (unmap) {
-	/* Unmap chunk. */
-#ifdef JEMALLOC_FILL
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
-		if (opt_junk)
-			memset(node->addr, 0x5a, node->size);
-#endif
-#endif
-	}
+	if (unmap && config_fill && (config_swap || config_dss) && opt_junk)
+		memset(node->addr, 0x5a, node->size);
 
 	chunk_dealloc(node->addr, node->size, unmap);
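
On the deallocation side the poison byte is 0x5a, so stale reads are
distinguishable from reads of uninitialized junk (0xa5).  Also note how four
levels of preprocessor nesting collapse into one flat condition: with
compile-time constants, && short-circuiting reproduces the old nesting
exactly.  A minimal poison-on-free sketch:

  #include <stdbool.h>
  #include <stdlib.h>
  #include <string.h>

  /* Junk-fill on free so dangling reads show a recognizable 0x5a
   * pattern, mirroring the 0xa5 allocation-side fill. */
  static void
  junk_free(void *p, size_t sz, bool opt_junk)
  {
      if (opt_junk)
          memset(p, 0x5a, sz);
      free(p);
  }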
 
@@ -328,7 +308,6 @@ huge_salloc(const void *ptr)
 	return (ret);
 }
 
-#ifdef JEMALLOC_PROF
 prof_ctx_t *
 huge_prof_ctx_get(const void *ptr)
 {
@@ -365,7 +344,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 
 	malloc_mutex_unlock(&huge_mtx);
 }
-#endif
 
 bool
 huge_boot(void)
@@ -376,11 +354,11 @@ huge_boot(void)
 		return (true);
 	extent_tree_ad_new(&huge);
 
-#ifdef JEMALLOC_STATS
-	huge_nmalloc = 0;
-	huge_ndalloc = 0;
-	huge_allocated = 0;
-#endif
+	if (config_stats) {
+		huge_nmalloc = 0;
+		huge_ndalloc = 0;
+		huge_allocated = 0;
+	}
 
 	return (false);
 }
diff --git a/src/jemalloc.c b/src/jemalloc.c
index a161c2e..9e1814d 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -13,13 +13,10 @@ pthread_key_t		arenas_tsd;
 __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
 #endif
 
-#ifdef JEMALLOC_STATS
-#  ifndef NO_TLS
+#ifndef NO_TLS
 __thread thread_allocated_t	thread_allocated_tls;
-#  else
-pthread_key_t		thread_allocated_tsd;
-#  endif
 #endif
+pthread_key_t		thread_allocated_tsd;
 
 /* Set to true once the allocator has been initialized. */
 static bool		malloc_initialized = false;
@@ -28,13 +25,7 @@ static bool		malloc_initialized = false;
 static pthread_t	malloc_initializer = (unsigned long)0;
 
 /* Used to avoid initialization races. */
-static malloc_mutex_t	init_lock =
-#ifdef JEMALLOC_OSSPIN
-    0
-#else
-    MALLOC_MUTEX_INITIALIZER
-#endif
-    ;
+static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
 
 #ifdef DYNAMIC_PAGE_SHIFT
 size_t		pagesize;
@@ -50,22 +41,16 @@ const char	*JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
 bool	opt_abort = true;
 #  ifdef JEMALLOC_FILL
 bool	opt_junk = true;
+#  else
+bool	opt_junk = false;
 #  endif
 #else
 bool	opt_abort = false;
-#  ifdef JEMALLOC_FILL
 bool	opt_junk = false;
-#  endif
 #endif
-#ifdef JEMALLOC_SYSV
 bool	opt_sysv = false;
-#endif
-#ifdef JEMALLOC_XMALLOC
 bool	opt_xmalloc = false;
-#endif
-#ifdef JEMALLOC_FILL
 bool	opt_zero = false;
-#endif
 size_t	opt_narenas = 0;
 
 /******************************************************************************/
@@ -75,7 +60,7 @@ static void	wrtmessage(void *cbopaque, const char *s);
 static void	stats_print_atexit(void);
 static unsigned	malloc_ncpus(void);
 static void	arenas_cleanup(void *arg);
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
+#ifdef NO_TLS
 static void	thread_allocated_cleanup(void *arg);
 #endif
 static bool	malloc_conf_next(char const **opts_p, char const **k_p,
@@ -89,22 +74,11 @@ static int	imemalign(void **memptr, size_t alignment, size_t size);
 /******************************************************************************/
 /* malloc_message() setup. */
 
-#ifdef JEMALLOC_HAVE_ATTR
-JEMALLOC_ATTR(visibility("hidden"))
-#else
-static
-#endif
+JEMALLOC_CATTR(visibility("hidden"), static)
 void
 wrtmessage(void *cbopaque, const char *s)
 {
-#ifdef JEMALLOC_CC_SILENCE
-	int result =
-#endif
-	    write(STDERR_FILENO, s, strlen(s));
-#ifdef JEMALLOC_CC_SILENCE
-	if (result < 0)
-		result = errno;
-#endif
+	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
 }
 
 void	(*JEMALLOC_P(malloc_message))(void *, const char *s)
@@ -229,37 +203,38 @@ static void
 stats_print_atexit(void)
 {
 
-#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
-	unsigned i;
+	if (config_tcache && config_stats) {
+		unsigned i;
 
-	/*
-	 * Merge stats from extant threads.  This is racy, since individual
-	 * threads do not lock when recording tcache stats events.  As a
-	 * consequence, the final stats may be slightly out of date by the time
-	 * they are reported, if other threads continue to allocate.
-	 */
-	for (i = 0; i < narenas; i++) {
-		arena_t *arena = arenas[i];
-		if (arena != NULL) {
-			tcache_t *tcache;
+		/*
+		 * Merge stats from extant threads.  This is racy, since
+		 * individual threads do not lock when recording tcache stats
+		 * events.  As a consequence, the final stats may be slightly
+		 * out of date by the time they are reported, if other threads
+		 * continue to allocate.
+		 */
+		for (i = 0; i < narenas; i++) {
+			arena_t *arena = arenas[i];
+			if (arena != NULL) {
+				tcache_t *tcache;
 
-			/*
-			 * tcache_stats_merge() locks bins, so if any code is
-			 * introduced that acquires both arena and bin locks in
-			 * the opposite order, deadlocks may result.
-			 */
-			malloc_mutex_lock(&arena->lock);
-			ql_foreach(tcache, &arena->tcache_ql, link) {
-				tcache_stats_merge(tcache, arena);
+				/*
+				 * tcache_stats_merge() locks bins, so if any
+				 * code is introduced that acquires both arena
+				 * and bin locks in the opposite order,
+				 * deadlocks may result.
+				 */
+				malloc_mutex_lock(&arena->lock);
+				ql_foreach(tcache, &arena->tcache_ql, link) {
+					tcache_stats_merge(tcache, arena);
+				}
+				malloc_mutex_unlock(&arena->lock);
 			}
-			malloc_mutex_unlock(&arena->lock);
 		}
 	}
-#endif
 	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
 }
 
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
 thread_allocated_t *
 thread_allocated_get_hard(void)
 {
@@ -279,7 +254,6 @@ thread_allocated_get_hard(void)
 	thread_allocated->deallocated = 0;
 	return (thread_allocated);
 }
-#endif
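
thread_allocated_get_hard(), kept above for NO_TLS builds, lazily creates the
per-thread counter record and stashes it with pthread_setspecific() so the
key's destructor can free it at thread exit.  A self-contained sketch of the
pattern (hypothetical names; a real allocator would avoid calling its own
public calloc() during bootstrap):

  #include <pthread.h>
  #include <stdlib.h>

  typedef struct {
      unsigned long allocated;
      unsigned long deallocated;
  } counters_t;

  static pthread_key_t counters_key;

  static void
  counters_cleanup(void *arg)
  {
      free(arg); /* runs at thread exit */
  }

  /* Call once during initialization, as malloc_init_hard() does. */
  static int
  counters_boot(void)
  {
      return (pthread_key_create(&counters_key, counters_cleanup));
  }

  static counters_t *
  counters_get(void)
  {
      counters_t *c = pthread_getspecific(counters_key);

      if (c == NULL) {
          c = calloc(1, sizeof(*c));
          pthread_setspecific(counters_key, c);
      }
      return (c);
  }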
 
 /*
  * End miscellaneous support functions.
@@ -315,7 +289,7 @@ arenas_cleanup(void *arg)
 	malloc_mutex_unlock(&arenas_lock);
 }
 
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
+#ifdef NO_TLS
 static void
 thread_allocated_cleanup(void *arg)
 {
@@ -603,41 +577,42 @@ malloc_conf_init(void)
 			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
 			    (sizeof(size_t) << 3) - 1)
 			CONF_HANDLE_BOOL(stats_print)
-#ifdef JEMALLOC_FILL
-			CONF_HANDLE_BOOL(junk)
-			CONF_HANDLE_BOOL(zero)
-#endif
-#ifdef JEMALLOC_SYSV
-			CONF_HANDLE_BOOL(sysv)
-#endif
-#ifdef JEMALLOC_XMALLOC
-			CONF_HANDLE_BOOL(xmalloc)
-#endif
-#ifdef JEMALLOC_TCACHE
-			CONF_HANDLE_BOOL(tcache)
-			CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
-			    (sizeof(size_t) << 3) - 1)
-			CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
-			    (sizeof(size_t) << 3) - 1)
-#endif
-#ifdef JEMALLOC_PROF
-			CONF_HANDLE_BOOL(prof)
-			CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
-			CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
-			CONF_HANDLE_BOOL(prof_active)
-			CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
-			    (sizeof(uint64_t) << 3) - 1)
-			CONF_HANDLE_BOOL(prof_accum)
-			CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
-			    (sizeof(size_t) << 3) - 1)
-			CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
-			    (sizeof(uint64_t) << 3) - 1)
-			CONF_HANDLE_BOOL(prof_gdump)
-			CONF_HANDLE_BOOL(prof_leak)
-#endif
-#ifdef JEMALLOC_SWAP
-			CONF_HANDLE_BOOL(overcommit)
-#endif
+			if (config_fill) {
+				CONF_HANDLE_BOOL(junk)
+				CONF_HANDLE_BOOL(zero)
+			}
+			if (config_sysv) {
+				CONF_HANDLE_BOOL(sysv)
+			}
+			if (config_xmalloc) {
+				CONF_HANDLE_BOOL(xmalloc)
+			}
+			if (config_tcache) {
+				CONF_HANDLE_BOOL(tcache)
+				CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
+				    (sizeof(size_t) << 3) - 1)
+				CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
+				    (sizeof(size_t) << 3) - 1)
+			}
+			if (config_prof) {
+				CONF_HANDLE_BOOL(prof)
+				CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
+				CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0,
+				    LG_PROF_BT_MAX)
+				CONF_HANDLE_BOOL(prof_active)
+				CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
+				    (sizeof(uint64_t) << 3) - 1)
+				CONF_HANDLE_BOOL(prof_accum)
+				CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
+				    (sizeof(size_t) << 3) - 1)
+				CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
+				    (sizeof(uint64_t) << 3) - 1)
+				CONF_HANDLE_BOOL(prof_gdump)
+				CONF_HANDLE_BOOL(prof_leak)
+			}
+			if (config_swap) {
+				CONF_HANDLE_BOOL(overcommit)
+			}
 			malloc_conf_error("Invalid conf pair", k, klen, v,
 			    vlen);
 #undef CONF_HANDLE_BOOL
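
With the handlers inside if (config_*) blocks, an option belonging to a
compiled-out feature matches nothing and falls through to malloc_conf_error(),
instead of silently vanishing at preprocessing time.  A hypothetical,
much-simplified matcher in the same shape (HANDLE_BOOL and all names here are
invented for illustration):

  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  static const bool config_fill_ = true; /* stand-in config constant */
  static bool opt_junk_ = false;

  /* On a key match, set the option and move to the next pair;
   * otherwise fall through toward the error path. */
  #define HANDLE_BOOL(opt, key, val)                \
      if (strcmp(key, #opt) == 0) {                 \
          opt##_ = (strcmp(val, "true") == 0);      \
          continue;                                 \
      }

  static void
  conf_parse(const char **keys, const char **vals, int n)
  {
      int i;

      for (i = 0; i < n; i++) {
          if (config_fill_) {
              HANDLE_BOOL(opt_junk, keys[i], vals[i])
          }
          /* No handler matched (or its feature is disabled). */
          fprintf(stderr, "unknown option: %s\n", keys[i]);
      }
  }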
@@ -701,9 +676,8 @@ malloc_init_hard(void)
 	}
 #endif
 
-#ifdef JEMALLOC_PROF
-	prof_boot0();
-#endif
+	if (config_prof)
+		prof_boot0();
 
 	malloc_conf_init();
 
@@ -739,31 +713,28 @@ malloc_init_hard(void)
 		return (true);
 	}
 
-#ifdef JEMALLOC_PROF
-	prof_boot1();
-#endif
+	if (config_prof)
+		prof_boot1();
 
 	if (arena_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
 
-#ifdef JEMALLOC_TCACHE
-	if (tcache_boot()) {
+	if (config_tcache && tcache_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
-#endif
 
 	if (huge_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
 
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
+#ifdef NO_TLS
 	/* Initialize allocation counters before any allocations can occur. */
-	if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
-	    != 0) {
+	if (config_stats && pthread_key_create(&thread_allocated_tsd,
+	    thread_allocated_cleanup) != 0) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
@@ -803,12 +774,10 @@ malloc_init_hard(void)
 	ARENA_SET(arenas[0]);
 	arenas[0]->nthreads++;
 
-#ifdef JEMALLOC_PROF
-	if (prof_boot2()) {
+	if (config_prof && prof_boot2()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
-#endif
 
 	/* Get number of CPUs. */
 	malloc_initializer = pthread_self();
@@ -897,20 +866,8 @@ void *
 JEMALLOC_P(malloc)(size_t size)
 {
 	void *ret;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-	size_t usize
-#  ifdef JEMALLOC_CC_SILENCE
-	    = 0
-#  endif
-	    ;
-#endif
-#ifdef JEMALLOC_PROF
-	prof_thr_cnt_t *cnt
-#  ifdef JEMALLOC_CC_SILENCE
-	    = NULL
-#  endif
-	    ;
-#endif
+	size_t usize;
+	prof_thr_cnt_t *cnt;
 
 	if (malloc_init()) {
 		ret = NULL;
@@ -918,27 +875,20 @@ JEMALLOC_P(malloc)(size_t size)
 	}
 
 	if (size == 0) {
-#ifdef JEMALLOC_SYSV
-		if (opt_sysv == false)
-#endif
+		if (config_sysv == false || opt_sysv == false)
 			size = 1;
-#ifdef JEMALLOC_SYSV
 		else {
-#  ifdef JEMALLOC_XMALLOC
-			if (opt_xmalloc) {
+			if (config_xmalloc && opt_xmalloc) {
 				malloc_write("<jemalloc>: Error in malloc(): "
 				    "invalid size 0\n");
 				abort();
 			}
-#  endif
 			ret = NULL;
 			goto RETURN;
 		}
-#endif
 	}
 
-#ifdef JEMALLOC_PROF
-	if (opt_prof) {
+	if (config_prof && opt_prof) {
 		usize = s2u(size);
 		PROF_ALLOC_PREP(1, usize, cnt);
 		if (cnt == NULL) {
@@ -952,47 +902,36 @@ JEMALLOC_P(malloc)(size_t size)
 				arena_prof_promoted(ret, usize);
 		} else
 			ret = imalloc(size);
-	} else
-#endif
-	{
-#ifdef JEMALLOC_STATS
-		usize = s2u(size);
-#endif
+	} else {
+		if (config_stats)
+			usize = s2u(size);
 		ret = imalloc(size);
 	}
 
 OOM:
 	if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
-		if (opt_xmalloc) {
+		if (config_xmalloc && opt_xmalloc) {
 			malloc_write("<jemalloc>: Error in malloc(): "
 			    "out of memory\n");
 			abort();
 		}
-#endif
 		errno = ENOMEM;
 	}
 
-#ifdef JEMALLOC_SYSV
 RETURN:
-#endif
-#ifdef JEMALLOC_PROF
-	if (opt_prof && ret != NULL)
+	if (config_prof && opt_prof && ret != NULL)
 		prof_malloc(ret, usize, cnt);
-#endif
-#ifdef JEMALLOC_STATS
-	if (ret != NULL) {
+	if (config_stats && ret != NULL) {
 		assert(usize == isalloc(ret));
 		ALLOCATED_ADD(usize, 0);
 	}
-#endif
 	return (ret);
 }
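
The recurring `config_prof && opt_prof` test relies on && short-circuiting:
when the left operand is compile-time false, the runtime flag is never even
loaded and the whole block drops out of the object code.  Sketch (stand-in
names):

  #include <stdbool.h>

  static const bool config_prof_ = false; /* compile-time feature gate */
  static bool opt_prof_ = true;           /* runtime option */

  static int
  alloc_event(void)
  {
      /* With config_prof_ false, this block, including the load of
       * opt_prof_, is dead code after optimization. */
      if (config_prof_ && opt_prof_)
          return (1); /* profiling path */
      return (0);     /* plain path */
  }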
 
 JEMALLOC_ATTR(nonnull(1))
 #ifdef JEMALLOC_PROF
 /*
- * Avoid any uncertainty as to how many backtrace frames to ignore in 
+ * Avoid any uncertainty as to how many backtrace frames to ignore in
  * PROF_ALLOC_PREP().
  */
 JEMALLOC_ATTR(noinline)
@@ -1001,56 +940,38 @@ static int
 imemalign(void **memptr, size_t alignment, size_t size)
 {
 	int ret;
-	size_t usize
-#ifdef JEMALLOC_CC_SILENCE
-	    = 0
-#endif
-	    ;
+	size_t usize;
 	void *result;
-#ifdef JEMALLOC_PROF
-	prof_thr_cnt_t *cnt
-#  ifdef JEMALLOC_CC_SILENCE
-	    = NULL
-#  endif
-	    ;
-#endif
+	prof_thr_cnt_t *cnt;
 
 	if (malloc_init())
 		result = NULL;
 	else {
 		if (size == 0) {
-#ifdef JEMALLOC_SYSV
-			if (opt_sysv == false)
-#endif
+			if (config_sysv == false || opt_sysv == false)
 				size = 1;
-#ifdef JEMALLOC_SYSV
 			else {
-#  ifdef JEMALLOC_XMALLOC
-				if (opt_xmalloc) {
+				if (config_xmalloc && opt_xmalloc) {
 					malloc_write("<jemalloc>: Error in "
 					    "posix_memalign(): invalid size "
 					    "0\n");
 					abort();
 				}
-#  endif
 				result = NULL;
 				*memptr = NULL;
 				ret = 0;
 				goto RETURN;
 			}
-#endif
 		}
 
 		/* Make sure that alignment is a large enough power of 2. */
 		if (((alignment - 1) & alignment) != 0
 		    || alignment < sizeof(void *)) {
-#ifdef JEMALLOC_XMALLOC
-			if (opt_xmalloc) {
+			if (config_xmalloc && opt_xmalloc) {
 				malloc_write("<jemalloc>: Error in "
 				    "posix_memalign(): invalid alignment\n");
 				abort();
 			}
-#endif
 			result = NULL;
 			ret = EINVAL;
 			goto RETURN;
@@ -1063,8 +984,7 @@ imemalign(void **memptr, size_t alignment, size_t size)
 			goto RETURN;
 		}
 
-#ifdef JEMALLOC_PROF
-		if (opt_prof) {
+		if (config_prof && opt_prof) {
 			PROF_ALLOC_PREP(2, usize, cnt);
 			if (cnt == NULL) {
 				result = NULL;
@@ -1086,18 +1006,15 @@ imemalign(void **memptr, size_t alignment, size_t size)
 				}
 			}
 		} else
-#endif
 			result = ipalloc(usize, alignment, false);
 	}
 
 	if (result == NULL) {
-#ifdef JEMALLOC_XMALLOC
-		if (opt_xmalloc) {
+		if (config_xmalloc && opt_xmalloc) {
 			malloc_write("<jemalloc>: Error in posix_memalign(): "
 			    "out of memory\n");
 			abort();
 		}
-#endif
 		ret = ENOMEM;
 		goto RETURN;
 	}
@@ -1106,16 +1023,12 @@ imemalign(void **memptr, size_t alignment, size_t size)
 	ret = 0;
 
 RETURN:
-#ifdef JEMALLOC_STATS
-	if (result != NULL) {
+	if (config_stats && result != NULL) {
 		assert(usize == isalloc(result));
 		ALLOCATED_ADD(usize, 0);
 	}
-#endif
-#ifdef JEMALLOC_PROF
-	if (opt_prof && result != NULL)
+	if (config_prof && opt_prof && result != NULL)
 		prof_malloc(result, usize, cnt);
-#endif
 	return (ret);
 }
 
@@ -1135,20 +1048,8 @@ JEMALLOC_P(calloc)(size_t num, size_t size)
 {
 	void *ret;
 	size_t num_size;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-	size_t usize
-#  ifdef JEMALLOC_CC_SILENCE
-	    = 0
-#  endif
-	    ;
-#endif
-#ifdef JEMALLOC_PROF
-	prof_thr_cnt_t *cnt
-#  ifdef JEMALLOC_CC_SILENCE
-	    = NULL
-#  endif
-	    ;
-#endif
+	size_t usize;
+	prof_thr_cnt_t *cnt;
 
 	if (malloc_init()) {
 		num_size = 0;
@@ -1158,16 +1059,13 @@ JEMALLOC_P(calloc)(size_t num, size_t size)
 
 	num_size = num * size;
 	if (num_size == 0) {
-#ifdef JEMALLOC_SYSV
-		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
-#endif
+		if ((config_sysv == false || opt_sysv == false)
+		    && ((num == 0) || (size == 0)))
 			num_size = 1;
-#ifdef JEMALLOC_SYSV
 		else {
 			ret = NULL;
 			goto RETURN;
 		}
-#endif
 	/*
 	 * Try to avoid division here.  We know that it isn't possible to
 	 * overflow during multiplication if neither operand uses any of the
@@ -1180,8 +1078,7 @@ JEMALLOC_P(calloc)(size_t num, size_t size)
 		goto RETURN;
 	}
 
-#ifdef JEMALLOC_PROF
-	if (opt_prof) {
+	if (config_prof && opt_prof) {
 		usize = s2u(num_size);
 		PROF_ALLOC_PREP(1, usize, cnt);
 		if (cnt == NULL) {
@@ -1195,37 +1092,28 @@ JEMALLOC_P(calloc)(size_t num, size_t size)
 				arena_prof_promoted(ret, usize);
 		} else
 			ret = icalloc(num_size);
-	} else
-#endif
-	{
-#ifdef JEMALLOC_STATS
-		usize = s2u(num_size);
-#endif
+	} else {
+		if (config_stats)
+			usize = s2u(num_size);
 		ret = icalloc(num_size);
 	}
 
 RETURN:
 	if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
-		if (opt_xmalloc) {
+		if (config_xmalloc && opt_xmalloc) {
 			malloc_write("<jemalloc>: Error in calloc(): out of "
 			    "memory\n");
 			abort();
 		}
-#endif
 		errno = ENOMEM;
 	}
 
-#ifdef JEMALLOC_PROF
-	if (opt_prof && ret != NULL)
+	if (config_prof && opt_prof && ret != NULL)
 		prof_malloc(ret, usize, cnt);
-#endif
-#ifdef JEMALLOC_STATS
-	if (ret != NULL) {
+	if (config_stats && ret != NULL) {
 		assert(usize == isalloc(ret));
 		ALLOCATED_ADD(usize, 0);
 	}
-#endif
 	return (ret);
 }
 
@@ -1234,67 +1122,39 @@ void *
 JEMALLOC_P(realloc)(void *ptr, size_t size)
 {
 	void *ret;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-	size_t usize
-#  ifdef JEMALLOC_CC_SILENCE
-	    = 0
-#  endif
-	    ;
+	size_t usize;
 	size_t old_size = 0;
-#endif
-#ifdef JEMALLOC_PROF
-	prof_thr_cnt_t *cnt
-#  ifdef JEMALLOC_CC_SILENCE
-	    = NULL
-#  endif
-	    ;
-	prof_ctx_t *old_ctx
-#  ifdef JEMALLOC_CC_SILENCE
-	    = NULL
-#  endif
-	    ;
-#endif
+	prof_thr_cnt_t *cnt;
+	prof_ctx_t *old_ctx;
 
 	if (size == 0) {
-#ifdef JEMALLOC_SYSV
-		if (opt_sysv == false)
-#endif
+		if (config_sysv == false || opt_sysv == false)
 			size = 1;
-#ifdef JEMALLOC_SYSV
 		else {
 			if (ptr != NULL) {
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-				old_size = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
-				if (opt_prof) {
+				if (config_prof || config_stats)
+					old_size = isalloc(ptr);
+				if (config_prof && opt_prof) {
 					old_ctx = prof_ctx_get(ptr);
 					cnt = NULL;
 				}
-#endif
 				idalloc(ptr);
-			}
-#ifdef JEMALLOC_PROF
-			else if (opt_prof) {
+			} else if (config_prof && opt_prof) {
 				old_ctx = NULL;
 				cnt = NULL;
 			}
-#endif
 			ret = NULL;
 			goto RETURN;
 		}
-#endif
 	}
 
 	if (ptr != NULL) {
 		assert(malloc_initialized || malloc_initializer ==
 		    pthread_self());
 
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-		old_size = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
-		if (opt_prof) {
+		if (config_prof || config_stats)
+			old_size = isalloc(ptr);
+		if (config_prof && opt_prof) {
 			usize = s2u(size);
 			old_ctx = prof_ctx_get(ptr);
 			PROF_ALLOC_PREP(1, usize, cnt);
@@ -1316,42 +1176,30 @@ JEMALLOC_P(realloc)(void *ptr, size_t size)
 				if (ret == NULL)
 					old_ctx = NULL;
 			}
-		} else
-#endif
-		{
-#ifdef JEMALLOC_STATS
-			usize = s2u(size);
-#endif
+		} else {
+			if (config_stats)
+				usize = s2u(size);
 			ret = iralloc(ptr, size, 0, 0, false, false);
 		}
 
-#ifdef JEMALLOC_PROF
 OOM:
-#endif
 		if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
-			if (opt_xmalloc) {
+			if (config_xmalloc && opt_xmalloc) {
 				malloc_write("<jemalloc>: Error in realloc(): "
 				    "out of memory\n");
 				abort();
 			}
-#endif
 			errno = ENOMEM;
 		}
 	} else {
-#ifdef JEMALLOC_PROF
-		if (opt_prof)
+		if (config_prof && opt_prof)
 			old_ctx = NULL;
-#endif
 		if (malloc_init()) {
-#ifdef JEMALLOC_PROF
-			if (opt_prof)
+			if (config_prof && opt_prof)
 				cnt = NULL;
-#endif
 			ret = NULL;
 		} else {
-#ifdef JEMALLOC_PROF
-			if (opt_prof) {
+			if (config_prof && opt_prof) {
 				usize = s2u(size);
 				PROF_ALLOC_PREP(1, usize, cnt);
 				if (cnt == NULL)
@@ -1368,41 +1216,30 @@ OOM:
 					} else
 						ret = imalloc(size);
 				}
-			} else
-#endif
-			{
-#ifdef JEMALLOC_STATS
-				usize = s2u(size);
-#endif
+			} else {
+				if (config_stats)
+					usize = s2u(size);
 				ret = imalloc(size);
 			}
 		}
 
 		if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
-			if (opt_xmalloc) {
+			if (config_xmalloc && opt_xmalloc) {
 				malloc_write("<jemalloc>: Error in realloc(): "
 				    "out of memory\n");
 				abort();
 			}
-#endif
 			errno = ENOMEM;
 		}
 	}
 
-#ifdef JEMALLOC_SYSV
 RETURN:
-#endif
-#ifdef JEMALLOC_PROF
-	if (opt_prof)
+	if (config_prof && opt_prof)
 		prof_realloc(ret, usize, cnt, old_size, old_ctx);
-#endif
-#ifdef JEMALLOC_STATS
-	if (ret != NULL) {
+	if (config_stats && ret != NULL) {
 		assert(usize == isalloc(ret));
 		ALLOCATED_ADD(usize, old_size);
 	}
-#endif
 	return (ret);
 }
 
@@ -1412,27 +1249,19 @@ JEMALLOC_P(free)(void *ptr)
 {
 
 	if (ptr != NULL) {
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
 		size_t usize;
-#endif
 
 		assert(malloc_initialized || malloc_initializer ==
 		    pthread_self());
 
-#ifdef JEMALLOC_STATS
-		usize = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
-		if (opt_prof) {
-#  ifndef JEMALLOC_STATS
+		if (config_prof && opt_prof) {
 			usize = isalloc(ptr);
-#  endif
 			prof_free(ptr, usize);
+		} else if (config_stats) {
+			usize = isalloc(ptr);
 		}
-#endif
-#ifdef JEMALLOC_STATS
-		ALLOCATED_ADD(0, usize);
-#endif
+		if (config_stats)
+			ALLOCATED_ADD(0, usize);
 		idalloc(ptr);
 	}
 }
@@ -1455,15 +1284,12 @@ JEMALLOC_ATTR(visibility("default"))
 void *
 JEMALLOC_P(memalign)(size_t alignment, size_t size)
 {
-	void *ret;
-#ifdef JEMALLOC_CC_SILENCE
-	int result =
-#endif
-	    imemalign(&ret, alignment, size);
+	void *ret
 #ifdef JEMALLOC_CC_SILENCE
-	if (result != 0)
-		return (NULL);
+	    = NULL
 #endif
+	    ;
+	imemalign(&ret, alignment, size);
 	return (ret);
 }
 #endif
@@ -1474,15 +1300,12 @@ JEMALLOC_ATTR(visibility("default"))
 void *
 JEMALLOC_P(valloc)(size_t size)
 {
-	void *ret;
+	void *ret
 #ifdef JEMALLOC_CC_SILENCE
-	int result =
-#endif
-	    imemalign(&ret, PAGE_SIZE, size);
-#ifdef JEMALLOC_CC_SILENCE
-	if (result != 0)
-		return (NULL);
+	    = NULL
 #endif
+	    ;
+	imemalign(&ret, PAGE_SIZE, size);
 	return (ret);
 }
 #endif
@@ -1504,12 +1327,12 @@ JEMALLOC_P(malloc_usable_size)(const void *ptr)
 
 	assert(malloc_initialized || malloc_initializer == pthread_self());
 
-#ifdef JEMALLOC_IVSALLOC
-	ret = ivsalloc(ptr);
-#else
-	assert(ptr != NULL);
-	ret = isalloc(ptr);
-#endif
+	if (config_ivsalloc)
+		ret = ivsalloc(ptr);
+	else {
+		assert(ptr != NULL);
+		ret = isalloc(ptr);
+	}
 
 	return (ret);
 }
@@ -1583,9 +1406,7 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
 	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
 	    & (SIZE_T_MAX-1));
 	bool zero = flags & ALLOCM_ZERO;
-#ifdef JEMALLOC_PROF
 	prof_thr_cnt_t *cnt;
-#endif
 
 	assert(ptr != NULL);
 	assert(size != 0);
@@ -1597,8 +1418,7 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
 	if (usize == 0)
 		goto OOM;
 
-#ifdef JEMALLOC_PROF
-	if (opt_prof) {
+	if (config_prof && opt_prof) {
 		PROF_ALLOC_PREP(1, usize, cnt);
 		if (cnt == NULL)
 			goto OOM;
@@ -1618,39 +1438,26 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
 				goto OOM;
 		}
 		prof_malloc(p, usize, cnt);
-		if (rsize != NULL)
-			*rsize = usize;
-	} else
-#endif
-	{
+	} else {
 		p = iallocm(usize, alignment, zero);
 		if (p == NULL)
 			goto OOM;
-#ifndef JEMALLOC_STATS
-		if (rsize != NULL)
-#endif
-		{
-#ifdef JEMALLOC_STATS
-			if (rsize != NULL)
-#endif
-				*rsize = usize;
-		}
 	}
+	if (rsize != NULL)
+		*rsize = usize;
 
 	*ptr = p;
-#ifdef JEMALLOC_STATS
-	assert(usize == isalloc(p));
-	ALLOCATED_ADD(usize, 0);
-#endif
+	if (config_stats) {
+		assert(usize == isalloc(p));
+		ALLOCATED_ADD(usize, 0);
+	}
 	return (ALLOCM_SUCCESS);
 OOM:
-#ifdef JEMALLOC_XMALLOC
-	if (opt_xmalloc) {
+	if (config_xmalloc && opt_xmalloc) {
 		malloc_write("<jemalloc>: Error in allocm(): "
 		    "out of memory\n");
 		abort();
 	}
-#endif
 	*ptr = NULL;
 	return (ALLOCM_ERR_OOM);
 }
@@ -1663,16 +1470,12 @@ JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
 {
 	void *p, *q;
 	size_t usize;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
 	size_t old_size;
-#endif
 	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
 	    & (SIZE_T_MAX-1));
 	bool zero = flags & ALLOCM_ZERO;
 	bool no_move = flags & ALLOCM_NO_MOVE;
-#ifdef JEMALLOC_PROF
 	prof_thr_cnt_t *cnt;
-#endif
 
 	assert(ptr != NULL);
 	assert(*ptr != NULL);
@@ -1681,8 +1484,7 @@ JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
 	assert(malloc_initialized || malloc_initializer == pthread_self());
 
 	p = *ptr;
-#ifdef JEMALLOC_PROF
-	if (opt_prof) {
+	if (config_prof && opt_prof) {
 		/*
 		 * usize isn't knowable before iralloc() returns when extra is
 		 * non-zero.  Therefore, compute its maximum possible value and
@@ -1722,45 +1524,34 @@ JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
 		prof_realloc(q, usize, cnt, old_size, old_ctx);
 		if (rsize != NULL)
 			*rsize = usize;
-	} else
-#endif
-	{
-#ifdef JEMALLOC_STATS
-		old_size = isalloc(p);
-#endif
+	} else {
+		if (config_stats)
+			old_size = isalloc(p);
 		q = iralloc(p, size, extra, alignment, zero, no_move);
 		if (q == NULL)
 			goto ERR;
-#ifndef JEMALLOC_STATS
-		if (rsize != NULL)
-#endif
-		{
+		if (config_stats)
 			usize = isalloc(q);
-#ifdef JEMALLOC_STATS
-			if (rsize != NULL)
-#endif
-				*rsize = usize;
+		if (rsize != NULL) {
+			if (config_stats == false)
+				usize = isalloc(q);
+			*rsize = usize;
 		}
 	}
 
 	*ptr = q;
-#ifdef JEMALLOC_STATS
-	ALLOCATED_ADD(usize, old_size);
-#endif
+	if (config_stats)
+		ALLOCATED_ADD(usize, old_size);
 	return (ALLOCM_SUCCESS);
 ERR:
 	if (no_move)
 		return (ALLOCM_ERR_NOT_MOVED);
-#ifdef JEMALLOC_PROF
 OOM:
-#endif
-#ifdef JEMALLOC_XMALLOC
-	if (opt_xmalloc) {
+	if (config_xmalloc && opt_xmalloc) {
 		malloc_write("<jemalloc>: Error in rallocm(): "
 		    "out of memory\n");
 		abort();
 	}
-#endif
 	return (ALLOCM_ERR_OOM);
 }
 
@@ -1773,12 +1564,12 @@ JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
 
 	assert(malloc_initialized || malloc_initializer == pthread_self());
 
-#ifdef JEMALLOC_IVSALLOC
-	sz = ivsalloc(ptr);
-#else
-	assert(ptr != NULL);
-	sz = isalloc(ptr);
-#endif
+	if (config_ivsalloc)
+		sz = ivsalloc(ptr);
+	else {
+		assert(ptr != NULL);
+		sz = isalloc(ptr);
+	}
 	assert(rsize != NULL);
 	*rsize = sz;
 
@@ -1790,27 +1581,20 @@ JEMALLOC_ATTR(visibility("default"))
 int
 JEMALLOC_P(dallocm)(void *ptr, int flags)
 {
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
 	size_t usize;
-#endif
 
 	assert(ptr != NULL);
 	assert(malloc_initialized || malloc_initializer == pthread_self());
 
-#ifdef JEMALLOC_STATS
-	usize = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
-	if (opt_prof) {
-#  ifndef JEMALLOC_STATS
+	if (config_stats)
 		usize = isalloc(ptr);
-#  endif
+	if (config_prof && opt_prof) {
+		if (config_stats == false)
+			usize = isalloc(ptr);
 		prof_free(ptr, usize);
 	}
-#endif
-#ifdef JEMALLOC_STATS
-	ALLOCATED_ADD(0, usize);
-#endif
+	if (config_stats)
+		ALLOCATED_ADD(0, usize);
 	idalloc(ptr);
 
 	return (ALLOCM_SUCCESS);
@@ -1843,13 +1627,11 @@ jemalloc_prefork(void)
 
 	malloc_mutex_lock(&huge_mtx);
 
-#ifdef JEMALLOC_DSS
-	malloc_mutex_lock(&dss_mtx);
-#endif
+	if (config_dss)
+		malloc_mutex_lock(&dss_mtx);
 
-#ifdef JEMALLOC_SWAP
-	malloc_mutex_lock(&swap_mtx);
-#endif
+	if (config_swap)
+		malloc_mutex_lock(&swap_mtx);
 }
 
 void
@@ -1859,13 +1641,11 @@ jemalloc_postfork(void)
 
 	/* Release all mutexes, now that fork() has completed. */
 
-#ifdef JEMALLOC_SWAP
-	malloc_mutex_unlock(&swap_mtx);
-#endif
+	if (config_swap)
+		malloc_mutex_unlock(&swap_mtx);
 
-#ifdef JEMALLOC_DSS
-	malloc_mutex_unlock(&dss_mtx);
-#endif
+	if (config_dss)
+		malloc_mutex_unlock(&dss_mtx);
 
 	malloc_mutex_unlock(&huge_mtx);
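
jemalloc_prefork()/jemalloc_postfork() keep the allocator fork-safe: every
allocator mutex is taken before fork() and released afterwards, in reverse
order, so the child never inherits a mutex held by a thread that did not
survive the fork.  Hooked up via pthread_atfork(), the pattern looks like this
sketch:

  #include <pthread.h>

  static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

  static void
  prefork(void)
  {
      /* Acquire in a fixed order... */
      pthread_mutex_lock(&m1);
      pthread_mutex_lock(&m2);
  }

  static void
  postfork(void)
  {
      /* ...release in reverse order, in both parent and child. */
      pthread_mutex_unlock(&m2);
      pthread_mutex_unlock(&m1);
  }

  static void
  install(void)
  {
      pthread_atfork(prefork, postfork, postfork);
  }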
 
diff --git a/src/prof.c b/src/prof.c
index 8a144b4..113cf15 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -1,6 +1,5 @@
 #define	JEMALLOC_PROF_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_PROF
 /******************************************************************************/
 
 #ifdef JEMALLOC_PROF_LIBUNWIND
@@ -102,6 +101,8 @@ void
 bt_init(prof_bt_t *bt, void **vec)
 {
 
+	cassert(config_prof);
+
 	bt->vec = vec;
 	bt->len = 0;
 }
@@ -110,6 +111,8 @@ static void
 bt_destroy(prof_bt_t *bt)
 {
 
+	cassert(config_prof);
+
 	idalloc(bt);
 }
 
@@ -118,6 +121,8 @@ bt_dup(prof_bt_t *bt)
 {
 	prof_bt_t *ret;
 
+	cassert(config_prof);
+
 	/*
 	 * Create a single allocation that has space for vec immediately
 	 * following the prof_bt_t structure.  The backtraces that get
@@ -141,6 +146,8 @@ static inline void
 prof_enter(void)
 {
 
+	cassert(config_prof);
+
 	malloc_mutex_lock(&enq_mtx);
 	enq = true;
 	malloc_mutex_unlock(&enq_mtx);
@@ -153,6 +160,8 @@ prof_leave(void)
 {
 	bool idump, gdump;
 
+	cassert(config_prof);
+
 	malloc_mutex_unlock(&bt2ctx_mtx);
 
 	malloc_mutex_lock(&enq_mtx);
@@ -178,6 +187,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	unsigned i;
 	int err;
 
+	cassert(config_prof);
 	assert(bt->len == 0);
 	assert(bt->vec != NULL);
 	assert(max <= (1U << opt_lg_prof_bt_max));
@@ -204,12 +214,13 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 			break;
 	}
 }
-#endif
-#ifdef JEMALLOC_PROF_LIBGCC
+#elif (defined(JEMALLOC_PROF_LIBGCC))
 static _Unwind_Reason_Code
 prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
 {
 
+	cassert(config_prof);
+
 	return (_URC_NO_REASON);
 }
 
@@ -218,6 +229,8 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg)
 {
 	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
 
+	cassert(config_prof);
+
 	if (data->nignore > 0)
 		data->nignore--;
 	else {
@@ -235,10 +248,11 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 {
 	prof_unwind_data_t data = {bt, nignore, max};
 
+	cassert(config_prof);
+
 	_Unwind_Backtrace(prof_unwind_callback, &data);
 }
-#endif
-#ifdef JEMALLOC_PROF_GCC
+#elif (defined(JEMALLOC_PROF_GCC))
 void
 prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 {
@@ -257,6 +271,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	} else								\
 		return;
 
+	cassert(config_prof);
 	assert(nignore <= 3);
 	assert(max <= (1U << opt_lg_prof_bt_max));
 
@@ -407,6 +422,14 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	BT_FRAME(130)
 #undef BT_FRAME
 }
+#else
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+{
+
+	cassert(config_prof);
+	assert(false);
+}
 #endif
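
The cassert(config_prof) calls added throughout prof.c document, and in debug
builds enforce, that these functions are unreachable when profiling is
compiled out; since the bodies now always compile, a stray call would
otherwise go unnoticed.  A plausible definition for such a helper (the actual
macro lives in the internal headers, which this hunk does not show):

  #include <assert.h>
  #include <stdbool.h>

  /* Assert on a configuration constant: a no-op the optimizer deletes
   * when c is compile-time true, a debug-build abort otherwise. */
  #define cassert(c) do {                           \
      if ((c) == false)                             \
          assert(false);                            \
  } while (0)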
 
 prof_thr_cnt_t *
@@ -418,6 +441,8 @@ prof_lookup(prof_bt_t *bt)
 	} ret;
 	prof_tdata_t *prof_tdata;
 
+	cassert(config_prof);
+
 	prof_tdata = PROF_TCACHE_GET();
 	if (prof_tdata == NULL) {
 		prof_tdata = prof_tdata_init();
@@ -553,6 +578,8 @@ prof_flush(bool propagate_err)
 	bool ret = false;
 	ssize_t err;
 
+	cassert(config_prof);
+
 	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
 	if (err == -1) {
 		if (propagate_err == false) {
@@ -573,6 +600,8 @@ prof_write(const char *s, bool propagate_err)
 {
 	unsigned i, slen, n;
 
+	cassert(config_prof);
+
 	i = 0;
 	slen = strlen(s);
 	while (i < slen) {
@@ -602,6 +631,8 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
 	prof_thr_cnt_t *thr_cnt;
 	prof_cnt_t tcnt;
 
+	cassert(config_prof);
+
 	malloc_mutex_lock(&ctx->lock);
 
 	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
@@ -648,6 +679,8 @@ static void
 prof_ctx_destroy(prof_ctx_t *ctx)
 {
 
+	cassert(config_prof);
+
 	/*
 	 * Check that ctx is still unused by any thread cache before destroying
 	 * it.  prof_lookup() artificially raises ctx->cnt_merge.curobjs in
@@ -686,6 +719,8 @@ prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
 {
 	bool destroy;
 
+	cassert(config_prof);
+
 	/* Merge cnt stats and detach from ctx. */
 	malloc_mutex_lock(&ctx->lock);
 	ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
@@ -723,6 +758,8 @@ prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
 	char buf[UMAX2S_BUFSIZE];
 	unsigned i;
 
+	cassert(config_prof);
+
 	if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
 		assert(ctx->cnt_summed.curbytes == 0);
 		assert(ctx->cnt_summed.accumobjs == 0);
@@ -767,6 +804,8 @@ prof_dump_maps(bool propagate_err)
 	char mpath[6     + UMAX2S_BUFSIZE
 			      + 5  + 1];
 
+	cassert(config_prof);
+
 	i = 0;
 
 	s = "/proc/";
@@ -827,6 +866,8 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
 	char buf[UMAX2S_BUFSIZE];
 	size_t leak_nctx;
 
+	cassert(config_prof);
+
 	prof_enter();
 	prof_dump_fd = creat(filename, 0644);
 	if (prof_dump_fd == -1) {
@@ -917,6 +958,8 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
 	char *s;
 	unsigned i, slen;
 
+	cassert(config_prof);
+
 	/*
 	 * Construct a filename of the form:
 	 *
@@ -979,6 +1022,8 @@ prof_fdump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 
@@ -995,6 +1040,8 @@ prof_idump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 	malloc_mutex_lock(&enq_mtx);
@@ -1019,6 +1066,8 @@ prof_mdump(const char *filename)
 {
 	char filename_buf[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (opt_prof == false || prof_booted == false)
 		return (true);
 
@@ -1040,6 +1089,8 @@ prof_gdump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 	malloc_mutex_lock(&enq_mtx);
@@ -1066,6 +1117,7 @@ prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
 	uint64_t h;
 	prof_bt_t *bt = (prof_bt_t *)key;
 
+	cassert(config_prof);
 	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
 	assert(hash1 != NULL);
 	assert(hash2 != NULL);
@@ -1094,6 +1146,8 @@ prof_bt_keycomp(const void *k1, const void *k2)
 	const prof_bt_t *bt1 = (prof_bt_t *)k1;
 	const prof_bt_t *bt2 = (prof_bt_t *)k2;
 
+	cassert(config_prof);
+
 	if (bt1->len != bt2->len)
 		return (false);
 	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
@@ -1104,6 +1158,8 @@ prof_tdata_init(void)
 {
 	prof_tdata_t *prof_tdata;
 
+	cassert(config_prof);
+
 	/* Initialize an empty cache for this thread. */
 	prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
 	if (prof_tdata == NULL)
@@ -1138,6 +1194,8 @@ prof_tdata_cleanup(void *arg)
 	prof_thr_cnt_t *cnt;
 	prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
 
+	cassert(config_prof);
+
 	/*
 	 * Delete the hash table.  All of its contents can still be iterated
 	 * over via the LRU.
@@ -1161,6 +1219,8 @@ void
 prof_boot0(void)
 {
 
+	cassert(config_prof);
+
 	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
 	    sizeof(PROF_PREFIX_DEFAULT));
 }
@@ -1169,6 +1229,8 @@ void
 prof_boot1(void)
 {
 
+	cassert(config_prof);
+
 	/*
 	 * opt_prof and prof_promote must be in their final state before any
 	 * arenas are initialized, so this function must be executed early.
@@ -1197,6 +1259,8 @@ bool
 prof_boot2(void)
 {
 
+	cassert(config_prof);
+
 	if (opt_prof) {
 		if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
 		    prof_bt_keycomp))
@@ -1241,4 +1305,3 @@ prof_boot2(void)
 }
 
 /******************************************************************************/
-#endif /* JEMALLOC_PROF */
diff --git a/src/stats.c b/src/stats.c
index dc172e4..e644653 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -39,14 +39,11 @@
 
 bool	opt_stats_print = false;
 
-#ifdef JEMALLOC_STATS
 size_t	stats_cactive = 0;
-#endif
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-#ifdef JEMALLOC_STATS
 static void	malloc_vcprintf(void (*write_cb)(void *, const char *),
     void *cbopaque, const char *format, va_list ap);
 static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
@@ -55,10 +52,10 @@ static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
 static void	stats_arena_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
-#endif
 
 /******************************************************************************/
 
+/* XXX Refactor by adding malloc_vsnprintf(). */
 /*
  * We don't want to depend on vsnprintf() for production builds, since that can
  * cause unnecessary bloat for static binaries.  u2s() provides minimal integer
@@ -99,7 +96,6 @@ u2s(uint64_t x, unsigned base, char *s)
 	return (&s[i]);
 }
 
-#ifdef JEMALLOC_STATS
 static void
 malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, va_list ap)
@@ -149,9 +145,7 @@ malloc_printf(const char *format, ...)
 	malloc_vcprintf(NULL, NULL, format, ap);
 	va_end(ap);
 }
-#endif
 
-#ifdef JEMALLOC_STATS
 static void
 stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned i)
@@ -377,7 +371,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	stats_arena_bins_print(write_cb, cbopaque, i);
 	stats_arena_lruns_print(write_cb, cbopaque, i);
 }
-#endif
 
 void
 stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@@ -674,8 +667,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		write_cb(cbopaque, ")\n");
 	}
 
-#ifdef JEMALLOC_STATS
-	{
+	if (config_stats) {
 		int err;
 		size_t sszp, ssz;
 		size_t *cactive;
@@ -785,6 +777,5 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			}
 		}
 	}
-#endif /* #ifdef JEMALLOC_STATS */
 	write_cb(cbopaque, "--- End jemalloc statistics ---\n");
 }
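
The if (config_stats) conversion above depends on config_stats being a static
constant rather than a cpp symbol: the block always compiles (so bit rot is
caught in every configuration), yet the optimizer still discards it from
non-stats builds.  A standalone sketch of the shape, assuming the constant is
derived from the old JEMALLOC_STATS macro as in jemalloc_internal.h.in:

  #include <stdbool.h>
  #include <stdio.h>

  /*
   * The cpp conditional collapses to a plain constant; every use site
   * becomes an ordinary C branch that constant-folds at compile time.
   */
  static const bool config_stats =
  #ifdef JEMALLOC_STATS
      true
  #else
      false
  #endif
      ;

  int
  main(void)
  {

      if (config_stats)
          puts("--- Begin statistics ---");
      else
          puts("statistics not configured");
      return (0);
  }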
diff --git a/src/tcache.c b/src/tcache.c
index 31c329e..398fc0a 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -38,31 +38,22 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
 {
 	void *ret;
 
-	arena_tcache_fill_small(tcache->arena, tbin, binind
-#ifdef JEMALLOC_PROF
-	    , tcache->prof_accumbytes
-#endif
-	    );
-#ifdef JEMALLOC_PROF
-	tcache->prof_accumbytes = 0;
-#endif
+	arena_tcache_fill_small(tcache->arena, tbin, binind,
+	    config_prof ? tcache->prof_accumbytes : 0);
+	if (config_prof)
+		tcache->prof_accumbytes = 0;
 	ret = tcache_alloc_easy(tbin);
 
 	return (ret);
 }
 
 void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif
 
 	assert(binind < nbins);
 	assert(rem <= tbin->ncached);
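
This first tcache.c hunk shows the companion idiom for arguments: instead of
conditionally compiling a parameter in and out, the callee always receives
it, and the caller passes a constant-folded zero when profiling is off.  A
reduced sketch of the pattern (the function and variable names here are
illustrative, not the patch's):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  static const bool config_prof =
  #ifdef JEMALLOC_PROF
      true
  #else
      false
  #endif
      ;

  /* The parameter always exists; it is simply 0 in non-prof builds. */
  static void
  fill_small(uint64_t prof_accumbytes)
  {

      if (config_prof && prof_accumbytes > 0)
          printf("accumulated %llu bytes\n",
              (unsigned long long)prof_accumbytes);
  }

  int
  main(void)
  {
      uint64_t accumbytes = 1024;

      /* The ternary folds to 0 when config_prof is false. */
      fill_small(config_prof ? accumbytes : 0);
      if (config_prof)
          accumbytes = 0;
      return (0);
  }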
@@ -74,25 +65,21 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 		arena_t *arena = chunk->arena;
 		arena_bin_t *bin = &arena->bins[binind];
 
-#ifdef JEMALLOC_PROF
-		if (arena == tcache->arena) {
+		if (config_prof && arena == tcache->arena) {
 			malloc_mutex_lock(&arena->lock);
 			arena_prof_accum(arena, tcache->prof_accumbytes);
 			malloc_mutex_unlock(&arena->lock);
 			tcache->prof_accumbytes = 0;
 		}
-#endif
 
 		malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
-		if (arena == tcache->arena) {
+		if (config_stats && arena == tcache->arena) {
 			assert(merged_stats == false);
 			merged_stats = true;
 			bin->stats.nflushes++;
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			tbin->tstats.nrequests = 0;
 		}
-#endif
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
@@ -117,8 +104,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 		}
 		malloc_mutex_unlock(&bin->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -130,7 +116,6 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&bin->lock);
 	}
-#endif
 
 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -140,17 +125,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 }
 
 void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif
 
 	assert(binind < nhbins);
 	assert(rem <= tbin->ncached);
@@ -162,23 +142,21 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 		arena_t *arena = chunk->arena;
 
 		malloc_mutex_lock(&arena->lock);
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-		if (arena == tcache->arena) {
-#endif
-#ifdef JEMALLOC_PROF
-			arena_prof_accum(arena, tcache->prof_accumbytes);
-			tcache->prof_accumbytes = 0;
-#endif
-#ifdef JEMALLOC_STATS
-			merged_stats = true;
-			arena->stats.nrequests_large += tbin->tstats.nrequests;
-			arena->stats.lstats[binind - nbins].nrequests +=
-			    tbin->tstats.nrequests;
-			tbin->tstats.nrequests = 0;
-#endif
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
+		if ((config_prof || config_stats) && arena == tcache->arena) {
+			if (config_prof) {
+				arena_prof_accum(arena,
+				    tcache->prof_accumbytes);
+				tcache->prof_accumbytes = 0;
+			}
+			if (config_stats) {
+				merged_stats = true;
+				arena->stats.nrequests_large +=
+				    tbin->tstats.nrequests;
+				arena->stats.lstats[binind - nbins].nrequests +=
+				    tbin->tstats.nrequests;
+				tbin->tstats.nrequests = 0;
+			}
 		}
-#endif
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
@@ -199,8 +177,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 		}
 		malloc_mutex_unlock(&arena->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -213,7 +190,6 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&arena->lock);
 	}
-#endif
 
 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -254,13 +230,13 @@ tcache_create(arena_t *arena)
 	if (tcache == NULL)
 		return (NULL);
 
-#ifdef JEMALLOC_STATS
-	/* Link into list of extant tcaches. */
-	malloc_mutex_lock(&arena->lock);
-	ql_elm_new(tcache, link);
-	ql_tail_insert(&arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&arena->lock);
-#endif
+	if (config_stats) {
+		/* Link into list of extant tcaches. */
+		malloc_mutex_lock(&arena->lock);
+		ql_elm_new(tcache, link);
+		ql_tail_insert(&arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&arena->lock);
+	}
 
 	tcache->arena = arena;
 	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
@@ -282,43 +258,32 @@ tcache_destroy(tcache_t *tcache)
 	unsigned i;
 	size_t tcache_size;
 
-#ifdef JEMALLOC_STATS
-	/* Unlink from list of extant tcaches. */
-	malloc_mutex_lock(&tcache->arena->lock);
-	ql_remove(&tcache->arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&tcache->arena->lock);
-	tcache_stats_merge(tcache, tcache->arena);
-#endif
+	if (config_stats) {
+		/* Unlink from list of extant tcaches. */
+		malloc_mutex_lock(&tcache->arena->lock);
+		ql_remove(&tcache->arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&tcache->arena->lock);
+		tcache_stats_merge(tcache, tcache->arena);
+	}
 
 	for (i = 0; i < nbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_small(tbin, i, 0, tcache);
 
-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			arena_bin_t *bin = &arena->bins[i];
 			malloc_mutex_lock(&bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			malloc_mutex_unlock(&bin->lock);
 		}
-#endif
 	}
 
 	for (; i < nhbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_large(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_large(tbin, i, 0, tcache);
 
-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			malloc_mutex_lock(&arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
@@ -326,16 +291,13 @@ tcache_destroy(tcache_t *tcache)
 			    tbin->tstats.nrequests;
 			malloc_mutex_unlock(&arena->lock);
 		}
-#endif
 	}
 
-#ifdef JEMALLOC_PROF
-	if (tcache->prof_accumbytes > 0) {
+	if (config_prof && tcache->prof_accumbytes > 0) {
 		malloc_mutex_lock(&tcache->arena->lock);
 		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
 		malloc_mutex_unlock(&tcache->arena->lock);
 	}
-#endif
 
 	tcache_size = arena_salloc(tcache);
 	if (tcache_size <= small_maxclass) {
@@ -389,7 +351,6 @@ tcache_thread_cleanup(void *arg)
 	}
 }
 
-#ifdef JEMALLOC_STATS
 void
 tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 {
@@ -413,7 +374,6 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 		tbin->tstats.nrequests = 0;
 	}
 }
-#endif
 
 bool
 tcache_boot(void)
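
Taken together, the tcache.c hunks above converge on one signature per
function in every configuration, with feature-specific work hiding behind
constant branches inside the callee.  A heavily reduced sketch of that final
shape; the struct fields and names below are simplified stand-ins, not
jemalloc's real types:

  #include <stdbool.h>
  #include <stdio.h>

  static const bool config_stats =
  #ifdef JEMALLOC_STATS
      true
  #else
      false
  #endif
      ;

  /* Simplified stand-ins for jemalloc's tcache_bin_t and tcache_t. */
  typedef struct {
      unsigned ncached;
      unsigned nrequests;  /* Present in all builds; idle if stats are off. */
  } tcache_bin_t;

  typedef struct {
      tcache_bin_t tbin;
  } tcache_t;

  /*
   * One signature for every build: the tcache parameter is always
   * passed, and stats-only work sits behind a constant branch.
   */
  static void
  tcache_bin_flush(tcache_bin_t *tbin, unsigned rem, tcache_t *tcache)
  {

      (void)tcache;
      if (config_stats && tbin->nrequests != 0) {
          printf("merging %u deferred requests\n", tbin->nrequests);
          tbin->nrequests = 0;
      }
      tbin->ncached = rem;
  }

  int
  main(void)
  {
      tcache_t tcache = {{ .ncached = 4, .nrequests = 7 }};

      tcache_bin_flush(&tcache.tbin, 0, &tcache);
      return (0);
  }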
-- 
cgit v0.12