author     Jason Evans <je@fb.com>    2012-02-11 04:22:09 (GMT)
committer  Jason Evans <je@fb.com>    2012-02-11 04:22:09 (GMT)
commit     7372b15a31c63ac5cb9ed8aeabc2a0a3c005e8bf (patch)
tree       9b0f1156e6aa61f50a01c90b72fdaefeabe414a8 /include/jemalloc
parent     b3bd885090230cc28add77c399b4ed440b760ca3 (diff)
Reduce cpp conditional logic complexity.
Convert configuration-related cpp conditional logic to use static
constant variables, e.g.:
  #ifdef JEMALLOC_DEBUG
    [...]
  #endif

becomes:

  if (config_debug) {
    [...]
  }
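
As a stand-alone illustration (not part of this commit), the pattern relies on
the flag being a compile-time constant: an optimizing compiler drops the branch
entirely when the feature is disabled, yet the guarded code is still parsed and
type-checked in every build. The constant is folded from the same cpp symbol,
exactly as done in jemalloc_internal.h.in below:

  #include <stdbool.h>
  #include <stdio.h>

  /* Compile-time constant derived from the configuration cpp symbol. */
  static const bool config_debug =
  #ifdef JEMALLOC_DEBUG
      true
  #else
      false
  #endif
      ;

  int
  main(void)
  {
      if (config_debug) {
          /* Dead-code-eliminated unless built with -DJEMALLOC_DEBUG. */
          fprintf(stderr, "debug checks enabled\n");
      }
      return (0);
  }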
The advantage is clearer, more concise code. The main disadvantage is
that data structures no longer have conditionally defined fields, so
they pay the cost of all fields regardless of whether they are used. In
practice, this is only a minor concern; config_stats will go away in an
upcoming change, and config_prof is the only other major feature that
depends on more than a few special-purpose fields.
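
Where per-element overhead does matter, the arena.h diff below keeps the
footprint unchanged by overlaying prof_ctx with the existing union whenever
JEMALLOC_PROF is disabled. A condensed, hypothetical sketch of that trick
follows (EXAMPLE_PROF stands in for JEMALLOC_PROF; anonymous unions require
C11 or a GNU extension):

  #include <stdio.h>

  typedef struct prof_ctx_s prof_ctx_t;

  struct map_elem_s {
  #ifndef EXAMPLE_PROF
      /*
       * Overlay prof_ctx so that dead code may still reference the field
       * without enlarging the struct.
       */
      union {
  #endif
      union {
          void    *rb_link;
          void    *ql_link;
      } u;
      prof_ctx_t  *prof_ctx;
  #ifndef EXAMPLE_PROF
      };
  #endif
      size_t      bits;
  };

  int
  main(void)
  {
      /* Without EXAMPLE_PROF defined, prof_ctx shares storage with u. */
      printf("%zu bytes\n", sizeof(struct map_elem_s));
      return (0);
  }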
Diffstat (limited to 'include/jemalloc')
-rw-r--r--   include/jemalloc/internal/arena.h                  112
-rw-r--r--   include/jemalloc/internal/chunk.h                    6
-rw-r--r--   include/jemalloc/internal/chunk_dss.h                2
-rw-r--r--   include/jemalloc/internal/chunk_swap.h               4
-rw-r--r--   include/jemalloc/internal/ckh.h                      2
-rw-r--r--   include/jemalloc/internal/ctl.h                      6
-rw-r--r--   include/jemalloc/internal/extent.h                   6
-rw-r--r--   include/jemalloc/internal/huge.h                     4
-rw-r--r--   include/jemalloc/internal/jemalloc_internal.h.in   169
-rw-r--r--   include/jemalloc/internal/mutex.h                   12
-rw-r--r--   include/jemalloc/internal/prof.h                    15
-rw-r--r--   include/jemalloc/internal/stats.h                   21
-rw-r--r--   include/jemalloc/internal/tcache.h                 114
-rw-r--r--   include/jemalloc/jemalloc_defs.h.in                  6
14 files changed, 235 insertions, 244 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index b80c118..b6a5c23 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -16,11 +16,9 @@
 #define SUBPAGE_CEILING(s)						\
     (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)
 
-#ifdef JEMALLOC_TINY
-   /* Smallest size class to support. */
-#  define LG_TINY_MIN		LG_SIZEOF_PTR
-#  define TINY_MIN		(1U << LG_TINY_MIN)
-#endif
+/* Smallest size class to support. */
+#define LG_TINY_MIN		LG_SIZEOF_PTR
+#define TINY_MIN		(1U << LG_TINY_MIN)
 
 /*
  * Maximum size class that is a multiple of the quantum, but not (necessarily)
@@ -85,6 +83,15 @@ typedef struct arena_s arena_t;
 
 /* Each element of the chunk map corresponds to one page within the chunk. */
 struct arena_chunk_map_s {
+#ifndef JEMALLOC_PROF
+    /*
+     * Overlay prof_ctx in order to allow it to be referenced by dead code.
+     * Such antics aren't warranted for per arena data structures, but
+     * chunk map overhead accounts for a percentage of memory, rather than
+     * being just a fixed cost.
+     */
+    union {
+#endif
     union {
         /*
          * Linkage for run trees.  There are two disjoint uses:
@@ -103,9 +110,10 @@ struct arena_chunk_map_s {
         ql_elm(arena_chunk_map_t)	ql_link;
     } u;
 
-#ifdef JEMALLOC_PROF
     /* Profile counters, used for large object runs. */
     prof_ctx_t			*prof_ctx;
+#ifndef JEMALLOC_PROF
+    }; /* union { ... }; */
 #endif
 
     /*
@@ -162,10 +170,8 @@ struct arena_chunk_map_s {
      *   ssssssss ssssssss ssss---- ----D-LA
      */
     size_t				bits;
-#ifdef JEMALLOC_PROF
 #define CHUNK_MAP_CLASS_SHIFT	4
 #define CHUNK_MAP_CLASS_MASK	((size_t)0xff0U)
-#endif
 #define CHUNK_MAP_FLAGS_MASK	((size_t)0xfU)
 #define CHUNK_MAP_DIRTY		((size_t)0x8U)
 #define CHUNK_MAP_UNZEROED	((size_t)0x4U)
@@ -205,10 +211,8 @@ struct arena_chunk_s {
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
 struct arena_run_s {
-#ifdef JEMALLOC_DEBUG
     uint32_t	magic;
 #  define ARENA_RUN_MAGIC 0x384adf93
-#endif
 
     /* Bin this run is associated with. */
     arena_bin_t	*bin;
@@ -247,13 +251,11 @@ struct arena_bin_info_s {
      */
     bitmap_info_t	bitmap_info;
 
-#ifdef JEMALLOC_PROF
     /*
      * Offset of first (prof_ctx_t *) in a run header for this bin's size
-     * class, or 0 if (opt_prof == false).
+     * class, or 0 if (config_prof == false || opt_prof == false).
      */
     uint32_t	ctx0_offset;
-#endif
 
     /* Offset of first region in a run for this bin's size class. */
     uint32_t	reg0_offset;
@@ -283,17 +285,13 @@ struct arena_bin_s {
      */
     arena_run_tree_t runs;
 
-#ifdef JEMALLOC_STATS
     /* Bin statistics. */
     malloc_bin_stats_t stats;
-#endif
 };
 
 struct arena_s {
-#ifdef JEMALLOC_DEBUG
     uint32_t		magic;
 #  define ARENA_MAGIC 0x947d3d24
-#endif
 
     /* This arena's index within the arenas array. */
     unsigned		ind;
@@ -314,20 +312,14 @@ struct arena_s {
      */
     malloc_mutex_t		lock;
 
-#ifdef JEMALLOC_STATS
     arena_stats_t		stats;
-#  ifdef JEMALLOC_TCACHE
     /*
      * List of tcaches for extant threads associated with this arena.
      * Stats from these are merged incrementally, and at exit.
      */
     ql_head(tcache_t)	tcache_ql;
-#  endif
-#endif
 
-#ifdef JEMALLOC_PROF
     uint64_t		prof_accumbytes;
-#endif
 
     /* List of dirty-page-containing chunks this arena manages. */
     ql_head(arena_chunk_t)	chunks_dirty;
@@ -455,35 +447,23 @@ extern size_t	sspace_max;
 #define nlclasses (chunk_npages - map_bias)
 
 void	arena_purge_all(arena_t *arena);
-#ifdef JEMALLOC_PROF
 void	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-#endif
-#ifdef JEMALLOC_TCACHE
 void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
-    size_t binind
-#  ifdef JEMALLOC_PROF
-    , uint64_t prof_accumbytes
-#  endif
-    );
-#endif
+    size_t binind, uint64_t prof_accumbytes);
 void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
 void	*arena_malloc(size_t size, bool zero);
 void	*arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
     size_t alignment, bool zero);
 size_t	arena_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
 void	arena_prof_promoted(const void *ptr, size_t size);
 size_t	arena_salloc_demote(const void *ptr);
-#endif
 void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm);
 void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-#ifdef JEMALLOC_STATS
 void	arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats);
-#endif
 void	*arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void	*arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
@@ -499,10 +479,8 @@ bool	arena_boot(void);
 size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
-#  ifdef JEMALLOC_PROF
 prof_ctx_t	*arena_prof_ctx_get(const void *ptr);
 void	arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-#  endif
 void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 #endif
@@ -521,7 +499,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
     unsigned shift, diff, regind;
     size_t size;
 
-    dassert(run->magic == ARENA_RUN_MAGIC);
+    assert(run->magic == ARENA_RUN_MAGIC);
     /*
      * Freeing a pointer lower than region zero can cause assertion
      * failure.
@@ -586,7 +564,6 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
     return (regind);
 }
 
-#ifdef JEMALLOC_PROF
 JEMALLOC_INLINE prof_ctx_t *
 arena_prof_ctx_get(const void *ptr)
 {
@@ -594,6 +571,7 @@ arena_prof_ctx_get(const void *ptr)
     arena_chunk_t *chunk;
     size_t pageind, mapbits;
 
+    cassert(config_prof);
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
@@ -612,7 +590,7 @@ arena_prof_ctx_get(const void *ptr)
             arena_bin_info_t *bin_info = &arena_bin_info[binind];
             unsigned regind;
 
-            dassert(run->magic == ARENA_RUN_MAGIC);
+            assert(run->magic == ARENA_RUN_MAGIC);
             regind = arena_run_regind(run, bin_info, ptr);
             ret = *(prof_ctx_t **)((uintptr_t)run +
                 bin_info->ctx0_offset + (regind *
@@ -630,6 +608,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
     arena_chunk_t *chunk;
     size_t pageind, mapbits;
 
+    cassert(config_prof);
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
@@ -647,7 +626,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
             arena_bin_info_t *bin_info;
             unsigned regind;
 
-            dassert(run->magic == ARENA_RUN_MAGIC);
+            assert(run->magic == ARENA_RUN_MAGIC);
             binind = arena_bin_index(chunk->arena, bin);
             bin_info = &arena_bin_info[binind];
             regind = arena_run_regind(run, bin_info, ptr);
@@ -659,7 +638,6 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
     } else
         chunk->map[pageind-map_bias].prof_ctx = ctx;
 }
-#endif
 
 JEMALLOC_INLINE void
 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@@ -668,7 +646,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
     arena_chunk_map_t *mapelm;
 
     assert(arena != NULL);
-    dassert(arena->magic == ARENA_MAGIC);
+    assert(arena->magic == ARENA_MAGIC);
     assert(chunk->arena == arena);
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
@@ -678,63 +656,57 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
     assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
         /* Small allocation. */
-#ifdef JEMALLOC_TCACHE
         tcache_t *tcache;
 
-        if ((tcache = tcache_get()) != NULL)
+        if (config_tcache && (tcache = tcache_get()) != NULL)
             tcache_dalloc_small(tcache, ptr);
         else {
-#endif
             arena_run_t *run;
             arena_bin_t *bin;
 
             run = (arena_run_t *)((uintptr_t)chunk +
                 (uintptr_t)((pageind - (mapelm->bits >>
                 PAGE_SHIFT)) << PAGE_SHIFT));
-            dassert(run->magic == ARENA_RUN_MAGIC);
+            assert(run->magic == ARENA_RUN_MAGIC);
             bin = run->bin;
-#ifdef JEMALLOC_DEBUG
-            {
+            if (config_debug) {
                 size_t binind = arena_bin_index(arena, bin);
-                arena_bin_info_t *bin_info =
+                UNUSED arena_bin_info_t *bin_info =
                     &arena_bin_info[binind];
                 assert(((uintptr_t)ptr - ((uintptr_t)run +
                     (uintptr_t)bin_info->reg0_offset)) %
                     bin_info->reg_size == 0);
             }
-#endif
             malloc_mutex_lock(&bin->lock);
             arena_dalloc_bin(arena, chunk, ptr, mapelm);
             malloc_mutex_unlock(&bin->lock);
-#ifdef JEMALLOC_TCACHE
         }
-#endif
     } else {
-#ifdef JEMALLOC_TCACHE
-        size_t size = mapelm->bits & ~PAGE_MASK;
-
-        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-        if (size <= tcache_maxclass) {
-            tcache_t *tcache;
-
-            if ((tcache = tcache_get()) != NULL)
-                tcache_dalloc_large(tcache, ptr, size);
-            else {
+        if (config_tcache) {
+            size_t size = mapelm->bits & ~PAGE_MASK;
+
+            assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+            if (size <= tcache_maxclass) {
+                tcache_t *tcache;
+
+                if ((tcache = tcache_get()) != NULL)
+                    tcache_dalloc_large(tcache, ptr, size);
+                else {
+                    malloc_mutex_lock(&arena->lock);
+                    arena_dalloc_large(arena, chunk, ptr);
+                    malloc_mutex_unlock(&arena->lock);
+                }
+            } else {
                 malloc_mutex_lock(&arena->lock);
                 arena_dalloc_large(arena, chunk, ptr);
                 malloc_mutex_unlock(&arena->lock);
             }
         } else {
+            assert(((uintptr_t)ptr & PAGE_MASK) == 0);
             malloc_mutex_lock(&arena->lock);
             arena_dalloc_large(arena, chunk, ptr);
             malloc_mutex_unlock(&arena->lock);
         }
-#else
-        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-        malloc_mutex_lock(&arena->lock);
-        arena_dalloc_large(arena, chunk, ptr);
-        malloc_mutex_unlock(&arena->lock);
-#endif
     }
 }
 
 #endif
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 54b6a3e..4cc1e80 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -28,20 +28,14 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 extern size_t		opt_lg_chunk;
-#ifdef JEMALLOC_SWAP
 extern bool		opt_overcommit;
-#endif
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 /* Protects stats_chunks; currently not used for any other purpose. */
 extern malloc_mutex_t	chunks_mtx;
 /* Chunk statistics. */
 extern chunk_stats_t	stats_chunks;
-#endif
 
-#ifdef JEMALLOC_IVSALLOC
 extern rtree_t		*chunks_rtree;
-#endif
 
 extern size_t		chunksize;
 extern size_t		chunksize_mask; /* (chunksize - 1). */
diff --git a/include/jemalloc/internal/chunk_dss.h b/include/jemalloc/internal/chunk_dss.h
index 6f00522..35cd461 100644
--- a/include/jemalloc/internal/chunk_dss.h
+++ b/include/jemalloc/internal/chunk_dss.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_DSS
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -27,4 +26,3 @@ bool	chunk_dss_boot(void);
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_DSS */
diff --git a/include/jemalloc/internal/chunk_swap.h b/include/jemalloc/internal/chunk_swap.h
index 9faa739..99a079e 100644
--- a/include/jemalloc/internal/chunk_swap.h
+++ b/include/jemalloc/internal/chunk_swap.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_SWAP
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -15,9 +14,7 @@ extern bool	swap_enabled;
 extern bool	swap_prezeroed;
 extern size_t	swap_nfds;
 extern int	*swap_fds;
-#ifdef JEMALLOC_STATS
 extern size_t	swap_avail;
-#endif
 
 void	*chunk_alloc_swap(size_t size, bool *zero);
 bool	chunk_in_swap(void *chunk);
@@ -31,4 +28,3 @@ bool	chunk_swap_boot(void);
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_SWAP */
diff --git a/include/jemalloc/internal/ckh.h b/include/jemalloc/internal/ckh.h
index 3e4ad4c..28f171c 100644
--- a/include/jemalloc/internal/ckh.h
+++ b/include/jemalloc/internal/ckh.h
@@ -30,10 +30,8 @@ struct ckhc_s {
 };
 
 struct ckh_s {
-#ifdef JEMALLOC_DEBUG
 #define CKH_MAGIC	0x3af2489d
     uint32_t	magic;
-#endif
 
 #ifdef CKH_COUNT
     /* Counters used to get an idea of performance. */
diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index f1f5eb7..31f9d99 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -32,7 +32,6 @@ struct ctl_arena_stats_s {
     unsigned		nthreads;
     size_t		pactive;
     size_t		pdirty;
-#ifdef JEMALLOC_STATS
     arena_stats_t	astats;
 
     /* Aggregate stats for small size classes, based on bin stats. */
@@ -43,11 +42,9 @@ struct ctl_arena_stats_s {
 
     malloc_bin_stats_t	*bstats;	/* nbins elements. */
     malloc_large_stats_t	*lstats;	/* nlclasses elements. */
-#endif
 };
 
 struct ctl_stats_s {
-#ifdef JEMALLOC_STATS
     size_t		allocated;
     size_t		active;
     size_t		mapped;
@@ -61,11 +58,8 @@ struct ctl_stats_s {
         uint64_t	nmalloc;	/* huge_nmalloc */
         uint64_t	ndalloc;	/* huge_ndalloc */
     }			huge;
-#endif
     ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
-#ifdef JEMALLOC_SWAP
     size_t		swap_avail;
-#endif
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 6fe9702..36af8be 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -9,18 +9,14 @@ typedef struct extent_node_s extent_node_t;
 
 /* Tree of extents. */
 struct extent_node_s {
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
     /* Linkage for the size/address-ordered tree. */
     rb_node(extent_node_t)	link_szad;
-#endif
 
     /* Linkage for the address-ordered tree. */
     rb_node(extent_node_t)	link_ad;
 
-#ifdef JEMALLOC_PROF
     /* Profile counters, used for huge objects. */
     prof_ctx_t		*prof_ctx;
-#endif
 
     /* Pointer to the extent that this tree node is responsible for. */
     void		*addr;
@@ -34,9 +30,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
-#endif
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 66544cf..3a6b0b8 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -9,12 +9,10 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#ifdef JEMALLOC_STATS
 /* Huge allocation statistics. */
 extern uint64_t		huge_nmalloc;
 extern uint64_t		huge_ndalloc;
 extern size_t		huge_allocated;
-#endif
 
 /* Protects chunk-related data structures. */
 extern malloc_mutex_t	huge_mtx;
@@ -27,10 +25,8 @@ void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
     size_t alignment, bool zero);
 void	huge_dalloc(void *ptr, bool unmap);
 size_t	huge_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
 prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
 void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-#endif
 bool	huge_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index a44f097..8842e4b 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -35,6 +35,125 @@
 
 #include "jemalloc/internal/private_namespace.h"
 
+#ifdef JEMALLOC_CC_SILENCE
+#define UNUSED JEMALLOC_ATTR(unused)
+#else
+#define UNUSED
+#endif
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_dss =
+#ifdef JEMALLOC_DSS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_dynamic_page_shift =
+#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_swap =
+#ifdef JEMALLOC_SWAP
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_sysv =
+#ifdef JEMALLOC_SYSV
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tiny =
+#ifdef JEMALLOC_TINY
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+    true
+#else
+    false
+#endif
+    ;
+
 #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
 #include <libkern/OSAtomic.h>
 #endif
@@ -82,11 +201,11 @@ extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 #  endif
 #endif
 
-#ifdef JEMALLOC_DEBUG
-#  define dassert(e) assert(e)
-#else
-#  define dassert(e)
-#endif
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define cassert(c) do {							\
+    if ((c) == false)							\
+        assert(false);							\
+} while (0)
 
 /*
  * jemalloc can conceptually be broken into components (arena, tcache, etc.),
@@ -265,30 +384,20 @@ extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 #endif
 #include "jemalloc/internal/prof.h"
 
-#ifdef JEMALLOC_STATS
 typedef struct {
     uint64_t	allocated;
     uint64_t	deallocated;
 } thread_allocated_t;
-#endif
 
 #undef JEMALLOC_H_STRUCTS
 /******************************************************************************/
 #define JEMALLOC_H_EXTERNS
 
 extern bool	opt_abort;
-#ifdef JEMALLOC_FILL
 extern bool	opt_junk;
-#endif
-#ifdef JEMALLOC_SYSV
 extern bool	opt_sysv;
-#endif
-#ifdef JEMALLOC_XMALLOC
 extern bool	opt_xmalloc;
-#endif
-#ifdef JEMALLOC_FILL
 extern bool	opt_zero;
-#endif
 extern size_t	opt_narenas;
 
 #ifdef DYNAMIC_PAGE_SHIFT
@@ -327,8 +436,7 @@ extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
 extern arena_t		**arenas;
 extern unsigned		narenas;
 
-#ifdef JEMALLOC_STATS
-#  ifndef NO_TLS
+#ifndef NO_TLS
 extern __thread thread_allocated_t	thread_allocated_tls;
 #  define ALLOCATED_GET() (thread_allocated_tls.allocated)
 #  define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
@@ -338,10 +446,7 @@ extern __thread thread_allocated_t	thread_allocated_tls;
     thread_allocated_tls.allocated += a;				\
     thread_allocated_tls.deallocated += d;				\
 } while (0)
-#  else
-extern pthread_key_t	thread_allocated_tsd;
-thread_allocated_t	*thread_allocated_get_hard(void);
-
+#else
 #  define ALLOCATED_GET() (thread_allocated_get()->allocated)
 #  define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
 #  define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
@@ -351,8 +456,9 @@ thread_allocated_t	*thread_allocated_get_hard(void);
     thread_allocated->allocated += (a);				\
     thread_allocated->deallocated += (d);				\
 } while (0)
-#  endif
 #endif
+extern pthread_key_t	thread_allocated_tsd;
+thread_allocated_t	*thread_allocated_get_hard(void);
 
 arena_t	*arenas_extend(unsigned ind);
 arena_t	*choose_arena_hard(void);
@@ -403,9 +509,7 @@ size_t	s2u(size_t size);
 size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
 void	malloc_write(const char *s);
 arena_t	*choose_arena(void);
-#  if (defined(JEMALLOC_STATS) && defined(NO_TLS))
 thread_allocated_t	*thread_allocated_get(void);
-#  endif
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -565,7 +669,6 @@ choose_arena(void)
     return (ret);
 }
 
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
 JEMALLOC_INLINE thread_allocated_t *
 thread_allocated_get(void)
 {
@@ -577,7 +680,6 @@ thread_allocated_get(void)
     return (thread_allocated);
 }
 #endif
-#endif
 
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/rtree.h"
@@ -593,9 +695,7 @@ void	*imalloc(size_t size);
 void	*icalloc(size_t size);
 void	*ipalloc(size_t usize, size_t alignment, bool zero);
 size_t	isalloc(const void *ptr);
-#  ifdef JEMALLOC_IVSALLOC
 size_t	ivsalloc(const void *ptr);
-#  endif
 void	idalloc(void *ptr);
 void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
     bool zero, bool no_move);
@@ -674,20 +774,18 @@ isalloc(const void *ptr)
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
+        assert(chunk->arena->magic == ARENA_MAGIC);
 
-#ifdef JEMALLOC_PROF
-        ret = arena_salloc_demote(ptr);
-#else
-        ret = arena_salloc(ptr);
-#endif
+        if (config_prof)
+            ret = arena_salloc_demote(ptr);
+        else
+            ret = arena_salloc(ptr);
     } else
         ret = huge_salloc(ptr);
 
     return (ret);
 }
 
-#ifdef JEMALLOC_IVSALLOC
 JEMALLOC_INLINE size_t
 ivsalloc(const void *ptr)
 {
@@ -698,7 +796,6 @@ ivsalloc(const void *ptr)
 
     return (isalloc(ptr));
 }
-#endif
 
 JEMALLOC_INLINE void
 idalloc(void *ptr)
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
index 62947ce..6a7b4fc 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
@@ -3,14 +3,14 @@
 
 #ifdef JEMALLOC_OSSPIN
 typedef OSSpinLock malloc_mutex_t;
+#define MALLOC_MUTEX_INITIALIZER	0
 #else
 typedef pthread_mutex_t malloc_mutex_t;
-#endif
-
-#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-#  define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-#else
-#  define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#  ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#    define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#  else
+#    define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#  endif
 #endif
 
 #endif /* JEMALLOC_H_TYPES */
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index e9064ba..d470080 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_PROF
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -297,6 +296,8 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
     uint64_t r;
     double u;
 
+    cassert(config_prof);
+
     /*
      * Compute sample threshold as a geometrically distributed random
      * variable with mean (2^opt_lg_prof_sample).
@@ -329,12 +330,13 @@ prof_ctx_get(const void *ptr)
     prof_ctx_t *ret;
     arena_chunk_t *chunk;
 
+    cassert(config_prof);
     assert(ptr != NULL);
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
+        assert(chunk->arena->magic == ARENA_MAGIC);
 
         ret = arena_prof_ctx_get(ptr);
     } else
@@ -348,12 +350,13 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 {
     arena_chunk_t *chunk;
 
+    cassert(config_prof);
     assert(ptr != NULL);
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
+        assert(chunk->arena->magic == ARENA_MAGIC);
 
         arena_prof_ctx_set(ptr, ctx);
     } else
@@ -365,6 +368,7 @@ prof_sample_accum_update(size_t size)
 {
     prof_tdata_t *prof_tdata;
 
+    cassert(config_prof);
     /* Sampling logic is unnecessary if the interval is 1. */
     assert(opt_lg_prof_sample != 0);
@@ -391,6 +395,7 @@ JEMALLOC_INLINE void
 prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
 {
 
+    cassert(config_prof);
     assert(ptr != NULL);
     assert(size == isalloc(ptr));
 
@@ -437,6 +442,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
 {
     prof_thr_cnt_t *told_cnt;
 
+    cassert(config_prof);
     assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
 
     if (ptr != NULL) {
@@ -510,6 +516,8 @@ prof_free(const void *ptr, size_t size)
 {
     prof_ctx_t *ctx = prof_ctx_get(ptr);
 
+    cassert(config_prof);
+
     if ((uintptr_t)ctx > (uintptr_t)1) {
         assert(size == isalloc(ptr));
         prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
@@ -544,4 +552,3 @@ prof_free(const void *ptr, size_t size)
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_PROF */
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index 2a9b31d..64ba4bd 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -3,23 +3,16 @@
 
 #define UMAX2S_BUFSIZE	65
 
-#ifdef JEMALLOC_STATS
 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
 typedef struct malloc_large_stats_s malloc_large_stats_t;
 typedef struct arena_stats_s arena_stats_t;
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 typedef struct chunk_stats_s chunk_stats_t;
-#endif
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-#ifdef JEMALLOC_STATS
-
-#ifdef JEMALLOC_TCACHE
 struct tcache_bin_stats_s {
     /*
      * Number of allocation requests that corresponded to the size of this
@@ -27,7 +20,6 @@ struct tcache_bin_stats_s {
      */
     uint64_t	nrequests;
 };
-#endif
 
 struct malloc_bin_stats_s {
     /*
@@ -52,13 +44,11 @@ struct malloc_bin_stats_s {
      */
     uint64_t	nrequests;
 
-#ifdef JEMALLOC_TCACHE
     /* Number of tcache fills from this bin. */
     uint64_t	nfills;
 
     /* Number of tcache flushes to this bin. */
     uint64_t	nflushes;
-#endif
 
     /* Total number of runs created for this bin's size class. */
     uint64_t	nruns;
@@ -127,14 +117,10 @@ struct arena_stats_s {
      */
     malloc_large_stats_t	*lstats;
 };
-#endif /* JEMALLOC_STATS */
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 struct chunk_stats_s {
-#  ifdef JEMALLOC_STATS
     /* Number of chunks that were allocated. */
     uint64_t	nchunks;
-#  endif
 
     /* High-water mark for number of chunks allocated. */
     size_t	highchunks;
@@ -146,7 +132,6 @@ struct chunk_stats_s {
      */
     size_t	curchunks;
 };
-#endif /* JEMALLOC_STATS */
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
@@ -154,24 +139,19 @@ struct chunk_stats_s {
 
 extern bool	opt_stats_print;
 
-#ifdef JEMALLOC_STATS
 extern size_t	stats_cactive;
-#endif
 
 char	*u2s(uint64_t x, unsigned base, char *s);
-#ifdef JEMALLOC_STATS
 void	malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
     const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
 void	malloc_printf(const char *format, ...)
     JEMALLOC_ATTR(format(printf, 1, 2));
-#endif
 void	stats_print(void (*write)(void *, const char *), void *cbopaque,
     const char *opts);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
-#ifdef JEMALLOC_STATS
 
 #ifndef JEMALLOC_ENABLE_INLINE
 size_t	stats_cactive_get(void);
@@ -202,6 +182,5 @@ stats_cactive_sub(size_t size)
 }
 #endif
 
-#endif /* JEMALLOC_STATS */
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index da3c68c..0855d32 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -42,9 +42,7 @@ struct tcache_bin_info_s {
 };
 
 struct tcache_bin_s {
-#  ifdef JEMALLOC_STATS
     tcache_bin_stats_t tstats;
-#  endif
     int		low_water;	/* Min # cached since last GC. */
     unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
     unsigned	ncached;	/* # of cached objects. */
@@ -52,12 +50,8 @@ struct tcache_bin_s {
 };
 
 struct tcache_s {
-#  ifdef JEMALLOC_STATS
     ql_elm(tcache_t) link;		/* Used for aggregating stats. */
-#  endif
-#  ifdef JEMALLOC_PROF
     uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
-#  endif
     arena_t	*arena;		/* This thread's arena. */
     unsigned	ev_cnt;		/* Event count since incremental GC. */
     unsigned	next_gc_bin;	/* Next bin to GC. */
@@ -109,23 +103,15 @@ extern size_t	tcache_maxclass;
 /* Number of tcache allocation/deallocation events between incremental GCs. */
 extern unsigned	tcache_gc_incr;
 
-void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
-void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
+void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
+void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
 tcache_t *tcache_create(arena_t *arena);
 void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
     size_t binind);
 void	tcache_destroy(tcache_t *tcache);
-#ifdef JEMALLOC_STATS
 void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-#endif
 bool	tcache_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
@@ -195,19 +181,11 @@ tcache_event(tcache_t *tcache)
         if (binind < nbins) {
             tcache_bin_flush_small(tbin, binind,
                 tbin->ncached - tbin->low_water +
-                (tbin->low_water >> 2)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-                , tcache
-#endif
-                );
+                (tbin->low_water >> 2), tcache);
         } else {
             tcache_bin_flush_large(tbin, binind,
                 tbin->ncached - tbin->low_water +
-                (tbin->low_water >> 2)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-                , tcache
-#endif
-                );
+                (tbin->low_water >> 2), tcache);
         }
         /*
          * Reduce fill count by 2X.  Limit lg_fill_div such that
@@ -268,21 +246,19 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
     assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
 
     if (zero == false) {
-#ifdef JEMALLOC_FILL
-        if (opt_junk)
-            memset(ret, 0xa5, size);
-        else if (opt_zero)
-            memset(ret, 0, size);
-#endif
+        if (config_fill) {
+            if (opt_junk)
+                memset(ret, 0xa5, size);
+            else if (opt_zero)
+                memset(ret, 0, size);
+        }
     } else
         memset(ret, 0, size);
 
-#ifdef JEMALLOC_STATS
-    tbin->tstats.nrequests++;
-#endif
-#ifdef JEMALLOC_PROF
-    tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
-#endif
+    if (config_stats)
+        tbin->tstats.nrequests++;
+    if (config_prof)
+        tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
     tcache_event(tcache);
     return (ret);
 }
@@ -309,28 +285,28 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
         if (ret == NULL)
             return (NULL);
     } else {
-#ifdef JEMALLOC_PROF
-        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
-        size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
-            PAGE_SHIFT);
-        chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
-#endif
+        if (config_prof) {
+            arena_chunk_t *chunk =
+                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+                PAGE_SHIFT);
+            chunk->map[pageind-map_bias].bits &=
+                ~CHUNK_MAP_CLASS_MASK;
+        }
         if (zero == false) {
-#ifdef JEMALLOC_FILL
-            if (opt_junk)
-                memset(ret, 0xa5, size);
-            else if (opt_zero)
-                memset(ret, 0, size);
-#endif
+            if (config_fill) {
+                if (opt_junk)
+                    memset(ret, 0xa5, size);
+                else if (opt_zero)
+                    memset(ret, 0, size);
+            }
         } else
             memset(ret, 0, size);
 
-#ifdef JEMALLOC_STATS
-        tbin->tstats.nrequests++;
-#endif
-#ifdef JEMALLOC_PROF
-        tcache->prof_accumbytes += size;
-#endif
+        if (config_stats)
+            tbin->tstats.nrequests++;
+        if (config_prof)
+            tcache->prof_accumbytes += size;
     }
 
     tcache_event(tcache);
@@ -357,26 +333,20 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
     mapelm = &chunk->map[pageind-map_bias];
     run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
         (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
-    dassert(run->magic == ARENA_RUN_MAGIC);
+    assert(run->magic == ARENA_RUN_MAGIC);
     bin = run->bin;
     binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
         sizeof(arena_bin_t);
     assert(binind < nbins);
 
-#ifdef JEMALLOC_FILL
-    if (opt_junk)
+    if (config_fill && opt_junk)
         memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
-#endif
 
     tbin = &tcache->tbins[binind];
     tbin_info = &tcache_bin_info[binind];
     if (tbin->ncached == tbin_info->ncached_max) {
         tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
-            1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-            , tcache
-#endif
-            );
+            1), tcache);
     }
     assert(tbin->ncached < tbin_info->ncached_max);
     tbin->avail[tbin->ncached] = ptr;
@@ -403,20 +373,14 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
     binind = nbins + (size >> PAGE_SHIFT) - 1;
 
-#ifdef JEMALLOC_FILL
-    if (opt_junk)
+    if (config_fill && opt_junk)
         memset(ptr, 0x5a, size);
-#endif
 
     tbin = &tcache->tbins[binind];
     tbin_info = &tcache_bin_info[binind];
     if (tbin->ncached == tbin_info->ncached_max) {
         tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
-            1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-            , tcache
-#endif
-            );
+            1), tcache);
     }
     assert(tbin->ncached < tbin_info->ncached_max);
     tbin->avail[tbin->ncached] = ptr;
diff --git a/include/jemalloc/jemalloc_defs.h.in b/include/jemalloc/jemalloc_defs.h.in
index 9ac7e1c..d8052e2 100644
--- a/include/jemalloc/jemalloc_defs.h.in
+++ b/include/jemalloc/jemalloc_defs.h.in
@@ -48,9 +48,11 @@
 /* Defined if __attribute__((...)) syntax is supported. */
 #undef JEMALLOC_HAVE_ATTR
 #ifdef JEMALLOC_HAVE_ATTR
-#  define JEMALLOC_ATTR(s) __attribute__((s))
+#  define JEMALLOC_CATTR(s, a) __attribute__((s))
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
 #else
-#  define JEMALLOC_ATTR(s)
+#  define JEMALLOC_CATTR(s, a) a
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
 #endif
 
 /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
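
One consequence of replacing #ifdef with runtime-constant branches is that
debug-only locals now exist in every build, which is why the arena_dalloc hunk
above tags bin_info with the new UNUSED macro (JEMALLOC_ATTR(unused) when
JEMALLOC_CC_SILENCE is defined). A minimal sketch of why that annotation is
needed, assuming a GCC-style compiler with warnings enabled:

  #include <assert.h>
  #include <stdbool.h>

  #define UNUSED __attribute__((unused))

  static const bool config_debug = false;

  void
  check(int *p)
  {
      if (config_debug) {
          /*
           * Consumed only by assert(); under NDEBUG the variable would
           * otherwise trigger a set-but-unused warning, since the branch
           * is compiled (then discarded) in every configuration.
           */
          UNUSED int value = *p;
          assert(value >= 0);
      }
  }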