author     Jason Evans <jasone@canonware.com>    2017-01-16 00:56:30 (GMT)
committer  Jason Evans <jasone@canonware.com>    2017-01-21 05:43:07 (GMT)
commit     c4c2592c834d8a37beb0a0d53842095160cbf9ee (patch)
tree       e4717ea6a2f13926dadd74ea1fc83f9742f77968
parent     5154ff32ee8c37bacb6afd8a07b923eb33228357 (diff)
Update brace style.
Add braces around single-line blocks, and remove line breaks before function-opening braces. This resolves #537.
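
For reference, a minimal before/after sketch of the new style (a hypothetical function, not taken from this diff): single-statement blocks gain braces, and the function's opening brace moves up onto the signature line.

    /* Before: */
    static unsigned
    widget_count_get(const widget_t *widget)
    {
        if (widget == NULL)
            return (0);
        return (widget->count);
    }

    /* After: */
    static unsigned
    widget_count_get(const widget_t *widget) {
        if (widget == NULL) {
            return (0);
        }
        return (widget->count);
    }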
-rw-r--r--  include/jemalloc/internal/arena_inlines_a.h | 27
-rw-r--r--  include/jemalloc/internal/arena_inlines_b.h | 59
-rw-r--r--  include/jemalloc/internal/assert.h | 3
-rw-r--r--  include/jemalloc/internal/atomic_inlines.h | 180
-rw-r--r--  include/jemalloc/internal/base_inlines.h | 3
-rw-r--r--  include/jemalloc/internal/bitmap_inlines.h | 24
-rw-r--r--  include/jemalloc/internal/extent_inlines.h | 111
-rw-r--r--  include/jemalloc/internal/hash_inlines.h | 30
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in | 172
-rw-r--r--  include/jemalloc/internal/jemalloc_internal_decls.h | 3
-rw-r--r--  include/jemalloc/internal/mutex_inlines.h | 18
-rw-r--r--  include/jemalloc/internal/ph.h | 50
-rw-r--r--  include/jemalloc/internal/prng_inlines.h | 27
-rw-r--r--  include/jemalloc/internal/prof_inlines.h | 59
-rw-r--r--  include/jemalloc/internal/qr.h | 6
-rw-r--r--  include/jemalloc/internal/rb.h | 3
-rw-r--r--  include/jemalloc/internal/rtree_inlines.h | 99
-rw-r--r--  include/jemalloc/internal/spin_inlines.h | 12
-rw-r--r--  include/jemalloc/internal/tcache_inlines.h | 90
-rw-r--r--  include/jemalloc/internal/ticker_inlines.h | 15
-rw-r--r--  include/jemalloc/internal/tsd_inlines.h | 42
-rw-r--r--  include/jemalloc/internal/tsd_types.h | 161
-rw-r--r--  include/jemalloc/internal/util_inlines.h | 48
-rw-r--r--  include/jemalloc/internal/util_types.h | 3
-rw-r--r--  include/jemalloc/internal/witness_inlines.h | 75
-rw-r--r--  include/msvc_compat/strings.h | 20
-rw-r--r--  msvc/projects/vc2015/test_threads/test_threads.cpp | 3
-rw-r--r--  msvc/projects/vc2015/test_threads/test_threads_main.cpp | 3
-rw-r--r--  src/arena.c | 516
-rw-r--r--  src/base.c | 99
-rw-r--r--  src/bitmap.c | 30
-rw-r--r--  src/ckh.c | 101
-rw-r--r--  src/ctl.c | 344
-rw-r--r--  src/extent.c | 375
-rw-r--r--  src/extent_dss.c | 68
-rw-r--r--  src/extent_mmap.c | 22
-rw-r--r--  src/jemalloc.c | 497
-rw-r--r--  src/jemalloc_cpp.cpp | 33
-rw-r--r--  src/large.c | 87
-rw-r--r--  src/mutex.c | 40
-rw-r--r--  src/nstime.c | 57
-rw-r--r--  src/pages.c | 85
-rw-r--r--  src/prof.c | 568
-rw-r--r--  src/rtree.c | 70
-rw-r--r--  src/stats.c | 45
-rw-r--r--  src/tcache.c | 120
-rw-r--r--  src/tsd.c | 42
-rw-r--r--  src/util.c | 96
-rw-r--r--  src/witness.c | 27
-rw-r--r--  src/zone.c | 96
-rw-r--r--  test/include/test/SFMT.h | 34
-rw-r--r--  test/include/test/btalloc.h | 7
-rw-r--r--  test/include/test/extent_hooks.h | 51
-rw-r--r--  test/include/test/jemalloc_test.h.in | 3
-rw-r--r--  test/include/test/math.h | 50
-rw-r--r--  test/include/test/mq.h | 27
-rw-r--r--  test/include/test/test.h | 3
-rw-r--r--  test/integration/MALLOCX_ARENA.c | 12
-rw-r--r--  test/integration/aligned_alloc.c | 21
-rw-r--r--  test/integration/allocated.c | 24
-rw-r--r--  test/integration/cpp/basic.cpp | 6
-rw-r--r--  test/integration/extent.c | 18
-rw-r--r--  test/integration/mallocx.c | 42
-rw-r--r--  test/integration/overflow.c | 6
-rw-r--r--  test/integration/posix_memalign.c | 21
-rw-r--r--  test/integration/rallocx.c | 33
-rw-r--r--  test/integration/sdallocx.c | 15
-rw-r--r--  test/integration/thread_arena.c | 9
-rw-r--r--  test/integration/thread_tcache_enabled.c | 12
-rw-r--r--  test/integration/xallocx.c | 63
-rw-r--r--  test/src/btalloc.c | 3
-rw-r--r--  test/src/mq.c | 3
-rw-r--r--  test/src/mtx.c | 19
-rw-r--r--  test/src/test.c | 30
-rw-r--r--  test/src/thd.c | 18
-rw-r--r--  test/src/timer.c | 18
-rw-r--r--  test/stress/microbench.c | 57
-rw-r--r--  test/unit/SFMT.c | 15
-rw-r--r--  test/unit/a0.c | 6
-rw-r--r--  test/unit/arena_reset.c | 69
-rw-r--r--  test/unit/atomic.c | 18
-rw-r--r--  test/unit/base.c | 12
-rw-r--r--  test/unit/bitmap.c | 51
-rw-r--r--  test/unit/ckh.c | 18
-rw-r--r--  test/unit/decay.c | 27
-rw-r--r--  test/unit/extent_quantize.c | 12
-rw-r--r--  test/unit/fork.c | 9
-rw-r--r--  test/unit/hash.c | 27
-rw-r--r--  test/unit/junk.c | 33
-rw-r--r--  test/unit/mallctl.c | 75
-rw-r--r--  test/unit/math.c | 33
-rw-r--r--  test/unit/mq.c | 21
-rw-r--r--  test/unit/mtx.c | 18
-rw-r--r--  test/unit/nstime.c | 36
-rw-r--r--  test/unit/pack.c | 27
-rw-r--r--  test/unit/pages.c | 6
-rw-r--r--  test/unit/ph.c | 57
-rw-r--r--  test/unit/prng.c | 51
-rw-r--r--  test/unit/prof_accum.c | 18
-rw-r--r--  test/unit/prof_active.c | 27
-rw-r--r--  test/unit/prof_gdump.c | 9
-rw-r--r--  test/unit/prof_idump.c | 9
-rw-r--r--  test/unit/prof_reset.c | 36
-rw-r--r--  test/unit/prof_tctx.c | 6
-rw-r--r--  test/unit/prof_thread_name.c | 21
-rw-r--r--  test/unit/ql.c | 42
-rw-r--r--  test/unit/qr.c | 42
-rw-r--r--  test/unit/rb.c | 84
-rw-r--r--  test/unit/rtree.c | 39
-rw-r--r--  test/unit/size_classes.c | 15
-rw-r--r--  test/unit/slab.c | 6
-rw-r--r--  test/unit/smoothstep.c | 15
-rw-r--r--  test/unit/stats.c | 30
-rw-r--r--  test/unit/stats_print.c | 51
-rw-r--r--  test/unit/ticker.c | 12
-rw-r--r--  test/unit/tsd.c | 15
-rw-r--r--  test/unit/util.c | 24
-rw-r--r--  test/unit/witness.c | 40
-rw-r--r--  test/unit/zero.c | 12
119 files changed, 2971 insertions, 3572 deletions
diff --git a/include/jemalloc/internal/arena_inlines_a.h b/include/jemalloc/internal/arena_inlines_a.h
index d241b8a..3c2b9b0 100644
--- a/include/jemalloc/internal/arena_inlines_a.h
+++ b/include/jemalloc/internal/arena_inlines_a.h
@@ -14,32 +14,27 @@ bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE unsigned
-arena_ind_get(const arena_t *arena)
-{
+arena_ind_get(const arena_t *arena) {
return (base_ind_get(arena->base));
}
JEMALLOC_INLINE void
-arena_internal_add(arena_t *arena, size_t size)
-{
+arena_internal_add(arena_t *arena, size_t size) {
atomic_add_zu(&arena->stats.internal, size);
}
JEMALLOC_INLINE void
-arena_internal_sub(arena_t *arena, size_t size)
-{
+arena_internal_sub(arena_t *arena, size_t size) {
atomic_sub_zu(&arena->stats.internal, size);
}
JEMALLOC_INLINE size_t
-arena_internal_get(arena_t *arena)
-{
+arena_internal_get(arena_t *arena) {
return (atomic_read_zu(&arena->stats.internal));
}
JEMALLOC_INLINE bool
-arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
-{
+arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
assert(prof_interval != 0);
@@ -52,22 +47,22 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
}
JEMALLOC_INLINE bool
-arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
-{
+arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
- if (likely(prof_interval == 0))
+ if (likely(prof_interval == 0)) {
return (false);
+ }
return (arena_prof_accum_impl(arena, accumbytes));
}
JEMALLOC_INLINE bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
-{
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
- if (likely(prof_interval == 0))
+ if (likely(prof_interval == 0)) {
return (false);
+ }
{
bool ret;
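
The three arena_prof_accum*() variants above share one idea: allocated bytes accumulate in a counter, and the function reports true once the counter crosses prof_interval, signaling the caller to trigger an interval-based profile dump. A standalone sketch of that accumulate-until-threshold logic (simplified and lock-free; names hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint64_t accumbytes;  /* Bytes allocated since the last trigger. */
    } accum_t;

    /* Returns true exactly when the running total crosses the interval. */
    static bool
    accum_add(accum_t *acc, uint64_t nbytes, uint64_t interval) {
        acc->accumbytes += nbytes;
        if (acc->accumbytes >= interval) {
            acc->accumbytes %= interval;  /* Carry the remainder over. */
            return (true);
        }
        return (false);
    }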
diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index 9461466..5772781 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -23,39 +23,37 @@ void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE szind_t
-arena_bin_index(arena_t *arena, arena_bin_t *bin)
-{
+arena_bin_index(arena_t *arena, arena_bin_t *bin) {
szind_t binind = (szind_t)(bin - arena->bins);
assert(binind < NBINS);
return (binind);
}
JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
+arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- if (unlikely(!extent_slab_get(extent)))
+ if (unlikely(!extent_slab_get(extent))) {
return (large_prof_tctx_get(tsdn, extent));
+ }
return ((prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
- size_t usize, prof_tctx_t *tctx)
-{
+ size_t usize, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
- if (unlikely(!extent_slab_get(extent)))
+ if (unlikely(!extent_slab_get(extent))) {
large_prof_tctx_set(tsdn, extent, tctx);
+ }
}
JEMALLOC_INLINE void
arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
- prof_tctx_t *tctx)
-{
+ prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
assert(!extent_slab_get(extent));
@@ -64,24 +62,25 @@ arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
}
JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
-{
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
tsd_t *tsd;
ticker_t *decay_ticker;
- if (unlikely(tsdn_null(tsdn)))
+ if (unlikely(tsdn_null(tsdn))) {
return;
+ }
tsd = tsdn_tsd(tsdn);
decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
- if (unlikely(decay_ticker == NULL))
+ if (unlikely(decay_ticker == NULL)) {
return;
- if (unlikely(ticker_ticks(decay_ticker, nticks)))
+ }
+ if (unlikely(ticker_ticks(decay_ticker, nticks))) {
arena_purge(tsdn, arena, false);
+ }
}
JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
-{
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_not_owner(tsdn, &arena->lock);
arena_decay_ticks(tsdn, arena, 1);
@@ -89,8 +88,7 @@ arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool slow_path)
-{
+ tcache_t *tcache, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
@@ -111,31 +109,29 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
}
JEMALLOC_ALWAYS_INLINE arena_t *
-arena_aalloc(tsdn_t *tsdn, const void *ptr)
-{
+arena_aalloc(tsdn_t *tsdn, const void *ptr) {
return (extent_arena_get(iealloc(tsdn, ptr)));
}
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
+arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
size_t ret;
assert(ptr != NULL);
- if (likely(extent_slab_get(extent)))
+ if (likely(extent_slab_get(extent))) {
ret = index2size(extent_slab_data_get_const(extent)->binind);
- else
+ } else {
ret = large_salloc(tsdn, extent);
+ }
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
- bool slow_path)
-{
+ bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
@@ -160,15 +156,15 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache,
ptr, usize, slow_path);
}
- } else
+ } else {
large_dalloc(tsdn, extent);
+ }
}
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
- tcache_t *tcache, bool slow_path)
-{
+ tcache_t *tcache, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
@@ -192,8 +188,9 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size, slow_path);
}
- } else
+ } else {
large_dalloc(tsdn, extent);
+ }
}
}
diff --git a/include/jemalloc/internal/assert.h b/include/jemalloc/internal/assert.h
index 6f8f7eb..5da0ef4 100644
--- a/include/jemalloc/internal/assert.h
+++ b/include/jemalloc/internal/assert.h
@@ -37,8 +37,9 @@
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) \
+ if (unlikely(config_debug && !(e))) { \
not_implemented(); \
+ } \
} while (0)
#endif
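
Braces are especially important inside statement macros like the one above: without them, a second statement later added to the if body would silently land outside the conditional. A contrived illustration (log_failure() is hypothetical):

    /* Unbraced: only the first statement is guarded. */
    #define check_unbraced(e) do {                  \
        if (!(e))                                   \
            log_failure(#e);                        \
            abort();  /* Runs unconditionally: a bug. */ \
    } while (0)

    /* Braced: future additions stay inside the conditional. */
    #define check_braced(e) do {                    \
        if (!(e)) {                                 \
            log_failure(#e);                        \
            abort();                                \
        }                                           \
    } while (0)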
diff --git a/include/jemalloc/internal/atomic_inlines.h b/include/jemalloc/internal/atomic_inlines.h
index 89d1b35..790a08a 100644
--- a/include/jemalloc/internal/atomic_inlines.h
+++ b/include/jemalloc/internal/atomic_inlines.h
@@ -53,8 +53,7 @@ void atomic_write_u(unsigned *p, unsigned x);
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
uint64_t t = x;
asm volatile (
@@ -67,8 +66,7 @@ atomic_add_u64(uint64_t *p, uint64_t x)
}
JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
uint64_t t;
x = (uint64_t)(-(int64_t)x);
@@ -83,8 +81,7 @@ atomic_sub_u64(uint64_t *p, uint64_t x)
}
JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
uint8_t success;
asm volatile (
@@ -99,8 +96,7 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
}
JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
asm volatile (
"xchgq %1, %0;" /* Lock is implied by xchgq. */
: "=m" (*p), "+r" (x) /* Outputs. */
@@ -110,36 +106,31 @@ atomic_write_u64(uint64_t *p, uint64_t x)
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
/*
* atomic_fetchadd_64() doesn't exist, but we only ever use this
* function on LP64 systems, so atomic_fetchadd_long() will do.
@@ -150,50 +141,43 @@ atomic_add_u64(uint64_t *p, uint64_t x)
}
JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}
JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
assert(sizeof(uint64_t) == sizeof(unsigned long));
atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
uint64_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
@@ -203,20 +187,17 @@ atomic_write_u64(uint64_t *p, uint64_t x)
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}
JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
uint64_t o;
o = InterlockedCompareExchange64(p, s, c);
@@ -224,33 +205,28 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
}
JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
__sync_lock_test_and_set(p, x);
}
# else
@@ -262,8 +238,7 @@ atomic_write_u64(uint64_t *p, uint64_t x)
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
uint32_t t = x;
asm volatile (
@@ -276,8 +251,7 @@ atomic_add_u32(uint32_t *p, uint32_t x)
}
JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
uint32_t t;
x = (uint32_t)(-(int32_t)x);
@@ -292,8 +266,7 @@ atomic_sub_u32(uint32_t *p, uint32_t x)
}
JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
uint8_t success;
asm volatile (
@@ -308,8 +281,7 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
}
JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
asm volatile (
"xchgl %1, %0;" /* Lock is implied by xchgl. */
: "=m" (*p), "+r" (x) /* Outputs. */
@@ -319,78 +291,66 @@ atomic_write_u32(uint32_t *p, uint32_t x)
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_sub(a, x) - x);
}
JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
}
JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
return (!atomic_cmpset_32(p, c, s));
}
JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
uint32_t o;
/* The documented OSAtomic*() API does not expose an atomic exchange. */
@@ -400,20 +360,17 @@ atomic_write_u32(uint32_t *p, uint32_t x)
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}
JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
uint32_t o;
o = InterlockedCompareExchange(p, s, c);
@@ -421,33 +378,28 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
}
JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
__sync_lock_test_and_set(p, x);
}
#else
@@ -457,8 +409,7 @@ atomic_write_u32(uint32_t *p, uint32_t x)
/******************************************************************************/
/* Pointer operations. */
JEMALLOC_INLINE void *
-atomic_add_p(void **p, void *x)
-{
+atomic_add_p(void **p, void *x) {
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
@@ -467,8 +418,7 @@ atomic_add_p(void **p, void *x)
}
JEMALLOC_INLINE void *
-atomic_sub_p(void **p, void *x)
-{
+atomic_sub_p(void **p, void *x) {
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
@@ -477,8 +427,7 @@ atomic_sub_p(void **p, void *x)
}
JEMALLOC_INLINE bool
-atomic_cas_p(void **p, void *c, void *s)
-{
+atomic_cas_p(void **p, void *c, void *s) {
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
@@ -487,8 +436,7 @@ atomic_cas_p(void **p, void *c, void *s)
}
JEMALLOC_INLINE void
-atomic_write_p(void **p, const void *x)
-{
+atomic_write_p(void **p, const void *x) {
#if (LG_SIZEOF_PTR == 3)
atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
@@ -499,8 +447,7 @@ atomic_write_p(void **p, const void *x)
/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
-atomic_add_zu(size_t *p, size_t x)
-{
+atomic_add_zu(size_t *p, size_t x) {
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
@@ -509,8 +456,7 @@ atomic_add_zu(size_t *p, size_t x)
}
JEMALLOC_INLINE size_t
-atomic_sub_zu(size_t *p, size_t x)
-{
+atomic_sub_zu(size_t *p, size_t x) {
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
@@ -519,8 +465,7 @@ atomic_sub_zu(size_t *p, size_t x)
}
JEMALLOC_INLINE bool
-atomic_cas_zu(size_t *p, size_t c, size_t s)
-{
+atomic_cas_zu(size_t *p, size_t c, size_t s) {
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
@@ -529,8 +474,7 @@ atomic_cas_zu(size_t *p, size_t c, size_t s)
}
JEMALLOC_INLINE void
-atomic_write_zu(size_t *p, size_t x)
-{
+atomic_write_zu(size_t *p, size_t x) {
#if (LG_SIZEOF_PTR == 3)
atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
@@ -541,8 +485,7 @@ atomic_write_zu(size_t *p, size_t x)
/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
-atomic_add_u(unsigned *p, unsigned x)
-{
+atomic_add_u(unsigned *p, unsigned x) {
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
@@ -551,8 +494,7 @@ atomic_add_u(unsigned *p, unsigned x)
}
JEMALLOC_INLINE unsigned
-atomic_sub_u(unsigned *p, unsigned x)
-{
+atomic_sub_u(unsigned *p, unsigned x) {
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_u64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
@@ -563,8 +505,7 @@ atomic_sub_u(unsigned *p, unsigned x)
}
JEMALLOC_INLINE bool
-atomic_cas_u(unsigned *p, unsigned c, unsigned s)
-{
+atomic_cas_u(unsigned *p, unsigned c, unsigned s) {
#if (LG_SIZEOF_INT == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
@@ -573,8 +514,7 @@ atomic_cas_u(unsigned *p, unsigned c, unsigned s)
}
JEMALLOC_INLINE void
-atomic_write_u(unsigned *p, unsigned x)
-{
+atomic_write_u(unsigned *p, unsigned x) {
#if (LG_SIZEOF_INT == 3)
atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
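
Every backend above (inline asm, C11, atomic(9), OSAtomic, MSVC interlocked, __sync) implements the same contract: atomic_add_*() and atomic_sub_*() return the new value, atomic_cas_*() returns false on success, and atomic_write_*() is an atomic store. A portable C11 sketch of that contract for uint64_t (standalone, not jemalloc code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t
    my_atomic_add_u64(_Atomic uint64_t *p, uint64_t x) {
        return (atomic_fetch_add(p, x) + x);  /* New value, not old. */
    }

    static bool
    my_atomic_cas_u64(_Atomic uint64_t *p, uint64_t c, uint64_t s) {
        /* jemalloc convention: false means the swap succeeded. */
        return (!atomic_compare_exchange_strong(p, &c, s));
    }

    static void
    my_atomic_write_u64(_Atomic uint64_t *p, uint64_t x) {
        atomic_store(p, x);
    }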
diff --git a/include/jemalloc/internal/base_inlines.h b/include/jemalloc/internal/base_inlines.h
index 63547d6..94fb1a9 100644
--- a/include/jemalloc/internal/base_inlines.h
+++ b/include/jemalloc/internal/base_inlines.h
@@ -7,8 +7,7 @@ unsigned base_ind_get(const base_t *base);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BASE_C_))
JEMALLOC_INLINE unsigned
-base_ind_get(const base_t *base)
-{
+base_ind_get(const base_t *base) {
return (base->ind);
}
#endif
diff --git a/include/jemalloc/internal/bitmap_inlines.h b/include/jemalloc/internal/bitmap_inlines.h
index 5400f9d..1a2411d 100644
--- a/include/jemalloc/internal/bitmap_inlines.h
+++ b/include/jemalloc/internal/bitmap_inlines.h
@@ -11,8 +11,7 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
JEMALLOC_INLINE bool
-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
@@ -22,16 +21,16 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
size_t i;
for (i = 0; i < binfo->ngroups; i++) {
- if (bitmap[i] != 0)
+ if (bitmap[i] != 0) {
return (false);
+ }
}
return (true);
#endif
}
JEMALLOC_INLINE bool
-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t g;
@@ -42,8 +41,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
}
JEMALLOC_INLINE void
-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
@@ -69,8 +67,9 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- if (g != 0)
+ if (g != 0) {
break;
+ }
}
}
#endif
@@ -78,8 +77,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
/* sfu: set first unset. */
JEMALLOC_INLINE size_t
-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
size_t bit;
bitmap_t g;
unsigned i;
@@ -109,8 +107,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
}
JEMALLOC_INLINE void
-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
size_t goff;
bitmap_t *gp;
bitmap_t g;
@@ -140,8 +137,9 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
== 0);
g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- if (!propagate)
+ if (!propagate) {
break;
+ }
}
}
#endif /* BITMAP_USE_TREE */
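
bitmap_sfu() ("set first unset") finds the lowest available bit and claims it in one call; this is the hot path for allocating a region out of a slab. A simplified flat-bitmap version of the same operation (jemalloc's real bitmap can also maintain a tree of summary groups, and this sketch uses the conventional 1-means-taken encoding):

    #include <limits.h>
    #include <stddef.h>
    #include <strings.h>  /* ffsl() */

    #define GROUP_NBITS (sizeof(unsigned long) * CHAR_BIT)

    /* Find the first clear bit, set it, and return its index. */
    static size_t
    flat_bitmap_sfu(unsigned long *groups, size_t ngroups) {
        size_t i;

        for (i = 0; i < ngroups; i++) {
            if (groups[i] != ~0UL) {
                unsigned bit = (unsigned)ffsl((long)~groups[i]) - 1;
                groups[i] |= 1UL << bit;
                return ((i * GROUP_NBITS) + bit);
            }
        }
        return (ngroups * GROUP_NBITS);  /* Bitmap is full. */
    }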
diff --git a/include/jemalloc/internal/extent_inlines.h b/include/jemalloc/internal/extent_inlines.h
index 87e0bcd..274e69c 100644
--- a/include/jemalloc/internal/extent_inlines.h
+++ b/include/jemalloc/internal/extent_inlines.h
@@ -43,8 +43,7 @@ int extent_snad_comp(const extent_t *a, const extent_t *b);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE extent_t *
-extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
-{
+extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -53,132 +52,112 @@ extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
}
JEMALLOC_INLINE arena_t *
-extent_arena_get(const extent_t *extent)
-{
+extent_arena_get(const extent_t *extent) {
return (extent->e_arena);
}
JEMALLOC_INLINE void *
-extent_base_get(const extent_t *extent)
-{
+extent_base_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (PAGE_ADDR2BASE(extent->e_addr));
}
JEMALLOC_INLINE void *
-extent_addr_get(const extent_t *extent)
-{
+extent_addr_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (extent->e_addr);
}
JEMALLOC_INLINE size_t
-extent_size_get(const extent_t *extent)
-{
+extent_size_get(const extent_t *extent) {
return (extent->e_size);
}
JEMALLOC_INLINE size_t
-extent_usize_get(const extent_t *extent)
-{
+extent_usize_get(const extent_t *extent) {
assert(!extent->e_slab);
return (extent->e_usize);
}
JEMALLOC_INLINE void *
-extent_before_get(const extent_t *extent)
-{
+extent_before_get(const extent_t *extent) {
return ((void *)((uintptr_t)extent_base_get(extent) - PAGE));
}
JEMALLOC_INLINE void *
-extent_last_get(const extent_t *extent)
-{
+extent_last_get(const extent_t *extent) {
return ((void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE));
}
JEMALLOC_INLINE void *
-extent_past_get(const extent_t *extent)
-{
+extent_past_get(const extent_t *extent) {
return ((void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent)));
}
JEMALLOC_INLINE size_t
-extent_sn_get(const extent_t *extent)
-{
+extent_sn_get(const extent_t *extent) {
return (extent->e_sn);
}
JEMALLOC_INLINE bool
-extent_active_get(const extent_t *extent)
-{
+extent_active_get(const extent_t *extent) {
return (extent->e_active);
}
JEMALLOC_INLINE bool
-extent_retained_get(const extent_t *extent)
-{
+extent_retained_get(const extent_t *extent) {
return (qr_next(extent, qr_link) == extent);
}
JEMALLOC_INLINE bool
-extent_zeroed_get(const extent_t *extent)
-{
+extent_zeroed_get(const extent_t *extent) {
return (extent->e_zeroed);
}
JEMALLOC_INLINE bool
-extent_committed_get(const extent_t *extent)
-{
+extent_committed_get(const extent_t *extent) {
return (extent->e_committed);
}
JEMALLOC_INLINE bool
-extent_slab_get(const extent_t *extent)
-{
+extent_slab_get(const extent_t *extent) {
return (extent->e_slab);
}
JEMALLOC_INLINE arena_slab_data_t *
-extent_slab_data_get(extent_t *extent)
-{
+extent_slab_data_get(extent_t *extent) {
assert(extent->e_slab);
return (&extent->e_slab_data);
}
JEMALLOC_INLINE const arena_slab_data_t *
-extent_slab_data_get_const(const extent_t *extent)
-{
+extent_slab_data_get_const(const extent_t *extent) {
assert(extent->e_slab);
return (&extent->e_slab_data);
}
JEMALLOC_INLINE prof_tctx_t *
-extent_prof_tctx_get(const extent_t *extent)
-{
+extent_prof_tctx_get(const extent_t *extent) {
return ((prof_tctx_t *)atomic_read_p(
&((extent_t *)extent)->e_prof_tctx_pun));
}
JEMALLOC_INLINE void
-extent_arena_set(extent_t *extent, arena_t *arena)
-{
+extent_arena_set(extent_t *extent, arena_t *arena) {
extent->e_arena = arena;
}
JEMALLOC_INLINE void
-extent_addr_set(extent_t *extent, void *addr)
-{
+extent_addr_set(extent_t *extent, void *addr) {
extent->e_addr = addr;
}
JEMALLOC_INLINE void
-extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
-{
+extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
assert(extent_base_get(extent) == extent_addr_get(extent));
if (alignment < PAGE) {
@@ -197,58 +176,49 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
}
JEMALLOC_INLINE void
-extent_size_set(extent_t *extent, size_t size)
-{
+extent_size_set(extent_t *extent, size_t size) {
extent->e_size = size;
}
JEMALLOC_INLINE void
-extent_usize_set(extent_t *extent, size_t usize)
-{
+extent_usize_set(extent_t *extent, size_t usize) {
extent->e_usize = usize;
}
JEMALLOC_INLINE void
-extent_sn_set(extent_t *extent, size_t sn)
-{
+extent_sn_set(extent_t *extent, size_t sn) {
extent->e_sn = sn;
}
JEMALLOC_INLINE void
-extent_active_set(extent_t *extent, bool active)
-{
+extent_active_set(extent_t *extent, bool active) {
extent->e_active = active;
}
JEMALLOC_INLINE void
-extent_zeroed_set(extent_t *extent, bool zeroed)
-{
+extent_zeroed_set(extent_t *extent, bool zeroed) {
extent->e_zeroed = zeroed;
}
JEMALLOC_INLINE void
-extent_committed_set(extent_t *extent, bool committed)
-{
+extent_committed_set(extent_t *extent, bool committed) {
extent->e_committed = committed;
}
JEMALLOC_INLINE void
-extent_slab_set(extent_t *extent, bool slab)
-{
+extent_slab_set(extent_t *extent, bool slab) {
extent->e_slab = slab;
}
JEMALLOC_INLINE void
-extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
-{
+extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
atomic_write_p(&extent->e_prof_tctx_pun, tctx);
}
JEMALLOC_INLINE void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
size_t usize, size_t sn, bool active, bool zeroed, bool committed,
- bool slab)
-{
+ bool slab) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_set(extent, arena);
@@ -260,26 +230,24 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
extent_slab_set(extent, slab);
- if (config_prof)
+ if (config_prof) {
extent_prof_tctx_set(extent, NULL);
+ }
qr_new(extent, qr_link);
}
JEMALLOC_INLINE void
-extent_ring_insert(extent_t *sentinel, extent_t *extent)
-{
+extent_ring_insert(extent_t *sentinel, extent_t *extent) {
qr_meld(sentinel, extent, extent_t, qr_link);
}
JEMALLOC_INLINE void
-extent_ring_remove(extent_t *extent)
-{
+extent_ring_remove(extent_t *extent) {
qr_remove(extent, qr_link);
}
JEMALLOC_INLINE int
-extent_sn_comp(const extent_t *a, const extent_t *b)
-{
+extent_sn_comp(const extent_t *a, const extent_t *b) {
size_t a_sn = extent_sn_get(a);
size_t b_sn = extent_sn_get(b);
@@ -287,8 +255,7 @@ extent_sn_comp(const extent_t *a, const extent_t *b)
}
JEMALLOC_INLINE int
-extent_ad_comp(const extent_t *a, const extent_t *b)
-{
+extent_ad_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
@@ -296,13 +263,13 @@ extent_ad_comp(const extent_t *a, const extent_t *b)
}
JEMALLOC_INLINE int
-extent_snad_comp(const extent_t *a, const extent_t *b)
-{
+extent_snad_comp(const extent_t *a, const extent_t *b) {
int ret;
ret = extent_sn_comp(a, b);
- if (ret != 0)
+ if (ret != 0) {
return (ret);
+ }
ret = extent_ad_comp(a, b);
return (ret);
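
extent_snad_comp() illustrates the standard way to build a total order from two keys: compare serial numbers first, then fall back to addresses as the tiebreaker. The same pattern in isolation (struct and names hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        size_t sn;    /* Serial number: the primary key. */
        void *addr;   /* Address: the tiebreaker. */
    } node_t;

    static int
    node_snad_comp(const node_t *a, const node_t *b) {
        int ret = (a->sn > b->sn) - (a->sn < b->sn);

        if (ret != 0) {
            return (ret);
        }
        return (((uintptr_t)a->addr > (uintptr_t)b->addr) -
            ((uintptr_t)a->addr < (uintptr_t)b->addr));
    }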
diff --git a/include/jemalloc/internal/hash_inlines.h b/include/jemalloc/internal/hash_inlines.h
index 4bb7850..82ac1f4 100644
--- a/include/jemalloc/internal/hash_inlines.h
+++ b/include/jemalloc/internal/hash_inlines.h
@@ -21,20 +21,17 @@ void hash(const void *key, size_t len, const uint32_t seed,
/******************************************************************************/
/* Internal implementation. */
JEMALLOC_INLINE uint32_t
-hash_rotl_32(uint32_t x, int8_t r)
-{
+hash_rotl_32(uint32_t x, int8_t r) {
return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
-hash_rotl_64(uint64_t x, int8_t r)
-{
+hash_rotl_64(uint64_t x, int8_t r) {
return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
-hash_get_block_32(const uint32_t *p, int i)
-{
+hash_get_block_32(const uint32_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
@@ -47,8 +44,7 @@ hash_get_block_32(const uint32_t *p, int i)
}
JEMALLOC_INLINE uint64_t
-hash_get_block_64(const uint64_t *p, int i)
-{
+hash_get_block_64(const uint64_t *p, int i) {
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
@@ -61,8 +57,7 @@ hash_get_block_64(const uint64_t *p, int i)
}
JEMALLOC_INLINE uint32_t
-hash_fmix_32(uint32_t h)
-{
+hash_fmix_32(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
@@ -73,8 +68,7 @@ hash_fmix_32(uint32_t h)
}
JEMALLOC_INLINE uint64_t
-hash_fmix_64(uint64_t k)
-{
+hash_fmix_64(uint64_t k) {
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
@@ -85,8 +79,7 @@ hash_fmix_64(uint64_t k)
}
JEMALLOC_INLINE uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed)
-{
+hash_x86_32(const void *key, int len, uint32_t seed) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
@@ -137,8 +130,7 @@ hash_x86_32(const void *key, int len, uint32_t seed)
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
- uint64_t r_out[2])
-{
+ uint64_t r_out[2]) {
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -239,8 +231,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
- uint64_t r_out[2])
-{
+ uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -318,8 +309,7 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
-{
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
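
The hash_rotl_*() and hash_fmix_*() helpers above are the building blocks of MurmurHash3. The 32-bit finalizer is useful on its own as an integer mixer, e.g. to scatter near-sequential keys across buckets; here it is extracted verbatim (the constants are MurmurHash3's published ones):

    #include <stdint.h>

    static uint32_t
    fmix32(uint32_t h) {
        h ^= h >> 16;
        h *= 0x85ebca6b;
        h ^= h >> 13;
        h *= 0xc2b2ae35;
        h ^= h >> 16;
        return (h);
    }

    /* Example: bucket = fmix32(key) & (nbuckets - 1), for power-of-two nbuckets. */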
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index dc9df35..c951fab 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -550,10 +550,10 @@ ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE pszind_t
-psz2ind(size_t psz)
-{
- if (unlikely(psz > LARGE_MAXCLASS))
+psz2ind(size_t psz) {
+ if (unlikely(psz > LARGE_MAXCLASS)) {
return (NPSIZES);
+ }
{
pszind_t x = lg_floor((psz<<1)-1);
pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
@@ -573,10 +573,10 @@ psz2ind(size_t psz)
}
JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
- if (unlikely(pind == NPSIZES))
+pind2sz_compute(pszind_t pind) {
+ if (unlikely(pind == NPSIZES)) {
return (LARGE_MAXCLASS + PAGE);
+ }
{
size_t grp = pind >> LG_SIZE_CLASS_GROUP;
size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
@@ -595,25 +595,23 @@ pind2sz_compute(pszind_t pind)
}
JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
+pind2sz_lookup(pszind_t pind) {
size_t ret = (size_t)pind2sz_tab[pind];
assert(ret == pind2sz_compute(pind));
return (ret);
}
JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
+pind2sz(pszind_t pind) {
assert(pind < NPSIZES+1);
return (pind2sz_lookup(pind));
}
JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
- if (unlikely(psz > LARGE_MAXCLASS))
+psz2u(size_t psz) {
+ if (unlikely(psz > LARGE_MAXCLASS)) {
return (LARGE_MAXCLASS + PAGE);
+ }
{
size_t x = lg_floor((psz<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
@@ -626,10 +624,10 @@ psz2u(size_t psz)
}
JEMALLOC_INLINE szind_t
-size2index_compute(size_t size)
-{
- if (unlikely(size > LARGE_MAXCLASS))
+size2index_compute(size_t size) {
+ if (unlikely(size > LARGE_MAXCLASS)) {
return (NSIZES);
+ }
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
@@ -656,8 +654,7 @@ size2index_compute(size_t size)
}
JEMALLOC_ALWAYS_INLINE szind_t
-size2index_lookup(size_t size)
-{
+size2index_lookup(size_t size) {
assert(size <= LOOKUP_MAXCLASS);
{
szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
@@ -667,20 +664,20 @@ size2index_lookup(size_t size)
}
JEMALLOC_ALWAYS_INLINE szind_t
-size2index(size_t size)
-{
+size2index(size_t size) {
assert(size > 0);
- if (likely(size <= LOOKUP_MAXCLASS))
+ if (likely(size <= LOOKUP_MAXCLASS)) {
return (size2index_lookup(size));
+ }
return (size2index_compute(size));
}
JEMALLOC_INLINE size_t
-index2size_compute(szind_t index)
-{
+index2size_compute(szind_t index) {
#if (NTBINS > 0)
- if (index < NTBINS)
+ if (index < NTBINS) {
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+ }
#endif
{
size_t reduced_index = index - NTBINS;
@@ -702,25 +699,23 @@ index2size_compute(szind_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(szind_t index)
-{
+index2size_lookup(szind_t index) {
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
-index2size(szind_t index)
-{
+index2size(szind_t index) {
assert(index < NSIZES);
return (index2size_lookup(index));
}
JEMALLOC_ALWAYS_INLINE size_t
-s2u_compute(size_t size)
-{
- if (unlikely(size > LARGE_MAXCLASS))
+s2u_compute(size_t size) {
+ if (unlikely(size > LARGE_MAXCLASS)) {
return (0);
+ }
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
@@ -741,8 +736,7 @@ s2u_compute(size_t size)
}
JEMALLOC_ALWAYS_INLINE size_t
-s2u_lookup(size_t size)
-{
+s2u_lookup(size_t size) {
size_t ret = index2size_lookup(size2index_lookup(size));
assert(ret == s2u_compute(size));
@@ -754,11 +748,11 @@ s2u_lookup(size_t size)
* specified size.
*/
JEMALLOC_ALWAYS_INLINE size_t
-s2u(size_t size)
-{
+s2u(size_t size) {
assert(size > 0);
- if (likely(size <= LOOKUP_MAXCLASS))
+ if (likely(size <= LOOKUP_MAXCLASS)) {
return (s2u_lookup(size));
+ }
return (s2u_compute(size));
}
@@ -767,8 +761,7 @@ s2u(size_t size)
* specified size and alignment.
*/
JEMALLOC_ALWAYS_INLINE size_t
-sa2u(size_t size, size_t alignment)
-{
+sa2u(size_t size, size_t alignment) {
size_t usize;
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
@@ -790,19 +783,21 @@ sa2u(size_t size, size_t alignment)
* 192 | 11000000 | 64
*/
usize = s2u(ALIGNMENT_CEILING(size, alignment));
- if (usize < LARGE_MINCLASS)
+ if (usize < LARGE_MINCLASS) {
return (usize);
+ }
}
/* Large size class. Beware of overflow. */
- if (unlikely(alignment > LARGE_MAXCLASS))
+ if (unlikely(alignment > LARGE_MAXCLASS)) {
return (0);
+ }
/* Make sure result is a large size class. */
- if (size <= LARGE_MINCLASS)
+ if (size <= LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
- else {
+ } else {
usize = s2u(size);
if (usize < size) {
/* size_t overflow. */
@@ -823,35 +818,33 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
-{
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
arena_t *ret;
- if (arena != NULL)
+ if (arena != NULL) {
return (arena);
+ }
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
- if (unlikely(ret == NULL))
+ if (unlikely(ret == NULL)) {
ret = arena_choose_hard(tsd, internal);
+ }
return (ret);
}
JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
-{
+arena_choose(tsd_t *tsd, arena_t *arena) {
return (arena_choose_impl(tsd, arena, false));
}
JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
+arena_ichoose(tsd_t *tsd, arena_t *arena) {
return (arena_choose_impl(tsd, arena, true));
}
JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
+arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
arena_tdata_t *tdata;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
@@ -869,14 +862,14 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
}
tdata = &arenas_tdata[ind];
- if (likely(tdata != NULL) || !refresh_if_missing)
+ if (likely(tdata != NULL) || !refresh_if_missing) {
return (tdata);
+ }
return (arena_tdata_get_hard(tsd, ind));
}
JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
arena_t *ret;
assert(ind <= MALLOCX_ARENA_MAX);
@@ -893,13 +886,13 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
}
JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
+decay_ticker_get(tsd_t *tsd, unsigned ind) {
arena_tdata_t *tdata;
tdata = arena_tdata_get(tsd, ind, true);
- if (unlikely(tdata == NULL))
+ if (unlikely(tdata == NULL)) {
return (NULL);
+ }
return (&tdata->decay_ticker);
}
#endif
@@ -917,8 +910,7 @@ extent_t *iealloc(tsdn_t *tsdn, const void *ptr);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE extent_t *
-iealloc(tsdn_t *tsdn, const void *ptr)
-{
+iealloc(tsdn_t *tsdn, const void *ptr) {
return (extent_lookup(tsdn, ptr, true));
}
#endif
@@ -958,8 +950,7 @@ bool ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(tsdn_t *tsdn, const void *ptr)
-{
+iaalloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
return (arena_aalloc(tsdn, ptr));
@@ -973,8 +964,7 @@ iaalloc(tsdn_t *tsdn, const void *ptr)
* size_t sz = isalloc(tsdn, extent, ptr);
*/
JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
+isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
assert(ptr != NULL);
return (arena_salloc(tsdn, extent, ptr));
@@ -982,8 +972,7 @@ isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
- bool is_internal, arena_t *arena, bool slow_path)
-{
+ bool is_internal, arena_t *arena, bool slow_path) {
void *ret;
assert(size != 0);
@@ -1000,16 +989,14 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
}
JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
-{
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
false, NULL, slow_path));
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, bool is_internal, arena_t *arena)
-{
+ tcache_t *tcache, bool is_internal, arena_t *arena) {
void *ret;
assert(usize != 0);
@@ -1029,21 +1016,18 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena)
-{
+ tcache_t *tcache, arena_t *arena) {
return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
-{
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr)
-{
+ivsalloc(tsdn_t *tsdn, const void *ptr) {
extent_t *extent;
/*
@@ -1055,8 +1039,9 @@ ivsalloc(tsdn_t *tsdn, const void *ptr)
* failure.
* */
extent = extent_lookup(tsdn, ptr, false);
- if (extent == NULL)
+ if (extent == NULL) {
return (0);
+ }
assert(extent_active_get(extent));
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
@@ -1066,8 +1051,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr)
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
- bool is_internal, bool slow_path)
-{
+ bool is_internal, bool slow_path) {
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
@@ -1081,41 +1065,42 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
}
JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, extent_t *extent, void *ptr)
-{
+idalloc(tsd_t *tsd, extent_t *extent, void *ptr) {
idalloctm(tsd_tsdn(tsd), extent, ptr, tcache_get(tsd, false), false,
true);
}
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
- tcache_t *tcache, bool slow_path)
-{
+ tcache_t *tcache, bool slow_path) {
arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache,
- arena_t *arena)
-{
+ arena_t *arena) {
void *p;
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
- if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return (NULL);
+ }
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
if (p == NULL) {
- if (extra == 0)
+ if (extra == 0) {
return (NULL);
+ }
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return (NULL);
+ }
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
- if (p == NULL)
+ if (p == NULL) {
return (NULL);
+ }
}
/*
* Copy at most size bytes (not size+extra), since the caller has no
@@ -1129,8 +1114,7 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
- size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
-{
+ size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) {
assert(ptr != NULL);
assert(size != 0);
@@ -1150,16 +1134,14 @@ iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize, size_t size,
- size_t alignment, bool zero)
-{
+ size_t alignment, bool zero) {
return (iralloct(tsd_tsdn(tsd), extent, ptr, oldsize, size, alignment,
zero, tcache_get(tsd, true), NULL));
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero)
-{
+ size_t extra, size_t alignment, bool zero) {
assert(ptr != NULL);
assert(size != 0);
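
psz2ind(), size2index_compute(), and s2u_compute() above all apply one rounding rule: each power-of-two range is split into 2^LG_SIZE_CLASS_GROUP classes, so a request is rounded up to the next multiple of that group's delta. A standalone sketch of the rounding, assuming four classes per group and ignoring tiny classes and overflow:

    #include <stddef.h>

    #define LG_GROUP 2  /* Four size classes per doubling. */

    static size_t
    lg_floor(size_t x) {
        size_t lg = 0;

        while (x >>= 1) {
            lg++;
        }
        return (lg);
    }

    /* Round size up to the smallest size class containing it. */
    static size_t
    size_class_ceil(size_t size) {
        size_t x = lg_floor((size << 1) - 1);  /* lg_ceil(size). */
        size_t lg_delta = (x <= LG_GROUP + 1) ? 0 : x - LG_GROUP - 1;
        size_t delta = (size_t)1 << lg_delta;

        return ((size + delta - 1) & ~(delta - 1));
    }

    /* size_class_ceil(9) == 10; the classes between 8 and 16 are 10, 12, 14, 16. */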
diff --git a/include/jemalloc/internal/jemalloc_internal_decls.h b/include/jemalloc/internal/jemalloc_internal_decls.h
index 277027f..fd80fdf 100644
--- a/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -61,8 +61,7 @@ typedef intptr_t ssize_t;
# pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
-isblank(int c)
-{
+isblank(int c) {
return (c == '\t' || c == ' ');
}
#endif
diff --git a/include/jemalloc/internal/mutex_inlines.h b/include/jemalloc/internal/mutex_inlines.h
index d65fa13..0c6c5dd 100644
--- a/include/jemalloc/internal/mutex_inlines.h
+++ b/include/jemalloc/internal/mutex_inlines.h
@@ -10,8 +10,7 @@ void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
if (isthreaded) {
witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
@@ -32,8 +31,7 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
}
JEMALLOC_INLINE void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
if (isthreaded) {
witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
@@ -53,17 +51,17 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
}
JEMALLOC_INLINE void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
- if (isthreaded)
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ if (isthreaded) {
witness_assert_owner(tsdn, &mutex->witness);
+ }
}
JEMALLOC_INLINE void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
- if (isthreaded)
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ if (isthreaded) {
witness_assert_not_owner(tsdn, &mutex->witness);
+ }
}
#endif
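
malloc_mutex_lock() wraps the raw lock with witness bookkeeping so debug builds can detect lock-order reversals and self-deadlock. The shape of the wrapper, reduced to a pthread-only sketch (not self-contained: it assumes the witness_t/tsdn_t declarations from jemalloc's internal headers; my_mutex_t is hypothetical):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        witness_t witness;  /* Lock-order record (debug builds). */
    } my_mutex_t;

    static void
    my_mutex_lock(tsdn_t *tsdn, my_mutex_t *mutex) {
        witness_assert_not_owner(tsdn, &mutex->witness);  /* Catch recursion. */
        pthread_mutex_lock(&mutex->lock);
        witness_lock(tsdn, &mutex->witness);  /* Record acquisition. */
    }

    static void
    my_mutex_unlock(tsdn_t *tsdn, my_mutex_t *mutex) {
        witness_unlock(tsdn, &mutex->witness);  /* Release the record first. */
        pthread_mutex_unlock(&mutex->lock);
    }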
diff --git a/include/jemalloc/internal/ph.h b/include/jemalloc/internal/ph.h
index 9efb7b7..61dfdc0 100644
--- a/include/jemalloc/internal/ph.h
+++ b/include/jemalloc/internal/ph.h
@@ -58,17 +58,18 @@ struct { \
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
- if (phn0child != NULL) \
+ if (phn0child != NULL) { \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
+ } \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
- if (a_phn0 == NULL) \
+ if (a_phn0 == NULL) { \
r_phn = a_phn1; \
- else if (a_phn1 == NULL) \
+ } else if (a_phn1 == NULL) { \
r_phn = a_phn0; \
- else if (a_cmp(a_phn0, a_phn1) < 0) { \
+ } else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
@@ -95,8 +96,9 @@ struct { \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
- if (phnrest != NULL) \
+ if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, phnrest, NULL); \
+ } \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
@@ -150,8 +152,9 @@ struct { \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
- if (head == NULL) \
+ if (head == NULL) { \
break; \
+ } \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
@@ -179,9 +182,9 @@ struct { \
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
- if (lchild == NULL) \
+ if (lchild == NULL) { \
r_phn = NULL; \
- else { \
+ } else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
@@ -205,26 +208,23 @@ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
-a_prefix##new(a_ph_type *ph) \
-{ \
+a_prefix##new(a_ph_type *ph) { \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
-a_prefix##empty(a_ph_type *ph) \
-{ \
+a_prefix##empty(a_ph_type *ph) { \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
-a_prefix##first(a_ph_type *ph) \
-{ \
- if (ph->ph_root == NULL) \
+a_prefix##first(a_ph_type *ph) { \
+ if (ph->ph_root == NULL) { \
return (NULL); \
+ } \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
} \
a_attr void \
-a_prefix##insert(a_ph_type *ph, a_type *phn) \
-{ \
+a_prefix##insert(a_ph_type *ph, a_type *phn) { \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
@@ -235,9 +235,9 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
- if (ph->ph_root == NULL) \
+ if (ph->ph_root == NULL) { \
ph->ph_root = phn; \
- else { \
+ } else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
@@ -251,12 +251,12 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
} \
} \
a_attr a_type * \
-a_prefix##remove_first(a_ph_type *ph) \
-{ \
+a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
- if (ph->ph_root == NULL) \
+ if (ph->ph_root == NULL) { \
return (NULL); \
+ } \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
@@ -267,8 +267,7 @@ a_prefix##remove_first(a_ph_type *ph) \
return (ret); \
} \
a_attr void \
-a_prefix##remove(a_ph_type *ph, a_type *phn) \
-{ \
+a_prefix##remove(a_ph_type *ph, a_type *phn) { \
a_type *replace, *parent; \
\
/* \
@@ -286,8 +285,9 @@ a_prefix##remove(a_ph_type *ph, a_type *phn) \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
- if (phn_lchild_get(a_type, a_field, parent) != phn) \
+ if (phn_lchild_get(a_type, a_field, parent) != phn) { \
parent = NULL; \
+ } \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
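
The ph.h macros above expand into a complete pairing-heap implementation per call site. A hedged sketch of an instantiation, assuming ph.h is included; phn() and ph() are the link/head struct macros from that header (used but not defined in these hunks), and node_t, node_cmp, and the node_heap_ prefix are illustrative names, not from this tree:

#include <stdint.h>

typedef struct node_s node_t;
struct node_s {
    phn(node_t) ph_link;    /* Intrusive heap linkage. */
    uint64_t    key;
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b) {
    return ((a->key > b->key) - (a->key < b->key));
}

/* Emits node_heap_new(), node_heap_first(), node_heap_insert(),
 * node_heap_remove_first(), node_heap_remove(), and node_heap_empty(). */
ph_gen(static, node_heap_, node_heap_t, node_t, ph_link, node_cmp)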
diff --git a/include/jemalloc/internal/prng_inlines.h b/include/jemalloc/internal/prng_inlines.h
index 8cc19ce..124b1ba 100644
--- a/include/jemalloc/internal/prng_inlines.h
+++ b/include/jemalloc/internal/prng_inlines.h
@@ -18,20 +18,17 @@ size_t prng_range_zu(size_t *state, size_t range, bool atomic);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state)
-{
+prng_state_next_u32(uint32_t state) {
return ((state * PRNG_A_32) + PRNG_C_32);
}
JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state)
-{
+prng_state_next_u64(uint64_t state) {
return ((state * PRNG_A_64) + PRNG_C_64);
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state)
-{
+prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3
@@ -42,8 +39,7 @@ prng_state_next_zu(size_t state)
}
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
-{
+prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) {
uint32_t ret, state1;
assert(lg_range > 0);
@@ -67,8 +63,7 @@ prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
/* 64-bit atomic operations cannot be supported on all relevant platforms. */
JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range)
-{
+prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
uint64_t ret, state1;
assert(lg_range > 0);
@@ -82,8 +77,7 @@ prng_lg_range_u64(uint64_t *state, unsigned lg_range)
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
-{
+prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) {
size_t ret, state1;
assert(lg_range > 0);
@@ -106,8 +100,7 @@ prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
}
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
-{
+prng_range_u32(uint32_t *state, uint32_t range, bool atomic) {
uint32_t ret;
unsigned lg_range;
@@ -125,8 +118,7 @@ prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
}
JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range)
-{
+prng_range_u64(uint64_t *state, uint64_t range) {
uint64_t ret;
unsigned lg_range;
@@ -144,8 +136,7 @@ prng_range_u64(uint64_t *state, uint64_t range)
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range, bool atomic)
-{
+prng_range_zu(size_t *state, size_t range, bool atomic) {
size_t ret;
unsigned lg_range;
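
The prng_* routines above are linear congruential generators: each step computes state' = state * A + C mod 2^w, and the lg_range variants return the high bits, which have the longest periods in a power-of-two-modulus LCG. A standalone sketch of the same scheme; the constants below are Knuth's MMIX parameters, used purely for illustration in place of PRNG_A_64/PRNG_C_64:

#include <assert.h>
#include <stdint.h>

static uint64_t
lcg_next(uint64_t state) {
    /* state' = state * A + C (mod 2^64). */
    return (state * 6364136223846793005ULL + 1442695040888963407ULL);
}

/* Return the lg_range high bits, as prng_lg_range_u64() does. */
static uint64_t
lcg_lg_range(uint64_t *state, unsigned lg_range) {
    assert(lg_range > 0 && lg_range <= 64);
    *state = lcg_next(*state);
    return (*state >> (64 - lg_range));
}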
diff --git a/include/jemalloc/internal/prof_inlines.h b/include/jemalloc/internal/prof_inlines.h
index 394b7b3..bb9093a 100644
--- a/include/jemalloc/internal/prof_inlines.h
+++ b/include/jemalloc/internal/prof_inlines.h
@@ -27,8 +27,7 @@ void prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr,
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
-prof_active_get_unlocked(void)
-{
+prof_active_get_unlocked(void) {
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
@@ -39,8 +38,7 @@ prof_active_get_unlocked(void)
}
JEMALLOC_ALWAYS_INLINE bool
-prof_gdump_get_unlocked(void)
-{
+prof_gdump_get_unlocked(void) {
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
@@ -50,8 +48,7 @@ prof_gdump_get_unlocked(void)
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
-prof_tdata_get(tsd_t *tsd, bool create)
-{
+prof_tdata_get(tsd_t *tsd, bool create) {
prof_tdata_t *tdata;
cassert(config_prof);
@@ -74,8 +71,7 @@ prof_tdata_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
+prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
@@ -84,8 +80,7 @@ prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
-{
+ prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
@@ -94,8 +89,7 @@ prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
- prof_tctx_t *tctx)
-{
+ prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
@@ -104,37 +98,40 @@ prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
- prof_tdata_t **tdata_out)
-{
+ prof_tdata_t **tdata_out) {
prof_tdata_t *tdata;
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
- if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
+ if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
tdata = NULL;
+ }
- if (tdata_out != NULL)
+ if (tdata_out != NULL) {
*tdata_out = tdata;
+ }
- if (unlikely(tdata == NULL))
+ if (unlikely(tdata == NULL)) {
return (true);
+ }
if (likely(tdata->bytes_until_sample >= usize)) {
- if (update)
+ if (update) {
tdata->bytes_until_sample -= usize;
+ }
return (true);
} else {
/* Compute new sample threshold. */
- if (update)
+ if (update) {
prof_sample_threshold_update(tdata);
+ }
return (!tdata->active);
}
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
-{
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
prof_tctx_t *ret;
prof_tdata_t *tdata;
prof_bt_t bt;
@@ -142,9 +139,9 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
assert(usize == s2u(usize));
if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
- &tdata)))
+ &tdata))) {
ret = (prof_tctx_t *)(uintptr_t)1U;
- else {
+ } else {
bt_init(&bt, tdata->vec);
prof_backtrace(&bt);
ret = prof_lookup(tsd, &bt);
@@ -155,15 +152,14 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
-{
+ prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, extent, ptr));
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
prof_malloc_sample_object(tsdn, extent, ptr, usize, tctx);
- else {
+ } else {
prof_tctx_set(tsdn, extent, ptr, usize,
(prof_tctx_t *)(uintptr_t)1U);
}
@@ -172,8 +168,7 @@ prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, extent_t *old_extent,
- const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx)
-{
+ const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) {
bool sampled, old_sampled, moved;
cassert(config_prof);
@@ -230,15 +225,15 @@ prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
}
JEMALLOC_ALWAYS_INLINE void
-prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize)
-{
+prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize) {
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
prof_free_sampled_object(tsd, usize, tctx);
+ }
}
#endif
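
prof_sample_accum_update() above is a byte-countdown sampler: each allocation draws down a per-thread budget, and the allocation that exhausts it is sampled, after which prof_sample_threshold_update() draws a fresh randomized threshold. A simplified sketch of that control flow, with a fixed interval standing in for the randomized threshold (all names illustrative):

#include <stdbool.h>
#include <stddef.h>

#define SAMPLE_INTERVAL ((size_t)1 << 19)   /* 512 KiB of allocation. */

typedef struct {
    size_t bytes_until_sample;
} sampler_t;

/* Return true iff this allocation of usize bytes should be sampled. */
static bool
sampler_should_sample(sampler_t *s, size_t usize) {
    if (s->bytes_until_sample >= usize) {
        s->bytes_until_sample -= usize;
        return (false);
    }
    /* Budget exhausted: re-arm and sample this allocation. */
    s->bytes_until_sample = SAMPLE_INTERVAL;
    return (true);
}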
diff --git a/include/jemalloc/internal/qr.h b/include/jemalloc/internal/qr.h
index 06dfdaf..a04f750 100644
--- a/include/jemalloc/internal/qr.h
+++ b/include/jemalloc/internal/qr.h
@@ -25,14 +25,12 @@ struct { \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
- do \
- { \
+#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
- } while (0)
+} while (0)
#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
a_type *t; \
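
qr.h implements intrusive circular doubly-linked rings, and the reflowed qr_after_insert() above splices a_qr into the ring directly after a_qrelm in O(1). A hedged usage sketch, assuming qr.h's qr() field macro and qr_new() (not shown in these hunks); elem_t is illustrative:

typedef struct elem_s elem_t;
struct elem_s {
    qr(elem_t)  link;   /* Intrusive ring linkage. */
    int         val;
};

static void
ring_demo(elem_t *a, elem_t *b) {
    qr_new(a, link);                /* a alone forms a ring. */
    qr_after_insert(a, b, link);    /* Ring is now a -> b -> a. */
}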
diff --git a/include/jemalloc/internal/rb.h b/include/jemalloc/internal/rb.h
index 3770342..a4b5a65 100644
--- a/include/jemalloc/internal/rb.h
+++ b/include/jemalloc/internal/rb.h
@@ -550,8 +550,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
- for (pathp++; pathp->node != NULL; \
- pathp++) { \
+ for (pathp++; pathp->node != NULL; pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
diff --git a/include/jemalloc/internal/rtree_inlines.h b/include/jemalloc/internal/rtree_inlines.h
index 7e79a6a..9e512e9 100644
--- a/include/jemalloc/internal/rtree_inlines.h
+++ b/include/jemalloc/internal/rtree_inlines.h
@@ -37,12 +37,12 @@ void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE unsigned
-rtree_start_level(const rtree_t *rtree, uintptr_t key)
-{
+rtree_start_level(const rtree_t *rtree, uintptr_t key) {
unsigned start_level;
- if (unlikely(key == 0))
+ if (unlikely(key == 0)) {
return (rtree->height - 1);
+ }
start_level = rtree->start_level[(lg_floor(key) + 1) >>
LG_RTREE_BITS_PER_LEVEL];
@@ -52,8 +52,7 @@ rtree_start_level(const rtree_t *rtree, uintptr_t key)
JEMALLOC_ALWAYS_INLINE unsigned
rtree_ctx_start_level(const rtree_t *rtree, const rtree_ctx_t *rtree_ctx,
- uintptr_t key)
-{
+ uintptr_t key) {
unsigned start_level;
uintptr_t key_diff;
@@ -72,48 +71,45 @@ rtree_ctx_start_level(const rtree_t *rtree, const rtree_ctx_t *rtree_ctx,
}
JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
-{
+rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) {
return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
rtree->levels[level].cumbits)) & ((ZU(1) <<
rtree->levels[level].bits) - 1));
}
JEMALLOC_ALWAYS_INLINE bool
-rtree_node_valid(rtree_elm_t *node)
-{
+rtree_node_valid(rtree_elm_t *node) {
return ((uintptr_t)node != (uintptr_t)0);
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_child_tryread(rtree_elm_t *elm, bool dependent)
-{
+rtree_child_tryread(rtree_elm_t *elm, bool dependent) {
rtree_elm_t *child;
/* Double-checked read (first read may be stale). */
child = elm->child;
- if (!dependent && !rtree_node_valid(child))
+ if (!dependent && !rtree_node_valid(child)) {
child = (rtree_elm_t *)atomic_read_p(&elm->pun);
+ }
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level,
- bool dependent)
-{
+ bool dependent) {
rtree_elm_t *child;
child = rtree_child_tryread(elm, dependent);
- if (!dependent && unlikely(!rtree_node_valid(child)))
+ if (!dependent && unlikely(!rtree_node_valid(child))) {
child = rtree_child_read_hard(tsdn, rtree, elm, level);
+ }
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_elm_read(rtree_elm_t *elm, bool dependent)
-{
+rtree_elm_read(rtree_elm_t *elm, bool dependent) {
extent_t *extent;
if (dependent) {
@@ -140,14 +136,12 @@ rtree_elm_read(rtree_elm_t *elm, bool dependent)
}
JEMALLOC_INLINE void
-rtree_elm_write(rtree_elm_t *elm, const extent_t *extent)
-{
+rtree_elm_write(rtree_elm_t *elm, const extent_t *extent) {
atomic_write_p(&elm->pun, extent);
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
-{
+rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) {
rtree_elm_t *subtree;
/* Double-checked read (first read may be stale). */
@@ -161,21 +155,21 @@ rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level, bool dependent)
-{
+rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
+ bool dependent) {
rtree_elm_t *subtree;
subtree = rtree_subtree_tryread(rtree, level, dependent);
- if (!dependent && unlikely(!rtree_node_valid(subtree)))
+ if (!dependent && unlikely(!rtree_node_valid(subtree))) {
subtree = rtree_subtree_read_hard(tsdn, rtree, level);
+ }
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, bool init_missing)
-{
+ uintptr_t key, bool dependent, bool init_missing) {
uintptr_t subkey;
unsigned start_level;
rtree_elm_t *node;
@@ -184,9 +178,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
if (dependent || init_missing) {
if (likely(rtree_ctx->valid)) {
- if (key == rtree_ctx->key)
+ if (key == rtree_ctx->key) {
return (rtree_ctx->elms[rtree->height]);
- else {
+ } else {
unsigned no_ctx_start_level =
rtree_start_level(rtree, key);
unsigned ctx_start_level;
@@ -237,8 +231,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
case level: \
assert(level < (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) { \
- if (init_missing) \
+ if (init_missing) { \
rtree_ctx->valid = false; \
+ } \
return (NULL); \
} \
subkey = rtree_subkey(rtree, key, level - \
@@ -255,8 +250,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
case level: \
assert(level == (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) { \
- if (init_missing) \
+ if (init_missing) { \
rtree_ctx->valid = false; \
+ } \
return (NULL); \
} \
subkey = rtree_subkey(rtree, key, level - \
@@ -330,16 +326,16 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
JEMALLOC_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- const extent_t *extent)
-{
+ const extent_t *extent) {
rtree_elm_t *elm;
assert(extent != NULL); /* Use rtree_clear() for this case. */
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true);
- if (elm == NULL)
+ if (elm == NULL) {
return (true);
+ }
assert(rtree_elm_read(elm, false) == NULL);
rtree_elm_write(elm, extent);
@@ -348,27 +344,27 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
JEMALLOC_ALWAYS_INLINE extent_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- bool dependent)
-{
+ bool dependent) {
rtree_elm_t *elm;
elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent, false);
- if (elm == NULL)
+ if (elm == NULL) {
return (NULL);
+ }
return (rtree_elm_read(elm, dependent));
}
JEMALLOC_INLINE rtree_elm_t *
rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, bool init_missing)
-{
+ uintptr_t key, bool dependent, bool init_missing) {
rtree_elm_t *elm;
elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent,
init_missing);
- if (!dependent && elm == NULL)
+ if (!dependent && elm == NULL) {
return (NULL);
+ }
{
extent_t *extent;
void *s;
@@ -380,52 +376,53 @@ rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
} while (atomic_cas_p(&elm->pun, (void *)extent, s));
}
- if (config_debug)
+ if (config_debug) {
rtree_elm_witness_acquire(tsdn, rtree, key, elm);
+ }
return (elm);
}
JEMALLOC_INLINE extent_t *
-rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm)
-{
+rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm) {
extent_t *extent;
assert(((uintptr_t)elm->pun & (uintptr_t)0x1) == (uintptr_t)0x1);
extent = (extent_t *)((uintptr_t)elm->pun & ~((uintptr_t)0x1));
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
- if (config_debug)
+ if (config_debug) {
rtree_elm_witness_access(tsdn, rtree, elm);
+ }
return (extent);
}
JEMALLOC_INLINE void
rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm,
- const extent_t *extent)
-{
+ const extent_t *extent) {
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
assert(((uintptr_t)elm->pun & (uintptr_t)0x1) == (uintptr_t)0x1);
- if (config_debug)
+ if (config_debug) {
rtree_elm_witness_access(tsdn, rtree, elm);
+ }
elm->pun = (void *)((uintptr_t)extent | (uintptr_t)0x1);
assert(rtree_elm_read_acquired(tsdn, rtree, elm) == extent);
}
JEMALLOC_INLINE void
-rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm)
-{
+rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm) {
rtree_elm_write(elm, rtree_elm_read_acquired(tsdn, rtree, elm));
- if (config_debug)
+ if (config_debug) {
rtree_elm_witness_release(tsdn, rtree, elm);
+ }
}
JEMALLOC_INLINE void
-rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key)
-{
+rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key) {
rtree_elm_t *elm;
elm = rtree_elm_acquire(tsdn, rtree, rtree_ctx, key, true, false);
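
rtree_child_tryread() and rtree_child_read() above use a double-checked read: a plain load first (which may be stale), an atomic re-read only if that load saw an invalid node and the lookup is not dependent, and finally the _hard() slow path. A standalone sketch of the pattern in C11 atomics; jemalloc itself goes through its atomic_read_p() wrapper, and init_child_hard() below is an assumed stand-in for the slow path:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    _Atomic(void *) child;
} elm_t;

void *init_child_hard(elm_t *elm);  /* Slow path, defined elsewhere. */

static void *
child_read(elm_t *elm, bool dependent) {
    /* First read may be stale; cheap load on the fast path. */
    void *child = atomic_load_explicit(&elm->child, memory_order_relaxed);
    if (!dependent && child == NULL) {
        /* Double-checked: atomic re-read before the slow path. */
        child = atomic_load_explicit(&elm->child, memory_order_acquire);
        if (child == NULL) {
            child = init_child_hard(elm);
        }
    }
    return (child);
}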
diff --git a/include/jemalloc/internal/spin_inlines.h b/include/jemalloc/internal/spin_inlines.h
index b4e779f..1ffc423 100644
--- a/include/jemalloc/internal/spin_inlines.h
+++ b/include/jemalloc/internal/spin_inlines.h
@@ -8,21 +8,21 @@ void spin_adaptive(spin_t *spin);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
JEMALLOC_INLINE void
-spin_init(spin_t *spin)
-{
+spin_init(spin_t *spin) {
spin->iteration = 0;
}
JEMALLOC_INLINE void
-spin_adaptive(spin_t *spin)
-{
+spin_adaptive(spin_t *spin) {
volatile uint64_t i;
- for (i = 0; i < (KQU(1) << spin->iteration); i++)
+ for (i = 0; i < (KQU(1) << spin->iteration); i++) {
CPU_SPINWAIT;
+ }
- if (spin->iteration < 63)
+ if (spin->iteration < 63) {
spin->iteration++;
+ }
}
#endif
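
spin_adaptive() above pauses for 2^iteration CPU_SPINWAIT rounds and doubles that budget on every call, capping the exponent at 63. The intended use is exponential backoff in a retry loop; a hedged sketch against the API shown:

/* Assumes spin_inlines.h (above) is included. */
static void
wait_for_flag(volatile int *flag) {
    spin_t spin;

    spin_init(&spin);
    while (*flag == 0) {
        spin_adaptive(&spin);   /* Back off: 1, 2, 4, ... pauses. */
    }
}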
diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h
index 2762b0e..4721ba3 100644
--- a/include/jemalloc/internal/tcache_inlines.h
+++ b/include/jemalloc/internal/tcache_inlines.h
@@ -21,8 +21,7 @@ tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE void
-tcache_flush(void)
-{
+tcache_flush(void) {
tsd_t *tsd;
cassert(config_tcache);
@@ -32,8 +31,7 @@ tcache_flush(void)
}
JEMALLOC_INLINE bool
-tcache_enabled_get(void)
-{
+tcache_enabled_get(void) {
tsd_t *tsd;
tcache_enabled_t tcache_enabled;
@@ -50,8 +48,7 @@ tcache_enabled_get(void)
}
JEMALLOC_INLINE void
-tcache_enabled_set(bool enabled)
-{
+tcache_enabled_set(bool enabled) {
tsd_t *tsd;
tcache_enabled_t tcache_enabled;
@@ -62,21 +59,23 @@ tcache_enabled_set(bool enabled)
tcache_enabled = (tcache_enabled_t)enabled;
tsd_tcache_enabled_set(tsd, tcache_enabled);
- if (!enabled)
+ if (!enabled) {
tcache_cleanup(tsd);
+ }
}
JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(tsd_t *tsd, bool create)
-{
+tcache_get(tsd_t *tsd, bool create) {
tcache_t *tcache;
- if (!config_tcache)
+ if (!config_tcache) {
return (NULL);
+ }
tcache = tsd_tcache_get(tsd);
- if (!create)
+ if (!create) {
return (tcache);
+ }
if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
tcache = tcache_get_hard(tsd);
tsd_tcache_set(tsd, tcache);
@@ -86,18 +85,18 @@ tcache_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE void
-tcache_event(tsd_t *tsd, tcache_t *tcache)
-{
- if (TCACHE_GC_INCR == 0)
+tcache_event(tsd_t *tsd, tcache_t *tcache) {
+ if (TCACHE_GC_INCR == 0) {
return;
+ }
- if (unlikely(ticker_tick(&tcache->gc_ticker)))
+ if (unlikely(ticker_tick(&tcache->gc_ticker))) {
tcache_event_hard(tsd, tcache);
+ }
}
JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
-{
+tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
void *ret;
if (unlikely(tbin->ncached == 0)) {
@@ -116,16 +115,16 @@ tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
ret = *(tbin->avail - tbin->ncached);
tbin->ncached--;
- if (unlikely((int)tbin->ncached < tbin->low_water))
+ if (unlikely((int)tbin->ncached < tbin->low_water)) {
tbin->low_water = tbin->ncached;
+ }
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
-{
+ szind_t binind, bool zero, bool slow_path) {
void *ret;
tcache_bin_t *tbin;
bool tcache_success;
@@ -138,13 +137,15 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(!tcache_success)) {
bool tcache_hard_success;
arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
+ if (unlikely(arena == NULL)) {
return (NULL);
+ }
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
tbin, binind, &tcache_hard_success);
- if (tcache_hard_success == false)
+ if (tcache_hard_success == false) {
return (NULL);
+ }
}
assert(ret);
@@ -162,8 +163,9 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
- } else if (unlikely(opt_zero))
+ } else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
+ }
}
} else {
if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
@@ -173,18 +175,19 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
memset(ret, 0, usize);
}
- if (config_stats)
+ if (config_stats) {
tbin->tstats.nrequests++;
- if (config_prof)
+ }
+ if (config_prof) {
tcache->prof_accumbytes += usize;
+ }
tcache_event(tsd, tcache);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
-{
+ szind_t binind, bool zero, bool slow_path) {
void *ret;
tcache_bin_t *tbin;
bool tcache_success;
@@ -199,12 +202,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
* expensive to create one and not use it.
*/
arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
+ if (unlikely(arena == NULL)) {
return (NULL);
+ }
ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
- if (ret == NULL)
+ if (ret == NULL) {
return (NULL);
+ }
} else {
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
@@ -220,16 +225,20 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(opt_junk_alloc)) {
memset(ret, JEMALLOC_ALLOC_JUNK,
usize);
- } else if (unlikely(opt_zero))
+ } else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
+ }
}
- } else
+ } else {
memset(ret, 0, usize);
+ }
- if (config_stats)
+ if (config_stats) {
tbin->tstats.nrequests++;
- if (config_prof)
+ }
+ if (config_prof) {
tcache->prof_accumbytes += usize;
+ }
}
tcache_event(tsd, tcache);
@@ -238,15 +247,15 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path)
-{
+ bool slow_path) {
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
- if (slow_path && config_fill && unlikely(opt_junk_free))
+ if (slow_path && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
+ }
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
@@ -263,8 +272,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
- bool slow_path)
-{
+ bool slow_path) {
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
@@ -274,8 +282,9 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
binind = size2index(size);
- if (slow_path && config_fill && unlikely(opt_junk_free))
+ if (slow_path && config_fill && unlikely(opt_junk_free)) {
large_dalloc_junk(ptr, size);
+ }
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
@@ -291,8 +300,7 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
}
JEMALLOC_ALWAYS_INLINE tcache_t *
-tcaches_get(tsd_t *tsd, unsigned ind)
-{
+tcaches_get(tsd_t *tsd, unsigned ind) {
tcaches_t *elm = &tcaches[ind];
if (unlikely(elm->tcache == NULL)) {
elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
diff --git a/include/jemalloc/internal/ticker_inlines.h b/include/jemalloc/internal/ticker_inlines.h
index 1a4395f..6cc6134 100644
--- a/include/jemalloc/internal/ticker_inlines.h
+++ b/include/jemalloc/internal/ticker_inlines.h
@@ -11,27 +11,23 @@ bool ticker_tick(ticker_t *ticker);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
JEMALLOC_INLINE void
-ticker_init(ticker_t *ticker, int32_t nticks)
-{
+ticker_init(ticker_t *ticker, int32_t nticks) {
ticker->tick = nticks;
ticker->nticks = nticks;
}
JEMALLOC_INLINE void
-ticker_copy(ticker_t *ticker, const ticker_t *other)
-{
+ticker_copy(ticker_t *ticker, const ticker_t *other) {
*ticker = *other;
}
JEMALLOC_INLINE int32_t
-ticker_read(const ticker_t *ticker)
-{
+ticker_read(const ticker_t *ticker) {
return (ticker->tick);
}
JEMALLOC_INLINE bool
-ticker_ticks(ticker_t *ticker, int32_t nticks)
-{
+ticker_ticks(ticker_t *ticker, int32_t nticks) {
if (unlikely(ticker->tick < nticks)) {
ticker->tick = ticker->nticks;
return (true);
@@ -41,8 +37,7 @@ ticker_ticks(ticker_t *ticker, int32_t nticks)
}
JEMALLOC_INLINE bool
-ticker_tick(ticker_t *ticker)
-{
+ticker_tick(ticker_t *ticker) {
return (ticker_ticks(ticker, 1));
}
#endif
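
These ticker routines drive periodic-but-amortized work such as tcache_event() above: ticker_tick() counts calls down from nticks, returns true exactly when the budget runs out, and re-arms itself. A hedged usage sketch against the API shown:

/* Assumes ticker_inlines.h (above) is included. */
static unsigned gc_runs;

static void
on_allocation_event(ticker_t *ticker) {
    if (ticker_tick(ticker)) {  /* True roughly once per nticks calls. */
        gc_runs++;              /* Stand-in for the periodic work. */
    }
}

/* Per-thread setup: ticker_t t; ticker_init(&t, 1024); */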
diff --git a/include/jemalloc/internal/tsd_inlines.h b/include/jemalloc/internal/tsd_inlines.h
index 0df21ad..2093d61 100644
--- a/include/jemalloc/internal/tsd_inlines.h
+++ b/include/jemalloc/internal/tsd_inlines.h
@@ -25,12 +25,12 @@ malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init)
-{
+tsd_fetch_impl(bool init) {
tsd_t *tsd = tsd_get(init);
- if (!init && tsd_get_allocates() && tsd == NULL)
+ if (!init && tsd_get_allocates() && tsd == NULL) {
return (NULL);
+ }
assert(tsd != NULL);
if (unlikely(tsd->state != tsd_state_nominal)) {
@@ -41,47 +41,42 @@ tsd_fetch_impl(bool init)
} else if (tsd->state == tsd_state_purgatory) {
tsd->state = tsd_state_reincarnated;
tsd_set(tsd);
- } else
+ } else {
assert(tsd->state == tsd_state_reincarnated);
+ }
}
return (tsd);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void)
-{
+tsd_fetch(void) {
return (tsd_fetch_impl(true));
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd)
-{
+tsd_tsdn(tsd_t *tsd) {
return ((tsdn_t *)tsd);
}
JEMALLOC_INLINE bool
-tsd_nominal(tsd_t *tsd)
-{
+tsd_nominal(tsd_t *tsd) {
return (tsd->state == tsd_state_nominal);
}
#define O(n, t, c) \
JEMALLOC_ALWAYS_INLINE t * \
-tsd_##n##p_get(tsd_t *tsd) \
-{ \
+tsd_##n##p_get(tsd_t *tsd) { \
return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
-tsd_##n##_get(tsd_t *tsd) \
-{ \
+tsd_##n##_get(tsd_t *tsd) { \
return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
-tsd_##n##_set(tsd_t *tsd, t n) \
-{ \
+tsd_##n##_set(tsd_t *tsd, t n) { \
assert(tsd->state == tsd_state_nominal); \
tsd->n = n; \
}
@@ -89,31 +84,28 @@ MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void)
-{
- if (!tsd_booted_get())
+tsdn_fetch(void) {
+ if (!tsd_booted_get()) {
return (NULL);
+ }
return (tsd_tsdn(tsd_fetch_impl(false)));
}
JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn)
-{
+tsdn_null(const tsdn_t *tsdn) {
return (tsdn == NULL);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn)
-{
+tsdn_tsd(tsdn_t *tsdn) {
assert(!tsdn_null(tsdn));
return (&tsdn->tsd);
}
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
-tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback)
-{
+tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
/*
* If tsd cannot be accessed, initialize the fallback rtree_ctx and
* return a pointer to it.
diff --git a/include/jemalloc/internal/tsd_types.h b/include/jemalloc/internal/tsd_types.h
index 17e3da9..ec40d9a 100644
--- a/include/jemalloc/internal/tsd_types.h
+++ b/include/jemalloc/internal/tsd_types.h
@@ -175,8 +175,7 @@ a_attr bool a_name##tsd_booted = false;
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
-a_name##tsd_cleanup_wrapper(void) \
-{ \
+a_name##tsd_cleanup_wrapper(void) { \
if (a_name##tsd_initialized) { \
a_name##tsd_initialized = false; \
a_cleanup(&a_name##tsd_tls); \
@@ -184,8 +183,7 @@ a_name##tsd_cleanup_wrapper(void) \
return (a_name##tsd_initialized); \
} \
a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
+a_name##tsd_boot0(void) { \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
@@ -194,96 +192,88 @@ a_name##tsd_boot0(void) \
return (false); \
} \
a_attr void \
-a_name##tsd_boot1(void) \
-{ \
+a_name##tsd_boot1(void) { \
/* Do nothing. */ \
} \
a_attr bool \
-a_name##tsd_boot(void) \
-{ \
+a_name##tsd_boot(void) { \
return (a_name##tsd_boot0()); \
} \
a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
+a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
} \
a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
+a_name##tsd_get_allocates(void) { \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
+a_name##tsd_get(bool init) { \
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
+a_name##tsd_set(a_type *val) { \
assert(a_name##tsd_booted); \
- if (likely(&a_name##tsd_tls != val)) \
+ if (likely(&a_name##tsd_tls != val)) { \
a_name##tsd_tls = (*val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
+ } \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
a_name##tsd_initialized = true; \
+ } \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
+a_name##tsd_boot0(void) { \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
- 0) \
+ 0) { \
return (true); \
+ } \
} \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
-a_name##tsd_boot1(void) \
-{ \
+a_name##tsd_boot1(void) { \
/* Do nothing. */ \
} \
a_attr bool \
-a_name##tsd_boot(void) \
-{ \
+a_name##tsd_boot(void) { \
return (a_name##tsd_boot0()); \
} \
a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
+a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
} \
a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
+a_name##tsd_get_allocates(void) { \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
+a_name##tsd_get(bool init) { \
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
+a_name##tsd_set(a_type *val) { \
assert(a_name##tsd_booted); \
- if (likely(&a_name##tsd_tls != val)) \
+ if (likely(&a_name##tsd_tls != val)) { \
a_name##tsd_tls = (*val); \
+ } \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)(&a_name##tsd_tls))) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
- if (opt_abort) \
+ if (opt_abort) { \
abort(); \
+ } \
} \
} \
}
@@ -292,15 +282,15 @@ a_name##tsd_set(a_type *val) \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
-a_name##tsd_cleanup_wrapper(void) \
-{ \
+a_name##tsd_cleanup_wrapper(void) { \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
- if (wrapper == NULL) \
+ if (wrapper == NULL) { \
return (false); \
+ } \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
wrapper->initialized = false; \
@@ -314,8 +304,7 @@ a_name##tsd_cleanup_wrapper(void) \
return (false); \
} \
a_attr void \
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
-{ \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) { \
if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
@@ -323,8 +312,7 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
} \
} \
a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
-{ \
+a_name##tsd_wrapper_get(bool init) { \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
@@ -346,11 +334,11 @@ a_name##tsd_wrapper_get(bool init) \
return (wrapper); \
} \
a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
+a_name##tsd_boot0(void) { \
a_name##tsd_tsd = TlsAlloc(); \
- if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
+ if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) { \
return (true); \
+ } \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
@@ -360,8 +348,7 @@ a_name##tsd_boot0(void) \
return (false); \
} \
a_attr void \
-a_name##tsd_boot1(void) \
-{ \
+a_name##tsd_boot1(void) { \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
@@ -375,54 +362,52 @@ a_name##tsd_boot1(void) \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- if (a_name##tsd_boot0()) \
+a_name##tsd_boot(void) { \
+ if (a_name##tsd_boot0()) { \
return (true); \
+ } \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
+a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
} \
a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
+a_name##tsd_get_allocates(void) { \
return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
+a_name##tsd_get(bool init) { \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
+ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) { \
return (NULL); \
+ } \
return (&wrapper->val); \
} \
a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
+a_name##tsd_set(a_type *val) { \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(true); \
- if (likely(&wrapper->val != val)) \
+ if (likely(&wrapper->val != val)) { \
wrapper->val = *(val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
+ } \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
wrapper->initialized = true; \
+ } \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr void \
-a_name##tsd_cleanup_wrapper(void *arg) \
-{ \
+a_name##tsd_cleanup_wrapper(void *arg) { \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
if (a_cleanup != malloc_tsd_no_cleanup && \
@@ -435,8 +420,9 @@ a_name##tsd_cleanup_wrapper(void *arg) \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
- if (opt_abort) \
+ if (opt_abort) { \
abort(); \
+ } \
} \
return; \
} \
@@ -444,8 +430,7 @@ a_name##tsd_cleanup_wrapper(void *arg) \
malloc_tsd_dalloc(wrapper); \
} \
a_attr void \
-a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
-{ \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) { \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
@@ -454,8 +439,7 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
} \
} \
a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
-{ \
+a_name##tsd_wrapper_get(bool init) { \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
pthread_getspecific(a_name##tsd_tsd); \
\
@@ -464,8 +448,9 @@ a_name##tsd_wrapper_get(bool init) \
wrapper = (a_name##tsd_wrapper_t *) \
tsd_init_check_recursion(&a_name##tsd_init_head, \
&block); \
- if (wrapper) \
- return (wrapper); \
+ if (wrapper) { \
+ return (wrapper); \
+ } \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
block.data = (void *)wrapper; \
@@ -483,18 +468,17 @@ a_name##tsd_wrapper_get(bool init) \
return (wrapper); \
} \
a_attr bool \
-a_name##tsd_boot0(void) \
-{ \
+a_name##tsd_boot0(void) { \
if (pthread_key_create(&a_name##tsd_tsd, \
- a_name##tsd_cleanup_wrapper) != 0) \
+ a_name##tsd_cleanup_wrapper) != 0) { \
return (true); \
+ } \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
} \
a_attr void \
-a_name##tsd_boot1(void) \
-{ \
+a_name##tsd_boot1(void) { \
a_name##tsd_wrapper_t *wrapper; \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
@@ -508,46 +492,45 @@ a_name##tsd_boot1(void) \
a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
-a_name##tsd_boot(void) \
-{ \
- if (a_name##tsd_boot0()) \
+a_name##tsd_boot(void) { \
+ if (a_name##tsd_boot0()) { \
return (true); \
+ } \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
+a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
} \
a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
+a_name##tsd_get_allocates(void) { \
return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
-{ \
+a_name##tsd_get(bool init) { \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
+ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) { \
return (NULL); \
+ } \
return (&wrapper->val); \
} \
a_attr void \
-a_name##tsd_set(a_type *val) \
-{ \
+a_name##tsd_set(a_type *val) { \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(true); \
- if (likely(&wrapper->val != val)) \
+ if (likely(&wrapper->val != val)) { \
wrapper->val = *(val); \
- if (a_cleanup != malloc_tsd_no_cleanup) \
+ } \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
wrapper->initialized = true; \
+ } \
}
#endif
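
The final (pthread, non-TLS) variant above lazily heap-allocates a per-thread wrapper and stashes it with pthread_setspecific(), which is why tsd_get_allocates() returns true and callers must tolerate NULL. A reduced standalone sketch of the same wrapper pattern, with error handling and the recursion guard trimmed; the payload type is illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    bool    initialized;
    int     val;    /* Illustrative payload. */
} wrapper_t;

static pthread_key_t key;

static void
cleanup(void *arg) {
    free(arg);
}

static bool
tsd_boot(void) {
    return (pthread_key_create(&key, cleanup) != 0);
}

static wrapper_t *
tsd_wrapper_get(bool init) {
    wrapper_t *wrapper = (wrapper_t *)pthread_getspecific(key);

    if (init && wrapper == NULL) {
        wrapper = (wrapper_t *)calloc(1, sizeof(*wrapper));
        if (wrapper != NULL) {
            pthread_setspecific(key, wrapper);
        }
    }
    return (wrapper);
}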
diff --git a/include/jemalloc/internal/util_inlines.h b/include/jemalloc/internal/util_inlines.h
index 4ceed06..271673a 100644
--- a/include/jemalloc/internal/util_inlines.h
+++ b/include/jemalloc/internal/util_inlines.h
@@ -25,26 +25,22 @@ int get_errno(void);
#endif
JEMALLOC_ALWAYS_INLINE unsigned
-ffs_llu(unsigned long long bitmap)
-{
+ffs_llu(unsigned long long bitmap) {
return (JEMALLOC_INTERNAL_FFSLL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
-ffs_lu(unsigned long bitmap)
-{
+ffs_lu(unsigned long bitmap) {
return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u(unsigned bitmap)
-{
+ffs_u(unsigned bitmap) {
return (JEMALLOC_INTERNAL_FFS(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
-ffs_zu(size_t bitmap)
-{
+ffs_zu(size_t bitmap) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return (ffs_u(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
@@ -57,8 +53,7 @@ ffs_zu(size_t bitmap)
}
JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u64(uint64_t bitmap)
-{
+ffs_u64(uint64_t bitmap) {
#if LG_SIZEOF_LONG == 3
return (ffs_lu(bitmap));
#elif LG_SIZEOF_LONG_LONG == 3
@@ -69,8 +64,7 @@ ffs_u64(uint64_t bitmap)
}
JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u32(uint32_t bitmap)
-{
+ffs_u32(uint32_t bitmap) {
#if LG_SIZEOF_INT == 2
return (ffs_u(bitmap));
#else
@@ -80,8 +74,7 @@ ffs_u32(uint32_t bitmap)
}
JEMALLOC_INLINE uint64_t
-pow2_ceil_u64(uint64_t x)
-{
+pow2_ceil_u64(uint64_t x) {
x--;
x |= x >> 1;
x |= x >> 2;
@@ -94,8 +87,7 @@ pow2_ceil_u64(uint64_t x)
}
JEMALLOC_INLINE uint32_t
-pow2_ceil_u32(uint32_t x)
-{
+pow2_ceil_u32(uint32_t x) {
x--;
x |= x >> 1;
x |= x >> 2;
@@ -108,8 +100,7 @@ pow2_ceil_u32(uint32_t x)
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
-pow2_ceil_zu(size_t x)
-{
+pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
return (pow2_ceil_u64(x));
#else
@@ -119,8 +110,7 @@ pow2_ceil_zu(size_t x)
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
+lg_floor(size_t x) {
size_t ret;
assert(x != 0);
@@ -134,8 +124,7 @@ lg_floor(size_t x)
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
+lg_floor(size_t x) {
unsigned long ret;
assert(x != 0);
@@ -152,8 +141,7 @@ lg_floor(size_t x)
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
+lg_floor(size_t x) {
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
@@ -166,8 +154,7 @@ lg_floor(size_t x)
}
#else
JEMALLOC_INLINE unsigned
-lg_floor(size_t x)
-{
+lg_floor(size_t x) {
assert(x != 0);
x |= (x >> 1);
@@ -178,8 +165,9 @@ lg_floor(size_t x)
#if (LG_SIZEOF_PTR == 3)
x |= (x >> 32);
#endif
- if (x == SIZE_T_MAX)
+ if (x == SIZE_T_MAX) {
return ((8 << LG_SIZEOF_PTR) - 1);
+ }
x++;
return (ffs_zu(x) - 2);
}
@@ -187,8 +175,7 @@ lg_floor(size_t x)
/* Set error code. */
JEMALLOC_INLINE void
-set_errno(int errnum)
-{
+set_errno(int errnum) {
#ifdef _WIN32
SetLastError(errnum);
#else
@@ -198,8 +185,7 @@ set_errno(int errnum)
/* Get last error code. */
JEMALLOC_INLINE int
-get_errno(void)
-{
+get_errno(void) {
#ifdef _WIN32
return (GetLastError());
#else
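
pow2_ceil_u64() above is the classic bit-smearing idiom: after x--, each shift-and-OR copies the highest set bit into every lower position, leaving 2^k - 1, so the final x++ produces the smallest power of two >= the input. A worked trace plus a check, assuming util_inlines.h is included:

#include <assert.h>

/* pow2_ceil_u64(37):
 *   x = 36 = 0b100100          after x--
 *   x |= x >> 1  -> 0b110110
 *   x |= x >> 2  -> 0b111111   (wider shifts change nothing here)
 *   x + 1 = 0b1000000 = 64, the smallest power of two >= 37.
 */
static void
pow2_ceil_demo(void) {
    assert(pow2_ceil_u64(37) == 64);
    assert(pow2_ceil_u64(64) == 64);
}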
diff --git a/include/jemalloc/internal/util_types.h b/include/jemalloc/internal/util_types.h
index 7f72799..4fe206b 100644
--- a/include/jemalloc/internal/util_types.h
+++ b/include/jemalloc/internal/util_types.h
@@ -87,8 +87,9 @@
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
- if (unlikely(!(c))) \
+ if (unlikely(!(c))) { \
not_reached(); \
+ } \
} while (0)
#endif /* JEMALLOC_INTERNAL_UTIL_TYPES_H */
diff --git a/include/jemalloc/internal/witness_inlines.h b/include/jemalloc/internal/witness_inlines.h
index 259aa2e..2e5ebcc 100644
--- a/include/jemalloc/internal/witness_inlines.h
+++ b/include/jemalloc/internal/witness_inlines.h
@@ -13,8 +13,7 @@ void witness_unlock(tsdn_t *tsdn, witness_t *witness);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
/* Helper, not intended for direct use. */
JEMALLOC_INLINE bool
-witness_owner(tsd_t *tsd, const witness_t *witness)
-{
+witness_owner(tsd_t *tsd, const witness_t *witness) {
witness_list_t *witnesses;
witness_t *w;
@@ -22,90 +21,101 @@ witness_owner(tsd_t *tsd, const witness_t *witness)
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
- if (w == witness)
+ if (w == witness) {
return (true);
+ }
}
return (false);
}
JEMALLOC_INLINE void
-witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
-{
+witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) {
tsd_t *tsd;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
- if (witness_owner(tsd, witness))
+ if (witness_owner(tsd, witness)) {
return;
+ }
witness_owner_error(witness);
}
JEMALLOC_INLINE void
-witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
-{
+witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) {
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
- if (w == witness)
+ if (w == witness) {
witness_not_owner_error(witness);
+ }
}
}
JEMALLOC_INLINE void
-witness_assert_lockless(tsdn_t *tsdn)
-{
+witness_assert_lockless(tsdn_t *tsdn) {
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
- if (w != NULL)
+ if (w != NULL) {
witness_lockless_error(witnesses);
+ }
}
JEMALLOC_INLINE void
-witness_lock(tsdn_t *tsdn, witness_t *witness)
-{
+witness_lock(tsdn_t *tsdn, witness_t *witness) {
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
witness_assert_not_owner(tsdn, witness);
@@ -133,19 +143,21 @@ witness_lock(tsdn_t *tsdn, witness_t *witness)
}
JEMALLOC_INLINE void
-witness_unlock(tsdn_t *tsdn, witness_t *witness)
-{
+witness_unlock(tsdn_t *tsdn, witness_t *witness) {
tsd_t *tsd;
witness_list_t *witnesses;
- if (!config_debug)
+ if (!config_debug) {
return;
+ }
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
+ if (witness->rank == WITNESS_RANK_OMIT) {
return;
+ }
/*
* Check whether owner before removal, rather than relying on
@@ -155,8 +167,9 @@ witness_unlock(tsdn_t *tsdn, witness_t *witness)
if (witness_owner(tsd, witness)) {
witnesses = tsd_witnessesp_get(tsd);
ql_remove(witnesses, witness, link);
- } else
+ } else {
witness_assert_owner(tsdn, witness);
+ }
}
#endif
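
As the comment above notes, witness_unlock() checks ownership before removal rather than relying on ql_remove(), so unlocking a witness the thread never acquired fails through the same witness_assert_owner() path. In debug builds the calls bracket each lock acquisition; a hedged sketch against the API shown:

/* Assumes witness_inlines.h (above) is included. */
static void
locked_op(tsdn_t *tsdn, witness_t *witness) {
    witness_lock(tsdn, witness);    /* Asserts the thread does not hold it. */
    /* ... critical section ... */
    witness_assert_owner(tsdn, witness);
    witness_unlock(tsdn, witness);
}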
diff --git a/include/msvc_compat/strings.h b/include/msvc_compat/strings.h
index 47998be..971b36d 100644
--- a/include/msvc_compat/strings.h
+++ b/include/msvc_compat/strings.h
@@ -6,17 +6,16 @@
#ifdef _MSC_VER
# include <intrin.h>
# pragma intrinsic(_BitScanForward)
-static __forceinline int ffsl(long x)
-{
+static __forceinline int ffsl(long x) {
unsigned long i;
- if (_BitScanForward(&i, x))
+ if (_BitScanForward(&i, x)) {
return (i + 1);
+ }
return (0);
}
-static __forceinline int ffs(int x)
-{
+static __forceinline int ffs(int x) {
return (ffsl(x));
}
@@ -24,12 +23,12 @@ static __forceinline int ffs(int x)
# pragma intrinsic(_BitScanForward64)
# endif
-static __forceinline int ffsll(unsigned __int64 x)
-{
+static __forceinline int ffsll(unsigned __int64 x) {
unsigned long i;
#ifdef _M_X64
- if (_BitScanForward64(&i, x))
+ if (_BitScanForward64(&i, x)) {
return (i + 1);
+ }
return (0);
#else
// Fallback for 32-bit build where 64-bit version not available
@@ -41,10 +40,11 @@ static __forceinline int ffsll(unsigned __int64 x)
s.ll = x;
- if (_BitScanForward(&i, s.l[0]))
+ if (_BitScanForward(&i, s.l[0])) {
return (i + 1);
- else if(_BitScanForward(&i, s.l[1]))
+ } else if(_BitScanForward(&i, s.l[1])) {
return (i + 33);
+ }
return (0);
#endif
}
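
In the 32-bit fallback above, a hit in the high dword returns i + 33: 32 positions skipped for the low dword plus ffs()'s 1-based indexing. A worked check of that offset (illustrative value):

/* x = 1ULL << 40:
 *   s.l[0] == 0, so the first _BitScanForward() fails;
 *   s.l[1] == 1 << 8, so the second sets i = 8 and the function
 *   returns 8 + 33 == 41, the 1-based index of bit 40, matching
 *   POSIX ffsll() semantics.
 */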
diff --git a/msvc/projects/vc2015/test_threads/test_threads.cpp b/msvc/projects/vc2015/test_threads/test_threads.cpp
index a3d1a79..92e3162 100644
--- a/msvc/projects/vc2015/test_threads/test_threads.cpp
+++ b/msvc/projects/vc2015/test_threads/test_threads.cpp
@@ -16,8 +16,7 @@ using std::thread;
using std::uniform_int_distribution;
using std::minstd_rand;
-int test_threads()
-{
+int test_threads() {
je_malloc_conf = "narenas:3";
int narenas = 0;
size_t sz = sizeof(narenas);
diff --git a/msvc/projects/vc2015/test_threads/test_threads_main.cpp b/msvc/projects/vc2015/test_threads/test_threads_main.cpp
index ffd96e6..0a022fb 100644
--- a/msvc/projects/vc2015/test_threads/test_threads_main.cpp
+++ b/msvc/projects/vc2015/test_threads/test_threads_main.cpp
@@ -5,8 +5,7 @@
using namespace std::chrono_literals;
-int main(int argc, char** argv)
-{
+int main(int argc, char** argv) {
int rc = test_threads();
return rc;
}
diff --git a/src/arena.c b/src/arena.c
index 7362c4e..5cf9bd0 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -38,16 +38,14 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,
/******************************************************************************/
static size_t
-arena_extent_dirty_npages(const extent_t *extent)
-{
+arena_extent_dirty_npages(const extent_t *extent) {
return (extent_size_get(extent) >> LG_PAGE);
}
static extent_t *
arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool slab)
-{
+ size_t alignment, bool *zero, bool slab) {
bool commit = true;
malloc_mutex_assert_owner(tsdn, &arena->lock);
@@ -59,8 +57,7 @@ arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
extent_t *
arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size,
- size_t alignment, bool *zero)
-{
+ size_t alignment, bool *zero) {
extent_t *extent;
malloc_mutex_lock(tsdn, &arena->lock);
@@ -73,8 +70,7 @@ arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
static void
arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &arena->lock);
extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
@@ -83,8 +79,7 @@ arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
void
arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
malloc_mutex_lock(tsdn, &arena->lock);
arena_extent_cache_dalloc_locked(tsdn, arena, r_extent_hooks, extent);
malloc_mutex_unlock(tsdn, &arena->lock);
@@ -92,8 +87,7 @@ arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
void
arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- bool cache)
-{
+ bool cache) {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
if (cache) {
@@ -104,8 +98,7 @@ arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
void
arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- bool dirty)
-{
+ bool dirty) {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
if (dirty) {
@@ -117,8 +110,7 @@ arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
JEMALLOC_INLINE_C void *
arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
- const arena_bin_info_t *bin_info)
-{
+ const arena_bin_info_t *bin_info) {
void *ret;
arena_slab_data_t *slab_data = extent_slab_data_get(slab);
size_t regind;
@@ -137,8 +129,7 @@ arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
JEMALLOC_INLINE_C
#endif
size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr)
-{
+arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
@@ -174,8 +165,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr)
JEMALLOC_INLINE_C void
arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
- arena_slab_data_t *slab_data, void *ptr)
-{
+ arena_slab_data_t *slab_data, void *ptr) {
szind_t binind = slab_data->binind;
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
size_t regind = arena_slab_regind(slab, binind, ptr);
@@ -189,27 +179,25 @@ arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
}
static void
-arena_nactive_add(arena_t *arena, size_t add_pages)
-{
+arena_nactive_add(arena_t *arena, size_t add_pages) {
arena->nactive += add_pages;
}
static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages)
-{
+arena_nactive_sub(arena_t *arena, size_t sub_pages) {
assert(arena->nactive >= sub_pages);
arena->nactive -= sub_pages;
}
static void
-arena_large_malloc_stats_update(arena_t *arena, size_t usize)
-{
+arena_large_malloc_stats_update(arena_t *arena, size_t usize) {
szind_t index, hindex;
cassert(config_stats);
- if (usize < LARGE_MINCLASS)
+ if (usize < LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
+ }
index = size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
@@ -221,14 +209,14 @@ arena_large_malloc_stats_update(arena_t *arena, size_t usize)
}
static void
-arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize)
-{
+arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize) {
szind_t index, hindex;
cassert(config_stats);
- if (usize < LARGE_MINCLASS)
+ if (usize < LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
+ }
index = size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
@@ -240,14 +228,14 @@ arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize)
}
static void
-arena_large_dalloc_stats_update(arena_t *arena, size_t usize)
-{
+arena_large_dalloc_stats_update(arena_t *arena, size_t usize) {
szind_t index, hindex;
cassert(config_stats);
- if (usize < LARGE_MINCLASS)
+ if (usize < LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
+ }
index = size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
@@ -258,8 +246,7 @@ arena_large_dalloc_stats_update(arena_t *arena, size_t usize)
}
static void
-arena_large_reset_stats_cancel(arena_t *arena, size_t usize)
-{
+arena_large_reset_stats_cancel(arena_t *arena, size_t usize) {
szind_t index = size2index(usize);
szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
@@ -270,16 +257,15 @@ arena_large_reset_stats_cancel(arena_t *arena, size_t usize)
}
static void
-arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize)
-{
+arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize) {
arena_large_dalloc_stats_update(arena, oldusize);
arena_large_malloc_stats_update(arena, usize);
}
static extent_t *
arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, size_t usize, size_t alignment, bool *zero)
-{
+ extent_hooks_t **r_extent_hooks, size_t usize, size_t alignment,
+ bool *zero) {
extent_t *extent;
bool commit = true;
@@ -301,8 +287,7 @@ arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool *zero)
-{
+ size_t alignment, bool *zero) {
extent_t *extent;
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
@@ -328,14 +313,14 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
void
arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- bool locked)
-{
+ bool locked) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- if (!locked)
+ if (!locked) {
malloc_mutex_lock(tsdn, &arena->lock);
- else
+ } else {
malloc_mutex_assert_owner(tsdn, &arena->lock);
+ }
if (config_stats) {
arena_large_dalloc_stats_update(arena,
extent_usize_get(extent));
@@ -344,14 +329,14 @@ arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, extent);
- if (!locked)
+ if (!locked) {
malloc_mutex_unlock(tsdn, &arena->lock);
+ }
}
void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- size_t oldusize)
-{
+ size_t oldusize) {
size_t usize = extent_usize_get(extent);
size_t udiff = oldusize - usize;
@@ -366,8 +351,7 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- size_t oldusize)
-{
+ size_t oldusize) {
size_t usize = extent_usize_get(extent);
size_t udiff = usize - oldusize;
@@ -381,8 +365,7 @@ arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
static void
-arena_decay_deadline_init(arena_t *arena)
-{
+arena_decay_deadline_init(arena_t *arena) {
/*
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
@@ -399,14 +382,12 @@ arena_decay_deadline_init(arena_t *arena)
}
static bool
-arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
-{
+arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) {
return (nstime_compare(&arena->decay.deadline, time) <= 0);
}
static size_t
-arena_decay_backlog_npages_limit(const arena_t *arena)
-{
+arena_decay_backlog_npages_limit(const arena_t *arena) {
static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
h,
@@ -423,24 +404,23 @@ arena_decay_backlog_npages_limit(const arena_t *arena)
* to round down to the nearest whole number of pages.
*/
sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
sum += arena->decay.backlog[i] * h_steps[i];
+ }
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
}
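The loop above is a fixed-point dot product: each backlog slot holds the pages dirtied during one past epoch, h_steps[] holds the smoothstep decay coefficients, and the final right shift by SMOOTHSTEP_BFP converts back out of fixed point, rounding down to whole pages. A standalone sketch of the same arithmetic, with illustrative coefficients and an assumed fixed-point shift of 24:
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define BFP 24	/* assumed binary fixed point: weights are x / 2^24 */

static size_t
backlog_limit(const size_t *backlog, const uint64_t *h, size_t nsteps) {
	uint64_t sum = 0;
	for (size_t i = 0; i < nsteps; i++) {
		sum += backlog[i] * h[i];	/* pages * fixed-point weight */
	}
	return (size_t)(sum >> BFP);	/* truncate to whole pages */
}

int
main(void) {
	/* Two epochs: 100 then 50 dirty pages; weights 0.75 and 0.25. */
	size_t backlog[2] = {100, 50};
	uint64_t h[2] = {3u << (BFP - 2), 1u << (BFP - 2)};
	assert(backlog_limit(backlog, h, 2) == 87);	/* 75 + 12.5, floored */
	return 0;
}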
static void
-arena_decay_backlog_update_last(arena_t *arena)
-{
+arena_decay_backlog_update_last(arena_t *arena) {
size_t ndirty_delta = (arena->ndirty > arena->decay.nunpurged) ?
arena->ndirty - arena->decay.nunpurged : 0;
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}
static void
-arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
-{
+arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) {
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
@@ -461,8 +441,7 @@ arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
}
static void
-arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
-{
+arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) {
uint64_t nadvance_u64;
nstime_t delta;
@@ -486,25 +465,23 @@ arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
}
static void
-arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
-{
+arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) {
size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
- if (arena->ndirty > ndirty_limit)
+ if (arena->ndirty > ndirty_limit) {
arena_purge_to_limit(tsdn, arena, ndirty_limit);
+ }
arena->decay.nunpurged = arena->ndirty;
}
static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
-{
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) {
arena_decay_epoch_advance_helper(arena, time);
arena_decay_epoch_advance_purge(tsdn, arena);
}
static void
-arena_decay_init(arena_t *arena, ssize_t decay_time)
-{
+arena_decay_init(arena_t *arena, ssize_t decay_time) {
arena->decay.time = decay_time;
if (decay_time > 0) {
nstime_init2(&arena->decay.interval, decay_time, 0);
@@ -520,18 +497,18 @@ arena_decay_init(arena_t *arena, ssize_t decay_time)
}
static bool
-arena_decay_time_valid(ssize_t decay_time)
-{
- if (decay_time < -1)
+arena_decay_time_valid(ssize_t decay_time) {
+ if (decay_time < -1) {
return (false);
- if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
+ }
+ if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) {
return (true);
+ }
return (false);
}
ssize_t
-arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
-{
+arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) {
ssize_t decay_time;
malloc_mutex_lock(tsdn, &arena->lock);
@@ -542,10 +519,10 @@ arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
}
bool
-arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
-{
- if (!arena_decay_time_valid(decay_time))
+arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
+ if (!arena_decay_time_valid(decay_time)) {
return (true);
+ }
malloc_mutex_lock(tsdn, &arena->lock);
/*
@@ -564,14 +541,14 @@ arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
}
static void
-arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena)
-{
+arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) {
nstime_t time;
/* Purge all or nothing if the option is disabled. */
if (arena->decay.time <= 0) {
- if (arena->decay.time == 0)
+ if (arena->decay.time == 0) {
arena_purge_to_limit(tsdn, arena, 0);
+ }
return;
}
@@ -601,33 +578,34 @@ arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena)
* during the current epoch are not subject to purge until a future
* epoch, so as a result purging only happens during epoch advances.
*/
- if (arena_decay_deadline_reached(arena, &time))
+ if (arena_decay_deadline_reached(arena, &time)) {
arena_decay_epoch_advance(tsdn, arena, &time);
+ }
}
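As the comment above notes, pages dirtied in the current epoch only become purgeable at a later epoch boundary, so the deadline test gates all purging. A hedged sketch of the advance arithmetic, with a flat nanosecond clock assumed in place of the nstime API:
#include <assert.h>
#include <stdint.h>

/* Whole epochs elapsed since the recorded epoch start. */
static uint64_t
epochs_elapsed(uint64_t now_ns, uint64_t epoch_ns, uint64_t interval_ns) {
	if (now_ns < epoch_ns + interval_ns) {
		return 0;	/* deadline not reached; nothing to purge yet */
	}
	return (now_ns - epoch_ns) / interval_ns;
}

int
main(void) {
	/* 1s interval, 2.5s past the epoch start: 2 whole epochs. */
	assert(epochs_elapsed(3500000000u, 1000000000u, 1000000000u) == 2);
	return 0;
}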
void
-arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
-{
+arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_owner(tsdn, &arena->lock);
/* Don't recursively purge. */
- if (arena->purging)
+ if (arena->purging) {
return;
+ }
arena_maybe_purge_helper(tsdn, arena);
}
static size_t
-arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
-{
+arena_dirty_count(tsdn_t *tsdn, arena_t *arena) {
extent_t *extent;
size_t ndirty = 0;
malloc_mutex_lock(tsdn, &arena->extents_mtx);
for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
- &arena->extents_dirty; extent = qr_next(extent, qr_link))
+ &arena->extents_dirty; extent = qr_next(extent, qr_link)) {
ndirty += extent_size_get(extent) >> LG_PAGE;
+ }
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
@@ -636,8 +614,7 @@ arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- size_t ndirty_limit, extent_t *purge_extents_sentinel)
-{
+ size_t ndirty_limit, extent_t *purge_extents_sentinel) {
extent_t *extent, *next;
size_t nstashed = 0;
@@ -651,8 +628,9 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
UNUSED extent_t *textent;
npages = extent_size_get(extent) >> LG_PAGE;
- if (arena->ndirty - (nstashed + npages) < ndirty_limit)
+ if (arena->ndirty - (nstashed + npages) < ndirty_limit) {
break;
+ }
next = qr_next(extent, qr_link);
/* Allocate. */
@@ -675,20 +653,21 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *purge_extents_sentinel)
-{
+ extent_hooks_t **r_extent_hooks, extent_t *purge_extents_sentinel) {
UNUSED size_t nmadvise;
size_t npurged;
extent_t *extent, *next;
- if (config_stats)
+ if (config_stats) {
nmadvise = 0;
+ }
npurged = 0;
for (extent = qr_next(purge_extents_sentinel, qr_link); extent !=
purge_extents_sentinel; extent = next) {
- if (config_stats)
+ if (config_stats) {
nmadvise++;
+ }
npurged += extent_size_get(extent) >> LG_PAGE;
next = qr_next(extent, qr_link);
@@ -709,8 +688,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
* invariant: (arena->ndirty >= ndirty_limit)
*/
static void
-arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
-{
+arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) {
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t npurge, npurged;
extent_t purge_extents_sentinel;
@@ -730,33 +708,34 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, ndirty_limit,
&purge_extents_sentinel);
- if (npurge == 0)
+ if (npurge == 0) {
goto label_return;
+ }
npurged = arena_purge_stashed(tsdn, arena, &extent_hooks,
&purge_extents_sentinel);
assert(npurged == npurge);
- if (config_stats)
+ if (config_stats) {
arena->stats.npurge++;
+ }
label_return:
arena->purging = false;
}
void
-arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
-{
+arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) {
malloc_mutex_lock(tsdn, &arena->lock);
- if (all)
+ if (all) {
arena_purge_to_limit(tsdn, arena, 0);
- else
+ } else {
arena_maybe_purge(tsdn, arena);
+ }
malloc_mutex_unlock(tsdn, &arena->lock);
}
static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab)
-{
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
@@ -764,45 +743,41 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab)
}
static void
-arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab)
-{
+arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
assert(extent_slab_data_get(slab)->nfree > 0);
extent_heap_insert(&bin->slabs_nonfull, slab);
}
static void
-arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab)
-{
+arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
extent_heap_remove(&bin->slabs_nonfull, slab);
}
static extent_t *
-arena_bin_slabs_nonfull_tryget(arena_bin_t *bin)
-{
+arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
- if (slab == NULL)
+ if (slab == NULL) {
return (NULL);
- if (config_stats)
+ }
+ if (config_stats) {
bin->stats.reslabs++;
+ }
return (slab);
}
static void
-arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab)
-{
+arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab) {
assert(extent_slab_data_get(slab)->nfree == 0);
extent_ring_insert(&bin->slabs_full, slab);
}
static void
-arena_bin_slabs_full_remove(extent_t *slab)
-{
+arena_bin_slabs_full_remove(extent_t *slab) {
extent_ring_remove(slab);
}
void
-arena_reset(tsd_t *tsd, arena_t *arena)
-{
+arena_reset(tsd_t *tsd, arena_t *arena) {
unsigned i;
extent_t *extent;
@@ -828,16 +803,19 @@ arena_reset(tsd_t *tsd, arena_t *arena)
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
- if (config_stats || (config_prof && opt_prof))
+ if (config_stats || (config_prof && opt_prof)) {
usize = isalloc(tsd_tsdn(tsd), extent, ptr);
+ }
/* Remove large allocation from prof sample set. */
- if (config_prof && opt_prof)
+ if (config_prof && opt_prof) {
prof_free(tsd, extent, ptr, usize);
+ }
large_dalloc(tsd_tsdn(tsd), extent);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
/* Cancel out unwanted effects on stats. */
- if (config_stats)
+ if (config_stats) {
arena_large_reset_stats_cancel(arena, usize);
+ }
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -883,8 +861,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
}
static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena)
-{
+arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t i;
@@ -912,8 +889,7 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena)
}
void
-arena_destroy(tsd_t *tsd, arena_t *arena)
-{
+arena_destroy(tsd_t *tsd, arena_t *arena) {
assert(base_ind_get(arena->base) >= narenas_auto);
assert(arena_nthreads_get(arena, false) == 0);
assert(arena_nthreads_get(arena, true) == 0);
@@ -949,8 +925,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena)
static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info)
-{
+ extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info) {
extent_t *slab;
bool zero, commit;
@@ -966,8 +941,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
- const arena_bin_info_t *bin_info)
-{
+ const arena_bin_info_t *bin_info) {
extent_t *slab;
arena_slab_data_t *slab_data;
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
@@ -978,8 +952,9 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
if (slab == NULL) {
slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
bin_info);
- if (slab == NULL)
+ if (slab == NULL) {
return (NULL);
+ }
}
assert(extent_slab_get(slab));
@@ -991,23 +966,24 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
slab_data->nfree = bin_info->nregs;
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info);
- if (config_stats)
+ if (config_stats) {
arena->stats.mapped += extent_size_get(slab);
+ }
return (slab);
}
static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
- szind_t binind)
-{
+ szind_t binind) {
extent_t *slab;
const arena_bin_info_t *bin_info;
/* Look for a usable slab. */
slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL)
+ if (slab != NULL) {
return (slab);
+ }
/* No existing slabs have any space available. */
bin_info = &arena_bin_info[binind];
@@ -1034,8 +1010,9 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
* so search one more time.
*/
slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL)
+ if (slab != NULL) {
return (slab);
+ }
return (NULL);
}
@@ -1043,8 +1020,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
- szind_t binind)
-{
+ szind_t binind) {
const arena_bin_info_t *bin_info;
extent_t *slab;
@@ -1088,8 +1064,9 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
bin->slabcur = NULL;
}
- if (slab == NULL)
+ if (slab == NULL) {
return (NULL);
+ }
bin->slabcur = slab;
assert(extent_slab_data_get(bin->slabcur)->nfree > 0);
@@ -1099,15 +1076,15 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
- szind_t binind, uint64_t prof_accumbytes)
-{
+ szind_t binind, uint64_t prof_accumbytes) {
unsigned i, nfill;
arena_bin_t *bin;
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
prof_idump(tsdn);
+ }
bin = &arena->bins[binind];
malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1118,8 +1095,9 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
extent_slab_data_get(slab)->nfree > 0) {
ptr = arena_slab_reg_alloc(tsdn, slab,
&arena_bin_info[binind]);
- } else
+ } else {
ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
@@ -1152,10 +1130,10 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
}
void
-arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero)
-{
- if (!zero)
+arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
+ if (!zero) {
memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
+ }
}
#ifdef JEMALLOC_JET
@@ -1163,8 +1141,7 @@ arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero)
#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
-arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info)
-{
+arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info) {
memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
#ifdef JEMALLOC_JET
@@ -1175,8 +1152,7 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small =
#endif
static void *
-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
-{
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
arena_bin_t *bin;
size_t usize;
@@ -1188,10 +1164,11 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
malloc_mutex_lock(tsdn, &bin->lock);
if ((slab = bin->slabcur) != NULL && extent_slab_data_get(slab)->nfree >
- 0)
+ 0) {
ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
- else
+ } else {
ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
if (ret == NULL) {
malloc_mutex_unlock(tsdn, &bin->lock);
@@ -1204,16 +1181,18 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
bin->stats.curregs++;
}
malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && arena_prof_accum(tsdn, arena, usize))
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
prof_idump(tsdn);
+ }
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
- } else if (unlikely(opt_zero))
+ } else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
+ }
}
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
@@ -1229,24 +1208,25 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero)
-{
+ bool zero) {
assert(!tsdn_null(tsdn) || arena != NULL);
- if (likely(!tsdn_null(tsdn)))
+ if (likely(!tsdn_null(tsdn))) {
arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL))
+ }
+ if (unlikely(arena == NULL)) {
return (NULL);
+ }
- if (likely(size <= SMALL_MAXCLASS))
+ if (likely(size <= SMALL_MAXCLASS)) {
return (arena_malloc_small(tsdn, arena, ind, zero));
+ }
return (large_malloc(tsdn, arena, index2size(ind), zero));
}
void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero, tcache_t *tcache)
-{
+ bool zero, tcache_t *tcache) {
void *ret;
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
@@ -1255,18 +1235,18 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
} else {
- if (likely(alignment <= CACHELINE))
+ if (likely(alignment <= CACHELINE)) {
ret = large_malloc(tsdn, arena, usize, zero);
- else
+ } else {
ret = large_palloc(tsdn, arena, usize, alignment, zero);
+ }
}
return (ret);
}
void
arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
- size_t usize)
-{
+ size_t usize) {
arena_t *arena = extent_arena_get(extent);
cassert(config_prof);
@@ -1283,18 +1263,18 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
* canceling.
*/
malloc_mutex_lock(tsdn, &arena->lock);
- if (arena->prof_accumbytes >= LARGE_MINCLASS - usize)
+ if (arena->prof_accumbytes >= LARGE_MINCLASS - usize) {
arena->prof_accumbytes -= LARGE_MINCLASS - usize;
- else
+ } else {
arena->prof_accumbytes = 0;
+ }
malloc_mutex_unlock(tsdn, &arena->lock);
assert(isalloc(tsdn, extent, ptr) == usize);
}
static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr)
-{
+arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
@@ -1307,8 +1287,7 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr)
void
arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
- tcache_t *tcache, bool slow_path)
-{
+ tcache_t *tcache, bool slow_path) {
size_t usize;
cassert(config_prof);
@@ -1318,17 +1297,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
if (usize <= tcache_maxclass) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, usize,
slow_path);
- } else
+ } else {
large_dalloc(tsdn, extent);
+ }
}
static void
-arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin)
-{
+arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin) {
/* Dissociate slab from bin. */
- if (slab == bin->slabcur)
+ if (slab == bin->slabcur) {
bin->slabcur = NULL;
- else {
+ } else {
szind_t binind = extent_slab_data_get(slab)->binind;
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
@@ -1337,17 +1316,17 @@ arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin)
* slab only contains one region, then it never gets inserted
* into the non-full slabs heap.
*/
- if (bin_info->nregs == 1)
+ if (bin_info->nregs == 1) {
arena_bin_slabs_full_remove(slab);
- else
+ } else {
arena_bin_slabs_nonfull_remove(bin, slab);
+ }
}
}
static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- arena_bin_t *bin)
-{
+ arena_bin_t *bin) {
assert(slab != bin->slabcur);
malloc_mutex_unlock(tsdn, &bin->lock);
@@ -1357,14 +1336,14 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
malloc_mutex_unlock(tsdn, &arena->lock);
/****************************/
malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats)
+ if (config_stats) {
bin->stats.curslabs--;
+ }
}
static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- arena_bin_t *bin)
-{
+ arena_bin_t *bin) {
assert(extent_slab_data_get(slab)->nfree > 0);
/*
@@ -1375,28 +1354,31 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
*/
if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */
- if (extent_slab_data_get(bin->slabcur)->nfree > 0)
+ if (extent_slab_data_get(bin->slabcur)->nfree > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
- else
+ } else {
arena_bin_slabs_full_insert(bin, bin->slabcur);
+ }
bin->slabcur = slab;
- if (config_stats)
+ if (config_stats) {
bin->stats.reslabs++;
- } else
+ }
+ } else {
arena_bin_slabs_nonfull_insert(bin, slab);
+ }
}
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- void *ptr, bool junked)
-{
+ void *ptr, bool junked) {
arena_slab_data_t *slab_data = extent_slab_data_get(slab);
szind_t binind = slab_data->binind;
arena_bin_t *bin = &arena->bins[binind];
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
- if (!junked && config_fill && unlikely(opt_junk_free))
+ if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
+ }
arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
if (slab_data->nfree == bin_info->nregs) {
@@ -1415,14 +1397,12 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- void *ptr)
-{
+ void *ptr) {
arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}
static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
-{
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
arena_bin_t *bin = &arena->bins[extent_slab_data_get(extent)->binind];
malloc_mutex_lock(tsdn, &bin->lock);
@@ -1431,23 +1411,22 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
}
void
-arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
-{
+arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
arena_dalloc_bin(tsdn, arena, extent, ptr);
arena_decay_tick(tsdn, arena);
}
bool
arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
- size_t size, size_t extra, bool zero)
-{
+ size_t size, size_t extra, bool zero) {
size_t usize_min, usize_max;
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
- if (unlikely(size > LARGE_MAXCLASS))
+ if (unlikely(size > LARGE_MAXCLASS)) {
return (true);
+ }
usize_min = s2u(size);
usize_max = s2u(size + extra);
@@ -1460,8 +1439,9 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
oldsize);
if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) !=
size2index(oldsize)) && (size > oldsize || usize_max <
- oldsize))
+ oldsize)) {
return (true);
+ }
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
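arena_ralloc_no_move() is the internal path behind jemalloc's public in-place resize; a caller-side sketch under that assumption, using the standard non-prefixed API:
#include <jemalloc/jemalloc.h>

/* Grow an allocation in place if possible; fall back to a moving realloc. */
static void *
grow(void *ptr, size_t want) {
	if (xallocx(ptr, want, 0, 0) >= want) {
		return ptr;	/* resized without moving */
	}
	return rallocx(ptr, want, 0);	/* may move; NULL on OOM */
}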
@@ -1475,33 +1455,36 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache)
-{
- if (alignment == 0)
+ size_t alignment, bool zero, tcache_t *tcache) {
+ if (alignment == 0) {
return (arena_malloc(tsdn, arena, usize, size2index(usize),
zero, tcache, true));
+ }
usize = sa2u(usize, alignment);
- if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return (NULL);
+ }
return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}
void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
- size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache)
-{
+ size_t oldsize, size_t size, size_t alignment, bool zero,
+ tcache_t *tcache) {
void *ret;
size_t usize, copysize;
usize = s2u(size);
- if (unlikely(usize == 0 || size > LARGE_MAXCLASS))
+ if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
return (NULL);
+ }
if (likely(usize <= SMALL_MAXCLASS)) {
/* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0,
- zero))
+ zero)) {
return (ptr);
+ }
}
if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
@@ -1515,8 +1498,9 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
*/
ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero,
tcache);
- if (ret == NULL)
+ if (ret == NULL) {
return (NULL);
+ }
/*
* Junk/zero-filling were already done by
@@ -1530,8 +1514,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
}
dss_prec_t
-arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
-{
+arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) {
dss_prec_t ret;
malloc_mutex_lock(tsdn, &arena->lock);
@@ -1541,10 +1524,10 @@ arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
}
bool
-arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
-{
- if (!have_dss)
+arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) {
+ if (!have_dss) {
return (dss_prec != dss_prec_disabled);
+ }
malloc_mutex_lock(tsdn, &arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(tsdn, &arena->lock);
@@ -1552,24 +1535,22 @@ arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
}
ssize_t
-arena_decay_time_default_get(void)
-{
+arena_decay_time_default_get(void) {
return ((ssize_t)atomic_read_zu((size_t *)&decay_time_default));
}
bool
-arena_decay_time_default_set(ssize_t decay_time)
-{
- if (!arena_decay_time_valid(decay_time))
+arena_decay_time_default_set(ssize_t decay_time) {
+ if (!arena_decay_time_valid(decay_time)) {
return (true);
+ }
atomic_write_zu((size_t *)&decay_time_default, (size_t)decay_time);
return (false);
}
static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty)
-{
+ const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty) {
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*decay_time = arena->decay.time;
@@ -1579,8 +1560,7 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty)
-{
+ const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty) {
malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, decay_time,
nactive, ndirty);
@@ -1591,8 +1571,7 @@ void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
-{
+ malloc_large_stats_t *lstats) {
size_t base_allocated, base_resident, base_mapped;
unsigned i;
@@ -1662,57 +1641,57 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
}
unsigned
-arena_nthreads_get(arena_t *arena, bool internal)
-{
+arena_nthreads_get(arena_t *arena, bool internal) {
return (atomic_read_u(&arena->nthreads[internal]));
}
void
-arena_nthreads_inc(arena_t *arena, bool internal)
-{
+arena_nthreads_inc(arena_t *arena, bool internal) {
atomic_add_u(&arena->nthreads[internal], 1);
}
void
-arena_nthreads_dec(arena_t *arena, bool internal)
-{
+arena_nthreads_dec(arena_t *arena, bool internal) {
atomic_sub_u(&arena->nthreads[internal], 1);
}
size_t
-arena_extent_sn_next(arena_t *arena)
-{
+arena_extent_sn_next(arena_t *arena) {
return (atomic_add_zu(&arena->extent_sn_next, 1) - 1);
}
arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
+arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena_t *arena;
base_t *base;
unsigned i;
- if (ind == 0)
+ if (ind == 0) {
base = b0get();
- else {
+ } else {
base = base_new(tsdn, ind, extent_hooks);
- if (base == NULL)
+ if (base == NULL) {
return (NULL);
+ }
}
arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
- if (arena == NULL)
+ if (arena == NULL) {
goto label_error;
+ }
arena->nthreads[0] = arena->nthreads[1] = 0;
- if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
+ if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) {
goto label_error;
+ }
- if (config_stats && config_tcache)
+ if (config_stats && config_tcache) {
ql_new(&arena->tcache_ql);
+ }
- if (config_prof)
+ if (config_prof) {
arena->prof_accumbytes = 0;
+ }
if (config_cache_oblivious) {
/*
@@ -1738,8 +1717,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
ql_new(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
- WITNESS_RANK_ARENA_LARGE))
+ WITNESS_RANK_ARENA_LARGE)) {
goto label_error;
+ }
for (i = 0; i < NPSIZES+1; i++) {
extent_heap_new(&arena->extents_cached[i]);
@@ -1750,83 +1730,85 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
false, false);
if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
- WITNESS_RANK_ARENA_EXTENTS))
+ WITNESS_RANK_ARENA_EXTENTS)) {
goto label_error;
+ }
- if (!config_munmap)
+ if (!config_munmap) {
arena->extent_grow_next = psz2ind(HUGEPAGE);
+ }
ql_new(&arena->extent_cache);
if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
- WITNESS_RANK_ARENA_EXTENT_CACHE))
+ WITNESS_RANK_ARENA_EXTENT_CACHE)) {
goto label_error;
+ }
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock, "arena_bin",
- WITNESS_RANK_ARENA_BIN))
+ WITNESS_RANK_ARENA_BIN)) {
goto label_error;
+ }
bin->slabcur = NULL;
extent_heap_new(&bin->slabs_nonfull);
extent_init(&bin->slabs_full, arena, NULL, 0, 0, 0, false,
false, false, false);
- if (config_stats)
+ if (config_stats) {
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ }
}
arena->base = base;
return (arena);
label_error:
- if (ind != 0)
+ if (ind != 0) {
base_delete(base);
+ }
return (NULL);
}
void
-arena_boot(void)
-{
+arena_boot(void) {
arena_decay_time_default_set(opt_decay_time);
}
void
-arena_prefork0(tsdn_t *tsdn, arena_t *arena)
-{
+arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->lock);
}
void
-arena_prefork1(tsdn_t *tsdn, arena_t *arena)
-{
+arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->extents_mtx);
}
void
-arena_prefork2(tsdn_t *tsdn, arena_t *arena)
-{
+arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
}
void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena)
-{
+arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
base_prefork(tsdn, arena->base);
- for (i = 0; i < NBINS; i++)
+ for (i = 0; i < NBINS; i++) {
malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
+ }
malloc_mutex_prefork(tsdn, &arena->large_mtx);
}
void
-arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
-{
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
- for (i = 0; i < NBINS; i++)
+ for (i = 0; i < NBINS; i++) {
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+ }
base_postfork_parent(tsdn, arena->base);
malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->extents_mtx);
@@ -1834,13 +1816,13 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
}
void
-arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
-{
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
- for (i = 0; i < NBINS; i++)
+ for (i = 0; i < NBINS; i++) {
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+ }
base_postfork_child(tsdn, arena->base);
malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->extents_mtx);
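Every hunk above applies the same two mechanical transformations: braces added around single-line bodies, and the line break before each function's opening brace removed. A minimal before/after sketch of the rule, with a throwaway function:
/* Before: */
static int
f(int x)
{
	if (x < 0)
		return (-1);
	else
		return (1);
}

/* After: */
static int
f(int x) {
	if (x < 0) {
		return (-1);
	} else {
		return (1);
	}
}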
diff --git a/src/base.c b/src/base.c
index 7c0ef2c..ee964fa 100644
--- a/src/base.c
+++ b/src/base.c
@@ -9,17 +9,16 @@ static base_t *b0;
/******************************************************************************/
static void *
-base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size)
-{
+base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
assert(size == HUGEPAGE_CEILING(size));
- if (extent_hooks == &extent_hooks_default)
+ if (extent_hooks == &extent_hooks_default) {
addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
- else {
+ } else {
addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
&zero, &commit, ind);
}
@@ -28,8 +27,8 @@ base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size)
}
static void
-base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size)
-{
+base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+ size_t size) {
/*
* Cascade through dalloc, decommit, purge_lazy, and purge_forced,
* stopping at first success. This cascade is performed for consistency
@@ -41,40 +40,48 @@ base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size)
* some consistent-but-allocated state.
*/
if (extent_hooks == &extent_hooks_default) {
- if (!extent_dalloc_mmap(addr, size))
+ if (!extent_dalloc_mmap(addr, size)) {
return;
- if (!pages_decommit(addr, size))
+ }
+ if (!pages_decommit(addr, size)) {
return;
- if (!pages_purge_lazy(addr, size))
+ }
+ if (!pages_purge_lazy(addr, size)) {
return;
- if (!pages_purge_forced(addr, size))
+ }
+ if (!pages_purge_forced(addr, size)) {
return;
+ }
/* Nothing worked. This should never happen. */
not_reached();
} else {
if (extent_hooks->dalloc != NULL &&
- !extent_hooks->dalloc(extent_hooks, addr, size, true, ind))
+ !extent_hooks->dalloc(extent_hooks, addr, size, true,
+ ind)) {
return;
+ }
if (extent_hooks->decommit != NULL &&
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
- ind))
+ ind)) {
return;
+ }
if (extent_hooks->purge_lazy != NULL &&
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
- ind))
+ ind)) {
return;
+ }
if (extent_hooks->purge_forced != NULL &&
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
- size, ind))
+ size, ind)) {
return;
+ }
/* Nothing worked. That's the application's problem. */
}
}
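The cascade in base_unmap() above relies on the hook convention that these operations return false on success, so the first hook that succeeds short-circuits the rest. A generic sketch of the pattern; the try_* helpers are hypothetical stand-ins:
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins; false means success, as in the hooks above. */
static bool try_unmap(void *addr, size_t size);
static bool try_decommit(void *addr, size_t size);
static bool try_purge(void *addr, size_t size);

static void
release_cascade(void *addr, size_t size) {
	if (!try_unmap(addr, size)) {
		return;	/* fully released */
	}
	if (!try_decommit(addr, size)) {
		return;	/* weaker guarantee, still acceptable */
	}
	if (!try_purge(addr, size)) {
		return;	/* weakest guarantee */
	}
	/* Nothing worked; the memory stays allocated but unused. */
}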
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
- size_t size)
-{
+ size_t size) {
size_t sn;
sn = *extent_sn_next;
@@ -85,8 +92,7 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
- size_t alignment)
-{
+ size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
@@ -104,8 +110,7 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
- size_t gap_size, void *addr, size_t size)
-{
+ size_t gap_size, void *addr, size_t size) {
if (extent_size_get(extent) > 0) {
/*
* Compute the index for the largest size class that does not
@@ -131,8 +136,7 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
static void *
base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
- size_t size, size_t alignment)
-{
+ size_t size, size_t alignment) {
void *ret;
size_t gap_size;
@@ -148,8 +152,7 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
*/
static base_block_t *
base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
- size_t *extent_sn_next, size_t size, size_t alignment)
-{
+ size_t *extent_sn_next, size_t size, size_t alignment) {
base_block_t *block;
size_t usize, header_size, gap_size, block_size;
@@ -159,8 +162,9 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size;
block_size = HUGEPAGE_CEILING(header_size + gap_size + usize);
block = (base_block_t *)base_map(extent_hooks, ind, block_size);
- if (block == NULL)
+ if (block == NULL) {
return (NULL);
+ }
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
@@ -174,8 +178,7 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
* specified alignment.
*/
static extent_t *
-base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
-{
+base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
base_block_t *block;
@@ -183,8 +186,9 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
block = base_block_alloc(extent_hooks, base_ind_get(base),
&base->extent_sn_next, size, alignment);
- if (block == NULL)
+ if (block == NULL) {
return (NULL);
+ }
block->next = base->blocks;
base->blocks = block;
if (config_stats) {
@@ -198,14 +202,12 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
}
base_t *
-b0get(void)
-{
+b0get(void) {
return (b0);
}
base_t *
-base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
+base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base_t *base;
size_t extent_sn_next, base_alignment, base_size, gap_size;
base_block_t *block;
@@ -214,8 +216,9 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
extent_sn_next = 0;
block = base_block_alloc(extent_hooks, ind, &extent_sn_next,
sizeof(base_t), QUANTUM);
- if (block == NULL)
+ if (block == NULL) {
return (NULL);
+ }
base_alignment = CACHELINE;
base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
@@ -229,8 +232,9 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
}
base->extent_sn_next = extent_sn_next;
base->blocks = block;
- for (i = 0; i < NSIZES; i++)
+ for (i = 0; i < NSIZES; i++) {
extent_heap_new(&base->avail[i]);
+ }
if (config_stats) {
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
@@ -245,8 +249,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
}
void
-base_delete(base_t *base)
-{
+base_delete(base_t *base) {
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
base_block_t *next = base->blocks;
do {
@@ -258,14 +261,12 @@ base_delete(base_t *base)
}
extent_hooks_t *
-base_extent_hooks_get(base_t *base)
-{
+base_extent_hooks_get(base_t *base) {
return ((extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun));
}
extent_hooks_t *
-base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks)
-{
+base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
union {
extent_hooks_t **h;
@@ -287,8 +288,7 @@ base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks)
* sharing.
*/
void *
-base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment)
-{
+base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
void *ret;
size_t usize, asize;
szind_t i;
@@ -324,8 +324,7 @@ label_return:
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
- size_t *mapped)
-{
+ size_t *mapped) {
cassert(config_stats);
malloc_mutex_lock(tsdn, &base->mtx);
@@ -338,26 +337,22 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
}
void
-base_prefork(tsdn_t *tsdn, base_t *base)
-{
+base_prefork(tsdn_t *tsdn, base_t *base) {
malloc_mutex_prefork(tsdn, &base->mtx);
}
void
-base_postfork_parent(tsdn_t *tsdn, base_t *base)
-{
+base_postfork_parent(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
-base_postfork_child(tsdn_t *tsdn, base_t *base)
-{
+base_postfork_child(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_child(tsdn, &base->mtx);
}
bool
-base_boot(tsdn_t *tsdn)
-{
+base_boot(tsdn_t *tsdn) {
b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
return (b0 == NULL);
}
diff --git a/src/bitmap.c b/src/bitmap.c
index 3d27f05..7cbc7d4 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -6,8 +6,7 @@
#ifdef BITMAP_USE_TREE
void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
unsigned i;
size_t group_count;
@@ -35,14 +34,12 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
}
static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
return (binfo->levels[binfo->nlevels].group_offset);
}
void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) {
size_t extra;
unsigned i;
@@ -56,23 +53,24 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+ }
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+ }
}
}
#else /* BITMAP_USE_TREE */
void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
@@ -81,27 +79,25 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
}
static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
return (binfo->ngroups);
}
void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) {
size_t extra;
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->ngroups - 1] >>= extra;
+ }
}
#endif /* BITMAP_USE_TREE */
size_t
-bitmap_size(const bitmap_info_t *binfo)
-{
+bitmap_size(const bitmap_info_t *binfo) {
return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
}
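The masking in bitmap_init() above trims the set bits that fall past nbits in the final group. A worked check of that arithmetic, assuming a group width (BITMAP_GROUP_NBITS) of 64:
#include <assert.h>
#include <stdint.h>

int
main(void) {
	const unsigned group_nbits = 64, mask = 63;	/* assumed group width */
	unsigned nbits = 70;	/* bitmap spans two 64-bit groups */
	unsigned extra = (group_nbits - (nbits & mask)) & mask;
	assert(extra == 58);	/* 64 - (70 % 64) = 58 surplus bits */
	uint64_t last_group = UINT64_MAX >> extra;
	assert(last_group == 0x3f);	/* only 6 usable bits stay set */
	return 0;
}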
diff --git a/src/ckh.c b/src/ckh.c
index fe79862..0deaf80 100644
--- a/src/ckh.c
+++ b/src/ckh.c
@@ -50,15 +50,15 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
* otherwise.
*/
JEMALLOC_INLINE_C size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
- if (cell->key != NULL && ckh->keycomp(key, cell->key))
+ if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+ }
}
return (SIZE_T_MAX);
@@ -68,8 +68,7 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
JEMALLOC_INLINE_C size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
+ckh_isearch(ckh_t *ckh, const void *key) {
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
@@ -79,8 +78,9 @@ ckh_isearch(ckh_t *ckh, const void *key)
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
- if (cell != SIZE_T_MAX)
+ if (cell != SIZE_T_MAX) {
return (cell);
+ }
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
@@ -90,8 +90,7 @@ ckh_isearch(ckh_t *ckh, const void *key)
JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
- const void *data)
-{
+ const void *data) {
ckhc_t *cell;
unsigned offset, i;
@@ -123,8 +122,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
*/
JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
- void const **argdata)
-{
+ void const **argdata) {
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
@@ -187,14 +185,14 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
bucket = tbucket;
- if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false);
+ }
}
}
JEMALLOC_INLINE_C bool
-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
-{
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
@@ -203,13 +201,15 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false);
+ }
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false);
+ }
/*
* Try to find a place for this item via iterative eviction/relocation.
@@ -222,8 +222,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
* old table into the new.
*/
JEMALLOC_INLINE_C bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
size_t count, i, nins;
const void *key, *data;
@@ -245,8 +244,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
}
static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_grow(tsd_t *tsd, ckh_t *ckh) {
bool ret;
ckhc_t *tab, *ttab;
unsigned lg_prevbuckets, lg_curcells;
@@ -302,8 +300,7 @@ label_return:
}
static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckhc_t *tab, *ttab;
size_t usize;
unsigned lg_prevbuckets, lg_curcells;
@@ -315,8 +312,9 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return;
+ }
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
@@ -353,8 +351,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
- ckh_keycomp_t *keycomp)
-{
+ ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
@@ -384,8 +381,9 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
- lg_mincells++)
- ; /* Do nothing. */
+ lg_mincells++) {
+ /* Do nothing. */
+ }
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
@@ -409,8 +407,7 @@ label_return:
}
void
-ckh_delete(tsd_t *tsd, ckh_t *ckh)
-{
+ckh_delete(tsd_t *tsd, ckh_t *ckh) {
assert(ckh != NULL);
#ifdef CKH_VERBOSE
@@ -427,30 +424,31 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ckh->tab), ckh->tab,
NULL, true, true);
- if (config_debug)
+ if (config_debug) {
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+ }
}
size_t
-ckh_count(ckh_t *ckh)
-{
+ckh_count(ckh_t *ckh) {
assert(ckh != NULL);
return (ckh->count);
}
bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[i].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[i].data;
+ }
*tabind = i + 1;
return (false);
}
@@ -460,8 +458,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
}
bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
-{
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
bool ret;
assert(ckh != NULL);
@@ -485,18 +482,19 @@ label_return:
bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
- void **data)
-{
+ void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
+ }
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
@@ -516,18 +514,19 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
}
bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
+ }
return (false);
}
@@ -535,14 +534,12 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}
void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
+ckh_string_hash(const void *key, size_t r_hash[2]) {
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
+ckh_string_keycomp(const void *k1, const void *k2) {
assert(k1 != NULL);
assert(k2 != NULL);
@@ -550,8 +547,7 @@ ckh_string_keycomp(const void *k1, const void *k2)
}
void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
+ckh_pointer_hash(const void *key, size_t r_hash[2]) {
union {
const void *v;
size_t i;
@@ -563,7 +559,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
}
bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
+ckh_pointer_keycomp(const void *k1, const void *k2) {
return ((k1 == k2) ? true : false);
}
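ckh implements cuckoo hashing: ckh_try_insert() above tries the key's primary bucket, then its secondary bucket, and only then falls back to iterative eviction/relocation. A schematic of that control flow; table_t and its helpers are hypothetical:
#include <stdbool.h>
#include <stddef.h>

typedef struct table_s table_t;	/* hypothetical cuckoo table */

/* Hypothetical helpers; false means success, matching the ckh convention. */
static void table_hash(const table_t *t, const void *key, size_t h[2]);
static bool bucket_insert(table_t *t, size_t bucket, const void *key,
    const void *data);
static bool evict_reloc_insert(table_t *t, size_t bucket, const void **key,
    const void **data);
static size_t table_mask(const table_t *t);

static bool
cuckoo_insert(table_t *t, const void *key, const void *data) {
	size_t h[2];
	table_hash(t, key, h);
	if (!bucket_insert(t, h[0] & table_mask(t), key, data)) {
		return false;	/* primary bucket had room */
	}
	if (!bucket_insert(t, h[1] & table_mask(t), key, data)) {
		return false;	/* secondary bucket had room */
	}
	/* Both full: displace residents until everything fits, or give up. */
	return evict_reloc_insert(t, h[0] & table_mask(t), &key, &data);
}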
diff --git a/src/ctl.c b/src/ctl.c
index b19c9d3..929176f 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -17,22 +17,19 @@ static ctl_arenas_t *ctl_arenas;
/* Helpers for named and indexed nodes. */
JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_node(const ctl_node_t *node)
-{
+ctl_named_node(const ctl_node_t *node) {
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}
JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, size_t index)
-{
+ctl_named_children(const ctl_named_node_t *node, size_t index) {
const ctl_named_node_t *children = ctl_named_node(node->children);
return (children ? &children[index] : NULL);
}
JEMALLOC_INLINE_C const ctl_indexed_node_t *
-ctl_indexed_node(const ctl_node_t *node)
-{
+ctl_indexed_node(const ctl_node_t *node) {
return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
@@ -433,8 +430,7 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
static unsigned
-arenas_i2a_impl(size_t i, bool compat, bool validate)
-{
+arenas_i2a_impl(size_t i, bool compat, bool validate) {
unsigned a;
switch (i) {
@@ -453,9 +449,9 @@ arenas_i2a_impl(size_t i, bool compat, bool validate)
* removal in 6.0.0.
*/
a = 0;
- } else if (validate && i >= ctl_arenas->narenas)
+ } else if (validate && i >= ctl_arenas->narenas) {
a = UINT_MAX;
- else {
+ } else {
/*
* This function should never be called for an index
* more than one past the range of indices that have
@@ -472,14 +468,12 @@ arenas_i2a_impl(size_t i, bool compat, bool validate)
}
static unsigned
-arenas_i2a(size_t i)
-{
+arenas_i2a(size_t i) {
return (arenas_i2a_impl(i, true, false));
}
static ctl_arena_t *
-arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init)
-{
+arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) {
ctl_arena_t *ret;
assert(!compat || !init);
@@ -515,16 +509,14 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init)
}
static ctl_arena_t *
-arenas_i(size_t i)
-{
+arenas_i(size_t i) {
ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false);
assert(ret != NULL);
return (ret);
}
static void
-ctl_arena_clear(ctl_arena_t *ctl_arena)
-{
+ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->nthreads = 0;
ctl_arena->dss = dss_prec_names[dss_prec_limit];
ctl_arena->decay_time = -1;
@@ -544,8 +536,7 @@ ctl_arena_clear(ctl_arena_t *ctl_arena)
}
static void
-ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena)
-{
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
unsigned i;
if (config_stats) {
@@ -575,8 +566,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena)
static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
- bool destroyed)
-{
+ bool destroyed) {
unsigned i;
if (!destroyed) {
@@ -605,13 +595,15 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
sdstats->astats.base += astats->astats.base;
sdstats->astats.internal += astats->astats.internal;
sdstats->astats.resident += astats->astats.resident;
- } else
+ } else {
assert(astats->astats.internal == 0);
+ }
- if (!destroyed)
+ if (!destroyed) {
sdstats->allocated_small += astats->allocated_small;
- else
+ } else {
assert(astats->allocated_small == 0);
+ }
sdstats->nmalloc_small += astats->nmalloc_small;
sdstats->ndalloc_small += astats->ndalloc_small;
sdstats->nrequests_small += astats->nrequests_small;
@@ -619,8 +611,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
if (!destroyed) {
sdstats->astats.allocated_large +=
astats->astats.allocated_large;
- } else
+ } else {
assert(astats->astats.allocated_large == 0);
+ }
sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sdstats->astats.nrequests_large +=
@@ -639,8 +632,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
if (!destroyed) {
sdstats->bstats[i].curregs +=
astats->bstats[i].curregs;
- } else
+ } else {
assert(astats->bstats[i].curregs == 0);
+ }
if (config_tcache) {
sdstats->bstats[i].nfills +=
astats->bstats[i].nfills;
@@ -652,8 +646,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
if (!destroyed) {
sdstats->bstats[i].curslabs +=
astats->bstats[i].curslabs;
- } else
+ } else {
assert(astats->bstats[i].curslabs == 0);
+ }
}
for (i = 0; i < NSIZES - NBINS; i++) {
@@ -664,16 +659,16 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
if (!destroyed) {
sdstats->lstats[i].curlextents +=
astats->lstats[i].curlextents;
- } else
+ } else {
assert(astats->lstats[i].curlextents == 0);
+ }
}
}
}
static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
- unsigned i, bool destroyed)
-{
+ unsigned i, bool destroyed) {
ctl_arena_t *ctl_arena = arenas_i(i);
ctl_arena_clear(ctl_arena);
@@ -683,8 +678,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
}
static unsigned
-ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks)
-{
+ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) {
unsigned arena_ind;
ctl_arena_t *ctl_arena;
@@ -692,26 +686,29 @@ ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks)
NULL) {
ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
arena_ind = ctl_arena->arena_ind;
- } else
+ } else {
arena_ind = ctl_arenas->narenas;
+ }
/* Trigger stats allocation. */
- if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL)
+ if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) {
return (UINT_MAX);
+ }
/* Initialize new arena. */
- if (arena_init(tsdn, arena_ind, extent_hooks) == NULL)
+ if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) {
return (UINT_MAX);
+ }
- if (arena_ind == ctl_arenas->narenas)
+ if (arena_ind == ctl_arenas->narenas) {
ctl_arenas->narenas++;
+ }
return (arena_ind);
}
static void
-ctl_refresh(tsdn_t *tsdn)
-{
+ctl_refresh(tsdn_t *tsdn) {
unsigned i;
ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
@@ -751,8 +748,7 @@ ctl_refresh(tsdn_t *tsdn)
}
static bool
-ctl_init(tsdn_t *tsdn)
-{
+ctl_init(tsdn_t *tsdn) {
bool ret;
malloc_mutex_lock(tsdn, &ctl_mtx);
@@ -828,8 +824,7 @@ label_return:
static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp)
-{
+ size_t *mibp, size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
@@ -857,9 +852,10 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
- if (nodesp != NULL)
+ if (nodesp != NULL) {
nodesp[i] =
(const ctl_node_t *)node;
+ }
mibp[i] = j;
break;
}
@@ -886,8 +882,9 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
goto label_return;
}
- if (nodesp != NULL)
+ if (nodesp != NULL) {
nodesp[i] = (const ctl_node_t *)node;
+ }
mibp[i] = (size_t)index;
}
@@ -925,8 +922,7 @@ label_return:
int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+ void *newp, size_t newlen) {
int ret;
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
@@ -940,12 +936,14 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
- if (ret != 0)
+ if (ret != 0) {
goto label_return;
+ }
node = ctl_named_node(nodes[depth-1]);
- if (node != NULL && node->ctl)
+ if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
- else {
+ } else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
@@ -956,8 +954,7 @@ label_return:
}
int
-ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
-{
+ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) {
int ret;
if (!ctl_initialized && ctl_init(tsdn)) {
@@ -972,8 +969,7 @@ label_return:
int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
size_t i;
@@ -1009,9 +1005,9 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
/* Call the ctl function. */
- if (node && node->ctl)
+ if (node && node->ctl) {
ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- else {
+ } else {
/* Partial MIB. */
ret = ENOENT;
}
@@ -1021,10 +1017,10 @@ label_return:
}
bool
-ctl_boot(void)
-{
- if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
+ctl_boot(void) {
+ if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) {
return (true);
+ }
ctl_initialized = false;
@@ -1032,20 +1028,17 @@ ctl_boot(void)
}
void
-ctl_prefork(tsdn_t *tsdn)
-{
+ctl_prefork(tsdn_t *tsdn) {
malloc_mutex_prefork(tsdn, &ctl_mtx);
}
void
-ctl_postfork_parent(tsdn_t *tsdn)
-{
+ctl_postfork_parent(tsdn_t *tsdn) {
malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
void
-ctl_postfork_child(tsdn_t *tsdn)
-{
+ctl_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
@@ -1112,36 +1105,38 @@ ctl_postfork_child(tsdn_t *tsdn)
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
+ if (!(c)) { \
return (ENOENT); \
- if (l) \
+ } \
+ if (l) { \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- if (l) \
+ if (l) { \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ } \
return (ret); \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
+ if (!(c)) { \
return (ENOENT); \
+ } \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
@@ -1156,8 +1151,7 @@ label_return: \
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1179,13 +1173,13 @@ label_return: \
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
+ if (!(c)) { \
return (ENOENT); \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
@@ -1198,8 +1192,7 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1215,13 +1208,13 @@ label_return: \
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if (!(c)) \
+ if (!(c)) { \
return (ENOENT); \
+ } \
READONLY(); \
oldval = (m(tsd)); \
READ(oldval, t); \
@@ -1234,8 +1227,7 @@ label_return: \
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
-{ \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1254,15 +1246,15 @@ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
UNUSED uint64_t newval;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t);
- if (newp != NULL)
+ if (newp != NULL) {
ctl_refresh(tsd_tsdn(tsd));
+ }
READ(ctl_arenas->epoch, uint64_t);
ret = 0;
@@ -1317,15 +1309,15 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
arena_t *oldarena;
unsigned newind, oldind;
oldarena = arena_choose(tsd, NULL);
- if (oldarena == NULL)
+ if (oldarena == NULL) {
return (EAGAIN);
+ }
newind = oldind = arena_ind_get(oldarena);
WRITE(newind, unsigned);
@@ -1372,13 +1364,13 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_tcache)
+ if (!config_tcache) {
return (ENOENT);
+ }
oldval = tcache_enabled_get();
if (newp != NULL) {
@@ -1397,12 +1389,12 @@ label_return:
static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- if (!config_tcache)
+ if (!config_tcache) {
return (ENOENT);
+ }
READONLY();
WRITEONLY();
@@ -1416,12 +1408,12 @@ label_return:
static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- if (!config_prof)
+ if (!config_prof) {
return (ENOENT);
+ }
READ_XOR_WRITE();
@@ -1432,8 +1424,9 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
- 0)
+ 0) {
goto label_return;
+ }
} else {
const char *oldname = prof_thread_name_get(tsd);
READ(oldname, const char *);
@@ -1446,13 +1439,13 @@ label_return:
static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
+ if (!config_prof) {
return (ENOENT);
+ }
oldval = prof_thread_active_get(tsd);
if (newp != NULL) {
@@ -1476,13 +1469,13 @@ label_return:
static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned tcache_ind;
- if (!config_tcache)
+ if (!config_tcache) {
return (ENOENT);
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
@@ -1500,13 +1493,13 @@ label_return:
static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned tcache_ind;
- if (!config_tcache)
+ if (!config_tcache) {
return (ENOENT);
+ }
WRITEONLY();
tcache_ind = UINT_MAX;
@@ -1524,13 +1517,13 @@ label_return:
static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned tcache_ind;
- if (!config_tcache)
+ if (!config_tcache) {
return (ENOENT);
+ }
WRITEONLY();
tcache_ind = UINT_MAX;
@@ -1550,8 +1543,7 @@ label_return:
static int
arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
tsdn_t *tsdn = tsd_tsdn(tsd);
unsigned arena_ind;
@@ -1572,8 +1564,7 @@ label_return:
}
static void
-arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
-{
+arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) {
malloc_mutex_lock(tsdn, &ctl_mtx);
{
unsigned narenas = ctl_arenas->narenas;
@@ -1586,8 +1577,9 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
- for (i = 0; i < narenas; i++)
+ for (i = 0; i < narenas; i++) {
tarenas[i] = arena_get(tsdn, i, false);
+ }
/*
* No further need to hold ctl_mtx, since narenas and
@@ -1596,8 +1588,9 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL)
+ if (tarenas[i] != NULL) {
arena_purge(tsdn, tarenas[i], all);
+ }
}
} else {
arena_t *tarena;
@@ -1609,16 +1602,16 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
/* No further need to hold ctl_mtx. */
malloc_mutex_unlock(tsdn, &ctl_mtx);
- if (tarena != NULL)
+ if (tarena != NULL) {
arena_purge(tsdn, tarena, all);
+ }
}
}
}
static int
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
@@ -1634,8 +1627,7 @@ label_return:
static int
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
@@ -1652,8 +1644,7 @@ label_return:
static int
arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
- arena_t **arena)
-{
+ arena_t **arena) {
int ret;
READONLY();
@@ -1678,16 +1669,16 @@ label_return:
static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
arena_t *arena;
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
- if (ret != 0)
+ if (ret != 0) {
return (ret);
+ }
arena_reset(tsd, arena);
@@ -1696,8 +1687,7 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
static int
arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
arena_t *arena;
@@ -1705,8 +1695,9 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
- if (ret != 0)
+ if (ret != 0) {
goto label_return;
+ }
if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
true) != 0) {
@@ -1735,8 +1726,7 @@ label_return:
static int
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *dss = NULL;
unsigned arena_ind;
@@ -1797,8 +1787,7 @@ label_return:
static int
arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
arena_t *arena;
@@ -1833,8 +1822,7 @@ label_return:
static int
arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned arena_ind;
arena_t *arena;
@@ -1867,8 +1855,7 @@ label_return:
}
static const ctl_named_node_t *
-arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
const ctl_named_node_t *ret;
malloc_mutex_lock(tsdn, &ctl_mtx);
@@ -1894,8 +1881,7 @@ label_return:
static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned narenas;
@@ -1916,8 +1902,7 @@ label_return:
static int
arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
if (oldp != NULL && oldlenp != NULL) {
@@ -1949,27 +1934,27 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
static const ctl_named_node_t *
-arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
- if (i > NBINS)
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
+ if (i > NBINS) {
return (NULL);
+ }
return (super_arenas_bin_i_node);
}
CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
-arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
- if (i > NSIZES - NBINS)
+arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (i > NSIZES - NBINS) {
return (NULL);
+ }
return (super_arenas_lextent_i_node);
}
static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
extent_hooks_t *extent_hooks;
unsigned arena_ind;
@@ -1995,13 +1980,13 @@ label_return:
static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
+ if (!config_prof) {
return (ENOENT);
+ }
if (newp != NULL) {
if (newlen != sizeof(bool)) {
@@ -2010,8 +1995,9 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
- } else
+ } else {
oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ }
READ(oldval, bool);
ret = 0;
@@ -2021,13 +2007,13 @@ label_return:
static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
+ if (!config_prof) {
return (ENOENT);
+ }
if (newp != NULL) {
if (newlen != sizeof(bool)) {
@@ -2035,8 +2021,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
goto label_return;
}
oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
- } else
+ } else {
oldval = prof_active_get(tsd_tsdn(tsd));
+ }
READ(oldval, bool);
ret = 0;
@@ -2046,13 +2033,13 @@ label_return:
static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *filename = NULL;
- if (!config_prof)
+ if (!config_prof) {
return (ENOENT);
+ }
WRITEONLY();
WRITE(filename, const char *);
@@ -2069,13 +2056,13 @@ label_return:
static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (!config_prof)
+ if (!config_prof) {
return (ENOENT);
+ }
if (newp != NULL) {
if (newlen != sizeof(bool)) {
@@ -2083,8 +2070,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
goto label_return;
}
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
- } else
+ } else {
oldval = prof_gdump_get(tsd_tsdn(tsd));
+ }
READ(oldval, bool);
ret = 0;
@@ -2094,18 +2082,19 @@ label_return:
static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t lg_sample = lg_prof_sample;
- if (!config_prof)
+ if (!config_prof) {
return (ENOENT);
+ }
WRITEONLY();
WRITE(lg_sample, size_t);
- if (lg_sample >= (sizeof(uint64_t) << 3))
+ if (lg_sample >= (sizeof(uint64_t) << 3)) {
lg_sample = (sizeof(uint64_t) << 3) - 1;
+ }
prof_reset(tsd, lg_sample);
@@ -2189,10 +2178,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
-{
- if (j > NBINS)
+ size_t j) {
+ if (j > NBINS) {
return (NULL);
+ }
return (super_stats_arenas_i_bins_j_node);
}
@@ -2207,16 +2196,15 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
-{
- if (j > NSIZES - NBINS)
+ size_t j) {
+ if (j > NSIZES - NBINS) {
return (NULL);
+ }
return (super_stats_arenas_i_lextents_j_node);
}
static const ctl_named_node_t *
-stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
const ctl_named_node_t *ret;
size_t a;
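
Every src/ctl.c hunk above is an instance of the same two mechanical rules: single-statement bodies gain braces, and the line break before a function's opening brace goes away. A minimal before/after sketch of the pair, in a compilable unit (widget_t and widget_count are hypothetical, not from the tree):

	typedef struct { int nitems; } widget_t;

	/* Old style: unbraced single-line body, function brace on its own line. */
	static int
	widget_count_old(const widget_t *w)
	{
		if (w == NULL)
			return (0);
		return (w->nitems);
	}

	/* New style: every block braced, opening brace cuddled onto the signature. */
	static int
	widget_count_new(const widget_t *w) {
		if (w == NULL) {
			return (0);
		}
		return (w->nitems);
	}

The parenthesized returns follow the convention the surrounding hunks already use.
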
diff --git a/src/extent.c b/src/extent.c
index be40aaa..5cf2e25 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -75,8 +75,7 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,
/******************************************************************************/
extent_t *
-extent_alloc(tsdn_t *tsdn, arena_t *arena)
-{
+extent_alloc(tsdn_t *tsdn, arena_t *arena) {
extent_t *extent;
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
@@ -92,8 +91,7 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena)
}
void
-extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
-{
+extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->extent_cache, extent, ql_link);
@@ -101,22 +99,21 @@ extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
}
extent_hooks_t *
-extent_hooks_get(arena_t *arena)
-{
+extent_hooks_get(arena_t *arena) {
return (base_extent_hooks_get(arena->base));
}
extent_hooks_t *
-extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks)
-{
+extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks) {
return (base_extent_hooks_set(arena->base, extent_hooks));
}
static void
-extent_hooks_assure_initialized(arena_t *arena, extent_hooks_t **r_extent_hooks)
-{
- if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER)
+extent_hooks_assure_initialized(arena_t *arena,
+ extent_hooks_t **r_extent_hooks) {
+ if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
*r_extent_hooks = extent_hooks_get(arena);
+ }
}
#ifdef JEMALLOC_JET
@@ -124,8 +121,7 @@ extent_hooks_assure_initialized(arena_t *arena, extent_hooks_t **r_extent_hooks)
#define extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor)
#endif
size_t
-extent_size_quantize_floor(size_t size)
-{
+extent_size_quantize_floor(size_t size) {
size_t ret;
pszind_t pind;
@@ -161,8 +157,7 @@ extent_size_quantize_t *extent_size_quantize_floor =
#define extent_size_quantize_ceil JEMALLOC_N(n_extent_size_quantize_ceil)
#endif
size_t
-extent_size_quantize_ceil(size_t size)
-{
+extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
@@ -195,8 +190,7 @@ ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
static void
extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
- extent_t *extent)
-{
+ extent_t *extent) {
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
@@ -207,8 +201,7 @@ extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
static void
extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
- extent_t *extent)
-{
+ extent_t *extent) {
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
@@ -220,12 +213,12 @@ extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
static bool
extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
const extent_t *extent, bool dependent, bool init_missing,
- rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b)
-{
+ rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b) {
*r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_a == NULL)
+ if (!dependent && *r_elm_a == NULL) {
return (true);
+ }
assert(*r_elm_a != NULL);
if (extent_size_get(extent) > PAGE) {
@@ -237,33 +230,33 @@ extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
return (true);
}
assert(*r_elm_b != NULL);
- } else
+ } else {
*r_elm_b = NULL;
+ }
return (false);
}
static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_elm_t *elm_a,
- rtree_elm_t *elm_b, const extent_t *extent)
-{
+ rtree_elm_t *elm_b, const extent_t *extent) {
rtree_elm_write_acquired(tsdn, &extents_rtree, elm_a, extent);
- if (elm_b != NULL)
+ if (elm_b != NULL) {
rtree_elm_write_acquired(tsdn, &extents_rtree, elm_b, extent);
+ }
}
static void
-extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
-{
+extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b) {
rtree_elm_release(tsdn, &extents_rtree, elm_a);
- if (elm_b != NULL)
+ if (elm_b != NULL) {
rtree_elm_release(tsdn, &extents_rtree, elm_b);
+ }
}
static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- const extent_t *extent)
-{
+ const extent_t *extent) {
size_t i;
assert(extent_slab_get(extent));
@@ -276,8 +269,7 @@ extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
}
static void
-extent_gprof_add(tsdn_t *tsdn, const extent_t *extent)
-{
+extent_gprof_add(tsdn_t *tsdn, const extent_t *extent) {
cassert(config_prof);
if (opt_prof && extent_active_get(extent)) {
@@ -291,14 +283,14 @@ extent_gprof_add(tsdn_t *tsdn, const extent_t *extent)
*/
high = atomic_read_zu(&highpages);
}
- if (cur > high && prof_gdump_get_unlocked())
+ if (cur > high && prof_gdump_get_unlocked()) {
prof_gdump(tsdn);
+ }
}
}
static void
-extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent)
-{
+extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent) {
cassert(config_prof);
if (opt_prof && extent_active_get(extent)) {
@@ -309,37 +301,37 @@ extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent)
}
static bool
-extent_register(tsdn_t *tsdn, const extent_t *extent)
-{
+extent_register(tsdn_t *tsdn, const extent_t *extent) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_elm_t *elm_a, *elm_b;
if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a,
- &elm_b))
+ &elm_b)) {
return (true);
+ }
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
- if (extent_slab_get(extent))
+ if (extent_slab_get(extent)) {
extent_interior_register(tsdn, rtree_ctx, extent);
+ }
extent_rtree_release(tsdn, elm_a, elm_b);
- if (config_prof)
+ if (config_prof) {
extent_gprof_add(tsdn, extent);
+ }
return (false);
}
static void
-extent_reregister(tsdn_t *tsdn, const extent_t *extent)
-{
+extent_reregister(tsdn_t *tsdn, const extent_t *extent) {
bool err = extent_register(tsdn, extent);
assert(!err);
}
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- const extent_t *extent)
-{
+ const extent_t *extent) {
size_t i;
assert(extent_slab_get(extent));
@@ -352,8 +344,7 @@ extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
}
static void
-extent_deregister(tsdn_t *tsdn, extent_t *extent)
-{
+extent_deregister(tsdn_t *tsdn, extent_t *extent) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_elm_t *elm_a, *elm_b;
@@ -367,8 +358,9 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent)
}
extent_rtree_release(tsdn, elm_a, elm_b);
- if (config_prof)
+ if (config_prof) {
extent_gprof_sub(tsdn, extent);
+ }
}
/*
@@ -377,8 +369,7 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent)
*/
static extent_t *
extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
- extent_heap_t extent_heaps[NPSIZES+1], size_t size)
-{
+ extent_heap_t extent_heaps[NPSIZES+1], size_t size) {
pszind_t pind, i;
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
@@ -386,8 +377,9 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
pind = psz2ind(extent_size_quantize_ceil(size));
for (i = pind; i < NPSIZES+1; i++) {
extent_t *extent = extent_heap_first(&extent_heaps[i]);
- if (extent != NULL)
+ if (extent != NULL) {
return (extent);
+ }
}
return (NULL);
@@ -395,8 +387,7 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
static void
extent_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- bool cache, extent_t *extent)
-{
+ bool cache, extent_t *extent) {
/*
* Leak extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak.
@@ -415,15 +406,15 @@ static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_heap_t extent_heaps[NPSIZES+1], bool locked, bool cache,
void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
- bool *commit, bool slab)
-{
+ bool *commit, bool slab) {
extent_t *extent;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
size_t size, alloc_size, leadsize, trailsize;
- if (locked)
+ if (locked) {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
+ }
assert(new_addr == NULL || !slab);
assert(pad == 0 || !slab);
assert(alignment > 0);
@@ -452,10 +443,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
size = usize + pad;
alloc_size = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
- if (alloc_size < usize)
+ if (alloc_size < usize) {
return (NULL);
- if (!locked)
+ }
+ if (!locked) {
malloc_mutex_lock(tsdn, &arena->extents_mtx);
+ }
extent_hooks_assure_initialized(arena, r_extent_hooks);
if (new_addr != NULL) {
rtree_elm_t *elm;
@@ -470,19 +463,22 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (extent_arena_get(extent) != arena ||
extent_size_get(extent) < size ||
extent_active_get(extent) ||
- extent_retained_get(extent) == cache)
+ extent_retained_get(extent) == cache) {
extent = NULL;
+ }
}
rtree_elm_release(tsdn, &extents_rtree, elm);
- } else
+ } else {
extent = NULL;
+ }
} else {
extent = extent_first_best_fit(tsdn, arena, extent_heaps,
alloc_size);
}
if (extent == NULL) {
- if (!locked)
+ if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ }
return (NULL);
}
extent_heaps_remove(tsdn, extent_heaps, extent);
@@ -493,10 +489,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
assert(new_addr == NULL || leadsize == 0);
assert(extent_size_get(extent) >= leadsize + size);
trailsize = extent_size_get(extent) - leadsize - size;
- if (extent_zeroed_get(extent))
+ if (extent_zeroed_get(extent)) {
*zero = true;
- if (extent_committed_get(extent))
+ }
+ if (extent_committed_get(extent)) {
*commit = true;
+ }
/* Split the lead. */
if (leadsize != 0) {
@@ -507,8 +505,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (extent == NULL) {
extent_deregister(tsdn, lead);
extent_leak(tsdn, arena, r_extent_hooks, cache, lead);
- if (!locked)
+ if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ }
return (NULL);
}
extent_heaps_insert(tsdn, extent_heaps, lead);
@@ -523,8 +522,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_deregister(tsdn, extent);
extent_leak(tsdn, arena, r_extent_hooks, cache,
extent);
- if (!locked)
+ if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ }
return (NULL);
}
extent_heaps_insert(tsdn, extent_heaps, trail);
@@ -540,8 +540,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (*commit && !extent_committed_get(extent)) {
if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
0, extent_size_get(extent))) {
- if (!locked)
+ if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ }
extent_record(tsdn, arena, r_extent_hooks, extent_heaps,
cache, extent);
return (NULL);
@@ -549,16 +550,18 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_zeroed_set(extent, true);
}
- if (pad != 0)
+ if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
+ }
extent_active_set(extent, true);
if (slab) {
extent_slab_set(extent, slab);
extent_interior_register(tsdn, rtree_ctx, extent);
}
- if (!locked)
+ if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ }
if (*zero) {
if (!extent_zeroed_get(extent)) {
@@ -569,8 +572,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
size_t *p = (size_t *)(uintptr_t)
extent_addr_get(extent);
- for (i = 0; i < usize / sizeof(size_t); i++)
+ for (i = 0; i < usize / sizeof(size_t); i++) {
assert(p[i] == 0);
+ }
}
}
return (extent);
@@ -584,8 +588,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
*/
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
-{
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
void *ret;
assert(size != 0);
@@ -594,17 +597,20 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
+ commit)) != NULL) {
return (ret);
+ }
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
- != NULL)
+ != NULL) {
return (ret);
+ }
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
+ commit)) != NULL) {
return (ret);
+ }
/* All strategies for allocation failed. */
return (NULL);
@@ -613,8 +619,7 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
static extent_t *
extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize,
- size_t pad, size_t alignment, bool *zero, bool *commit, bool slab)
-{
+ size_t pad, size_t alignment, bool *zero, bool *commit, bool slab) {
extent_t *extent;
assert(usize + pad != 0);
@@ -629,8 +634,7 @@ extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
extent_t *
extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab)
-{
+ size_t alignment, bool *zero, bool *commit, bool slab) {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
@@ -640,16 +644,14 @@ extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
extent_t *
extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab)
-{
+ size_t alignment, bool *zero, bool *commit, bool slab) {
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
new_addr, usize, pad, alignment, zero, commit, slab));
}
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
-{
+ size_t size, size_t alignment, bool *zero, bool *commit) {
void *ret;
ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
@@ -659,8 +661,7 @@ extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
-{
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
tsdn_t *tsdn;
arena_t *arena;
@@ -680,10 +681,10 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
static void
extent_retain(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extent_t *extent)
-{
- if (config_stats)
+ extent_t *extent) {
+ if (config_stats) {
arena->stats.retained += extent_size_get(extent);
+ }
extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained,
false, extent);
}
@@ -696,8 +697,7 @@ extent_retain(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab)
-{
+ size_t alignment, bool *zero, bool *commit, bool slab) {
extent_t *extent;
void *ptr;
size_t size, alloc_size, alloc_size_min, leadsize, trailsize;
@@ -713,13 +713,16 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
alloc_size = pind2sz(arena->extent_grow_next);
alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
- if (alloc_size_min < usize)
+ if (alloc_size_min < usize) {
return (NULL);
- if (alloc_size < alloc_size_min)
+ }
+ if (alloc_size < alloc_size_min) {
return (NULL);
+ }
extent = extent_alloc(tsdn, arena);
- if (extent == NULL)
+ if (extent == NULL) {
return (NULL);
+ }
zeroed = false;
committed = false;
ptr = extent_alloc_core(tsdn, arena, new_addr, alloc_size, PAGE,
@@ -741,10 +744,12 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
assert(new_addr == NULL || leadsize == 0);
assert(alloc_size >= leadsize + size);
trailsize = alloc_size - leadsize - size;
- if (extent_zeroed_get(extent))
+ if (extent_zeroed_get(extent)) {
*zero = true;
- if (extent_committed_get(extent))
+ }
+ if (extent_committed_get(extent)) {
*commit = true;
+ }
/* Split the lead. */
if (leadsize != 0) {
@@ -790,8 +795,9 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
/* Adjust gprof stats now that extent is final size. */
extent_gprof_add(tsdn, extent);
}
- if (pad != 0)
+ if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
+ }
if (slab) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
@@ -800,18 +806,19 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_slab_set(extent, true);
extent_interior_register(tsdn, rtree_ctx, extent);
}
- if (*zero && !extent_zeroed_get(extent))
+ if (*zero && !extent_zeroed_get(extent)) {
memset(extent_addr_get(extent), 0, extent_usize_get(extent));
- if (arena->extent_grow_next + 1 < NPSIZES)
+ }
+ if (arena->extent_grow_next + 1 < NPSIZES) {
arena->extent_grow_next++;
+ }
return (extent);
}
static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab)
-{
+ size_t alignment, bool *zero, bool *commit, bool slab) {
extent_t *extent;
assert(usize != 0);
@@ -825,8 +832,9 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
size_t size = usize + pad;
arena->stats.retained -= size;
}
- if (config_prof)
+ if (config_prof) {
extent_gprof_add(tsdn, extent);
+ }
}
if (!config_munmap && extent == NULL) {
extent = extent_grow_retained(tsdn, arena, r_extent_hooks,
@@ -839,16 +847,16 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab)
-{
+ size_t alignment, bool *zero, bool *commit, bool slab) {
extent_t *extent;
size_t size;
void *addr;
size = usize + pad;
extent = extent_alloc(tsdn, arena);
- if (extent == NULL)
+ if (extent == NULL) {
return (NULL);
+ }
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
addr = extent_alloc_default_impl(tsdn, arena, new_addr, size,
@@ -863,8 +871,9 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
}
extent_init(extent, arena, addr, size, usize,
arena_extent_sn_next(arena), true, zero, commit, slab);
- if (pad != 0)
+ if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
+ }
if (extent_register(tsdn, extent)) {
extent_leak(tsdn, arena, r_extent_hooks, false, extent);
return (NULL);
@@ -876,8 +885,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab)
-{
+ size_t alignment, bool *zero, bool *commit, bool slab) {
extent_t *extent;
extent_hooks_assure_initialized(arena, r_extent_hooks);
@@ -893,16 +901,19 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
}
static bool
-extent_can_coalesce(const extent_t *a, const extent_t *b)
-{
- if (extent_arena_get(a) != extent_arena_get(b))
+extent_can_coalesce(const extent_t *a, const extent_t *b) {
+ if (extent_arena_get(a) != extent_arena_get(b)) {
return (false);
- if (extent_active_get(a) != extent_active_get(b))
+ }
+ if (extent_active_get(a) != extent_active_get(b)) {
return (false);
- if (extent_committed_get(a) != extent_committed_get(b))
+ }
+ if (extent_committed_get(a) != extent_committed_get(b)) {
return (false);
- if (extent_retained_get(a) != extent_retained_get(b))
+ }
+ if (extent_retained_get(a) != extent_retained_get(b)) {
return (false);
+ }
return (true);
}
@@ -910,10 +921,10 @@ extent_can_coalesce(const extent_t *a, const extent_t *b)
static void
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
- extent_heap_t extent_heaps[NPSIZES+1], bool cache)
-{
- if (!extent_can_coalesce(a, b))
+ extent_heap_t extent_heaps[NPSIZES+1], bool cache) {
+ if (!extent_can_coalesce(a, b)) {
return;
+ }
extent_heaps_remove(tsdn, extent_heaps, a);
extent_heaps_remove(tsdn, extent_heaps, b);
@@ -937,8 +948,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extent_heap_t extent_heaps[NPSIZES+1], bool cache, extent_t *extent)
-{
+ extent_heap_t extent_heaps[NPSIZES+1], bool cache, extent_t *extent) {
extent_t *prev, *next;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -980,8 +990,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
}
void
-extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
-{
+extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
if (extent_register(tsdn, extent)) {
@@ -993,8 +1002,7 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
void
extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
@@ -1006,17 +1014,16 @@ extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
}
static bool
-extent_dalloc_default_impl(void *addr, size_t size)
-{
- if (!have_dss || !extent_in_dss(addr))
+extent_dalloc_default_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
return (extent_dalloc_mmap(addr, size));
+ }
return (true);
}
static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind)
-{
+ bool committed, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (extent_dalloc_default_impl(addr, size));
@@ -1024,8 +1031,7 @@ extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
bool err;
assert(extent_base_get(extent) != NULL);
@@ -1050,46 +1056,50 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_committed_get(extent), arena_ind_get(arena)));
}
- if (!err)
+ if (!err) {
extent_dalloc(tsdn, arena, extent);
+ }
return (err);
}
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent)
-{
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
bool zeroed;
- if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent))
+ if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
return;
+ }
extent_reregister(tsdn, extent);
/* Try to decommit; purge if that fails. */
- if (!extent_committed_get(extent))
+ if (!extent_committed_get(extent)) {
zeroed = true;
- else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
- 0, extent_size_get(extent)))
+ } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
+ 0, extent_size_get(extent))) {
zeroed = true;
- else if ((*r_extent_hooks)->purge_lazy != NULL &&
+ } else if ((*r_extent_hooks)->purge_lazy != NULL &&
!(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena)))
+ extent_size_get(extent), arena_ind_get(arena))) {
zeroed = false;
- else if ((*r_extent_hooks)->purge_forced != NULL &&
+ } else if ((*r_extent_hooks)->purge_forced != NULL &&
!(*r_extent_hooks)->purge_forced(*r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena)))
+ extent_size_get(extent), arena_ind_get(arena))) {
zeroed = true;
- else
+ } else {
zeroed = false;
+ }
extent_zeroed_set(extent, zeroed);
- if (config_stats)
+ if (config_stats) {
arena->stats.retained += extent_size_get(extent);
- if (config_prof)
+ }
+ if (config_prof) {
extent_gprof_sub(tsdn, extent);
+ }
extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained,
false, extent);
@@ -1097,8 +1107,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t offset, size_t length, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
@@ -1108,8 +1117,7 @@ extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length)
-{
+ size_t length) {
bool err;
extent_hooks_assure_initialized(arena, r_extent_hooks);
@@ -1122,8 +1130,7 @@ extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t offset, size_t length, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
@@ -1133,8 +1140,7 @@ extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length)
-{
+ size_t length) {
bool err;
extent_hooks_assure_initialized(arena, r_extent_hooks);
@@ -1150,8 +1156,7 @@ extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t offset, size_t length, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
@@ -1166,8 +1171,7 @@ extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length)
-{
+ size_t length) {
extent_hooks_assure_initialized(arena, r_extent_hooks);
return ((*r_extent_hooks)->purge_lazy == NULL ||
(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
@@ -1178,8 +1182,7 @@ extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t size, size_t offset, size_t length, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
assert(addr != NULL);
assert((offset & PAGE_MASK) == 0);
@@ -1194,8 +1197,7 @@ extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length)
-{
+ size_t length) {
extent_hooks_assure_initialized(arena, r_extent_hooks);
return ((*r_extent_hooks)->purge_forced == NULL ||
(*r_extent_hooks)->purge_forced(*r_extent_hooks,
@@ -1206,12 +1208,12 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t size_a, size_t size_b, bool committed, unsigned arena_ind)
-{
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
- if (!maps_coalesce)
+ if (!maps_coalesce) {
return (true);
+ }
return (false);
}
#endif
@@ -1219,8 +1221,7 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- size_t usize_a, size_t size_b, size_t usize_b)
-{
+ size_t usize_a, size_t size_b, size_t usize_b) {
extent_t *trail;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -1230,12 +1231,14 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_assure_initialized(arena, r_extent_hooks);
- if ((*r_extent_hooks)->split == NULL)
+ if ((*r_extent_hooks)->split == NULL) {
return (NULL);
+ }
trail = extent_alloc(tsdn, arena);
- if (trail == NULL)
+ if (trail == NULL) {
goto label_error_a;
+ }
{
extent_t lead;
@@ -1246,8 +1249,9 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_slab_get(extent));
if (extent_rtree_acquire(tsdn, rtree_ctx, &lead, false, true,
- &lead_elm_a, &lead_elm_b))
+ &lead_elm_a, &lead_elm_b)) {
goto label_error_b;
+ }
}
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
@@ -1255,13 +1259,15 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_active_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_slab_get(extent));
if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true,
- &trail_elm_a, &trail_elm_b))
+ &trail_elm_a, &trail_elm_b)) {
goto label_error_c;
+ }
if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
size_a + size_b, size_a, size_b, extent_committed_get(extent),
- arena_ind_get(arena)))
+ arena_ind_get(arena))) {
goto label_error_d;
+ }
extent_size_set(extent, size_a);
extent_usize_set(extent, usize_a);
@@ -1284,12 +1290,13 @@ label_error_a:
}
static bool
-extent_merge_default_impl(void *addr_a, void *addr_b)
-{
- if (!maps_coalesce)
+extent_merge_default_impl(void *addr_a, void *addr_b) {
+ if (!maps_coalesce) {
return (true);
- if (have_dss && !extent_dss_mergeable(addr_a, addr_b))
+ }
+ if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
return (true);
+ }
return (false);
}
@@ -1297,8 +1304,7 @@ extent_merge_default_impl(void *addr_a, void *addr_b)
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- void *addr_b, size_t size_b, bool committed, unsigned arena_ind)
-{
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (extent_merge_default_impl(addr_a, addr_b));
@@ -1307,8 +1313,7 @@ extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b)
-{
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
bool err;
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -1316,8 +1321,9 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_assure_initialized(arena, r_extent_hooks);
- if ((*r_extent_hooks)->merge == NULL)
+ if ((*r_extent_hooks)->merge == NULL) {
return (true);
+ }
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
@@ -1330,8 +1336,9 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
arena_ind_get(arena));
}
- if (err)
+ if (err) {
return (true);
+ }
/*
* The rtree writes must happen while all the relevant elements are
@@ -1350,8 +1357,9 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
if (b_elm_b != NULL) {
rtree_elm_write_acquired(tsdn, &extents_rtree, b_elm_a, NULL);
rtree_elm_release(tsdn, &extents_rtree, b_elm_a);
- } else
+ } else {
b_elm_b = b_elm_a;
+ }
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
@@ -1368,14 +1376,15 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
}
bool
-extent_boot(void)
-{
+extent_boot(void) {
if (rtree_new(&extents_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
- LG_PAGE)))
+ LG_PAGE))) {
return (true);
+ }
- if (have_dss)
+ if (have_dss) {
extent_dss_boot();
+ }
return (false);
}
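
The extent_dalloc_wrapper hunk just above is the most involved composition of the rule: a five-arm decommit/purge cascade in which every arm gains braces and each else is cuddled as `} else if (...) {`. A reduced, compilable sketch of the resulting shape (cond_a and cond_b are placeholder predicates, not jemalloc APIs):

	#include <stdbool.h>

	/* Mirrors the zeroed-classification cascade, with the real calls elided. */
	static bool
	classify(bool cond_a, bool cond_b) {
		bool zeroed;

		if (cond_a) {
			zeroed = true;
		} else if (cond_b) {
			zeroed = false;
		} else {
			zeroed = true;
		}
		return (zeroed);
	}

Bracing every arm also closes the classic trap of slipping a second statement into an unbraced branch, the usual motivation for mandatory-brace styles.
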
diff --git a/src/extent_dss.c b/src/extent_dss.c
index 5aa95b1..d61d546 100644
--- a/src/extent_dss.c
+++ b/src/extent_dss.c
@@ -30,8 +30,7 @@ static void *dss_max;
/******************************************************************************/
static void *
-extent_dss_sbrk(intptr_t increment)
-{
+extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
@@ -41,28 +40,27 @@ extent_dss_sbrk(intptr_t increment)
}
dss_prec_t
-extent_dss_prec_get(void)
-{
+extent_dss_prec_get(void) {
dss_prec_t ret;
- if (!have_dss)
+ if (!have_dss) {
return (dss_prec_disabled);
+ }
ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
return (ret);
}
bool
-extent_dss_prec_set(dss_prec_t dss_prec)
-{
- if (!have_dss)
+extent_dss_prec_set(dss_prec_t dss_prec) {
+ if (!have_dss) {
return (dss_prec != dss_prec_disabled);
+ }
atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
return (false);
}
static void *
-extent_dss_max_update(void *new_addr)
-{
+extent_dss_max_update(void *new_addr) {
void *max_cur;
spin_t spinner;
@@ -83,20 +81,21 @@ extent_dss_max_update(void *new_addr)
spin_adaptive(&spinner);
continue;
}
- if (!atomic_cas_p(&dss_max, max_prev, max_cur))
+ if (!atomic_cas_p(&dss_max, max_prev, max_cur)) {
break;
+ }
}
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
- if (new_addr != NULL && max_cur != new_addr)
+ if (new_addr != NULL && max_cur != new_addr) {
return (NULL);
+ }
return (max_cur);
}
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit)
-{
+ size_t alignment, bool *zero, bool *commit) {
extent_t *gap;
cassert(have_dss);
@@ -107,12 +106,14 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
* sbrk() uses a signed increment argument, so take care not to
* interpret a large allocation request as a negative increment.
*/
- if ((intptr_t)size < 0)
+ if ((intptr_t)size < 0) {
return (NULL);
+ }
gap = extent_alloc(tsdn, arena);
- if (gap == NULL)
+ if (gap == NULL) {
return (NULL);
+ }
if (!atomic_read_u(&dss_exhausted)) {
/*
@@ -126,8 +127,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
intptr_t incr;
max_cur = extent_dss_max_update(new_addr);
- if (max_cur == NULL)
+ if (max_cur == NULL) {
goto label_oom;
+ }
/*
* Compute how much gap space (if any) is necessary to
@@ -145,8 +147,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)max_cur ||
- (uintptr_t)dss_next < (uintptr_t)max_cur)
+ (uintptr_t)dss_next < (uintptr_t)max_cur) {
goto label_oom; /* Wrap-around. */
+ }
incr = gap_size + size;
/*
@@ -155,19 +158,22 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
* DSS while dss_max is greater than the current DSS
* max reported by sbrk(0).
*/
- if (atomic_cas_p(&dss_max, max_cur, dss_next))
+ if (atomic_cas_p(&dss_max, max_cur, dss_next)) {
continue;
+ }
/* Try to allocate. */
dss_prev = extent_dss_sbrk(incr);
if (dss_prev == max_cur) {
/* Success. */
- if (gap_size != 0)
+ if (gap_size != 0) {
extent_dalloc_gap(tsdn, arena, gap);
- else
+ } else {
extent_dalloc(tsdn, arena, gap);
- if (!*commit)
+ }
+ if (!*commit) {
*commit = pages_decommit(ret, size);
+ }
if (*zero && *commit) {
extent_hooks_t *extent_hooks =
EXTENT_HOOKS_INITIALIZER;
@@ -177,8 +183,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size, 0, true, false, true, false);
if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0,
- size))
+ size)) {
memset(ret, 0, size);
+ }
}
return (ret);
}
@@ -204,30 +211,28 @@ label_oom:
}
static bool
-extent_in_dss_helper(void *addr, void *max)
-{
+extent_in_dss_helper(void *addr, void *max) {
return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
(uintptr_t)max);
}
bool
-extent_in_dss(void *addr)
-{
+extent_in_dss(void *addr) {
cassert(have_dss);
return (extent_in_dss_helper(addr, atomic_read_p(&dss_max)));
}
bool
-extent_dss_mergeable(void *addr_a, void *addr_b)
-{
+extent_dss_mergeable(void *addr_a, void *addr_b) {
void *max;
cassert(have_dss);
if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
- (uintptr_t)dss_base)
+ (uintptr_t)dss_base) {
return (true);
+ }
max = atomic_read_p(&dss_max);
return (extent_in_dss_helper(addr_a, max) ==
@@ -235,8 +240,7 @@ extent_dss_mergeable(void *addr_a, void *addr_b)
}
void
-extent_dss_boot(void)
-{
+extent_dss_boot(void) {
cassert(have_dss);
dss_base = extent_dss_sbrk(0);
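
The extent_dss.c hunks above show the rule reaching two more shapes: single-statement for bodies, and goto-based out-of-memory guards, each now wrapped in its own braced block. A small compilable sketch of both (fill_or_fail is illustrative, not from the tree):

	#include <stddef.h>

	static int
	fill_or_fail(int *vals, size_t n, size_t limit) {
		size_t i;

		if (n > limit) {
			goto label_oom;
		}
		for (i = 0; i < n; i++) {
			vals[i] = (int)i;
		}
		return (0);
	label_oom:
		return (-1);
	}
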
diff --git a/src/extent_mmap.c b/src/extent_mmap.c
index e685a45..2c00b58 100644
--- a/src/extent_mmap.c
+++ b/src/extent_mmap.c
@@ -4,21 +4,23 @@
/******************************************************************************/
static void *
-extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
-{
+extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero,
+ bool *commit) {
void *ret;
size_t alloc_size;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
- if (alloc_size < size)
+ if (alloc_size < size) {
return (NULL);
+ }
do {
void *pages;
size_t leadsize;
pages = pages_map(NULL, alloc_size, commit);
- if (pages == NULL)
+ if (pages == NULL) {
return (NULL);
+ }
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size, commit);
@@ -31,8 +33,7 @@ extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit)
-{
+ bool *commit) {
void *ret;
size_t offset;
@@ -52,8 +53,9 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(alignment != 0);
ret = pages_map(new_addr, size, commit);
- if (ret == NULL || ret == new_addr)
+ if (ret == NULL || ret == new_addr) {
return (ret);
+ }
assert(new_addr == NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
@@ -67,9 +69,9 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
}
bool
-extent_dalloc_mmap(void *addr, size_t size)
-{
- if (config_munmap)
+extent_dalloc_mmap(void *addr, size_t size) {
+ if (config_munmap) {
pages_unmap(addr, size);
+ }
return (!config_munmap);
}
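
extent_alloc_mmap_slow() above obtains an aligned mapping by over-allocating and trimming. A standalone sketch of the same strategy with plain mmap()/munmap() standing in for pages_map()/pages_trim(); it assumes a power-of-two alignment of at least one 4 KiB page:

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    #define PAGE_SZ ((size_t)4096)
    #define ALIGN_CEIL(a, align) (((a) + ((align) - 1)) & ~((align) - 1))

    static void *
    alloc_aligned_slow(size_t size, size_t alignment) {
    	/* Worst case wastes alignment - PAGE bytes; beware size_t wrap. */
    	size_t alloc_size = size + alignment - PAGE_SZ;
    	if (alloc_size < size) {
    		return NULL;
    	}
    	void *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
    	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (pages == MAP_FAILED) {
    		return NULL;
    	}
    	uintptr_t base = (uintptr_t)pages;
    	size_t leadsize = (size_t)(ALIGN_CEIL(base, (uintptr_t)alignment) -
    	    base);
    	size_t trailsize = alloc_size - leadsize - size;
    	/* Trim the unaligned lead and the excess tail; keep the middle. */
    	if (leadsize != 0) {
    		munmap(pages, leadsize);
    	}
    	if (trailsize != 0) {
    		munmap((void *)(base + leadsize + size), trailsize);
    	}
    	return (void *)(base + leadsize);
    }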
diff --git a/src/jemalloc.c b/src/jemalloc.c
index af2a53a..2de42c3 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -209,8 +209,7 @@ static bool init_lock_initialized = false;
JEMALLOC_ATTR(constructor)
static void WINAPI
-_init_init_lock(void)
-{
+_init_init_lock(void) {
/*
* If another constructor in the same binary is using mallctl to e.g.
* set up extent hooks, it may end up running before this one, and
@@ -221,8 +220,9 @@ _init_init_lock(void)
* the process creation, before any separate thread normally starts
* doing anything.
*/
- if (!init_lock_initialized)
+ if (!init_lock_initialized) {
malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
+ }
init_lock_initialized = true;
}
@@ -273,24 +273,23 @@ static bool malloc_init_hard(void);
*/
JEMALLOC_ALWAYS_INLINE_C bool
-malloc_initialized(void)
-{
+malloc_initialized(void) {
return (malloc_init_state == malloc_init_initialized);
}
JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init_a0(void)
-{
- if (unlikely(malloc_init_state == malloc_init_uninitialized))
+malloc_init_a0(void) {
+ if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
return (malloc_init_hard_a0());
+ }
return (false);
}
JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init(void)
-{
- if (unlikely(!malloc_initialized()) && malloc_init_hard())
+malloc_init(void) {
+ if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
return (true);
+ }
return (false);
}
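
malloc_initialized()/malloc_init() above implement a lazy bootstrap: a branch-cheap state test inlined into every entry point, with the locked slow path behind it. A self-contained model of that pattern (the state names follow the diff; the trivial slow path is a stand-in):

    #include <stdbool.h>

    typedef enum {
    	malloc_init_uninitialized,
    	malloc_init_a0_initialized,
    	malloc_init_recursible,
    	malloc_init_initialized
    } init_state_t;

    static init_state_t malloc_init_state_model = malloc_init_uninitialized;

    static bool
    malloc_init_hard_model(void) {	/* stand-in for the locked slow path */
    	malloc_init_state_model = malloc_init_initialized;
    	return false;	/* false means success, per jemalloc's convention */
    }

    static inline bool
    malloc_initialized_model(void) {
    	return (malloc_init_state_model == malloc_init_initialized);
    }

    static inline bool
    malloc_init_model(void) {
    	if (!malloc_initialized_model() && malloc_init_hard_model()) {
    		return true;	/* bootstrap failed */
    	}
    	return false;
    }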
@@ -300,30 +299,27 @@ malloc_init(void)
*/
static void *
-a0ialloc(size_t size, bool zero, bool is_internal)
-{
- if (unlikely(malloc_init_a0()))
+a0ialloc(size_t size, bool zero, bool is_internal) {
+ if (unlikely(malloc_init_a0())) {
return (NULL);
+ }
return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
is_internal, arena_get(TSDN_NULL, 0, true), true));
}
static void
-a0idalloc(extent_t *extent, void *ptr, bool is_internal)
-{
+a0idalloc(extent_t *extent, void *ptr, bool is_internal) {
idalloctm(TSDN_NULL, extent, ptr, false, is_internal, true);
}
void *
-a0malloc(size_t size)
-{
+a0malloc(size_t size) {
return (a0ialloc(size, false, true));
}
void
-a0dalloc(void *ptr)
-{
+a0dalloc(void *ptr) {
a0idalloc(iealloc(NULL, ptr), ptr, true);
}
@@ -334,17 +330,16 @@ a0dalloc(void *ptr)
*/
void *
-bootstrap_malloc(size_t size)
-{
- if (unlikely(size == 0))
+bootstrap_malloc(size_t size) {
+ if (unlikely(size == 0)) {
size = 1;
+ }
return (a0ialloc(size, false, false));
}
void *
-bootstrap_calloc(size_t num, size_t size)
-{
+bootstrap_calloc(size_t num, size_t size) {
size_t num_size;
num_size = num * size;
@@ -357,49 +352,46 @@ bootstrap_calloc(size_t num, size_t size)
}
void
-bootstrap_free(void *ptr)
-{
- if (unlikely(ptr == NULL))
+bootstrap_free(void *ptr) {
+ if (unlikely(ptr == NULL)) {
return;
+ }
a0idalloc(iealloc(NULL, ptr), ptr, false);
}
void
-arena_set(unsigned ind, arena_t *arena)
-{
+arena_set(unsigned ind, arena_t *arena) {
atomic_write_p((void **)&arenas[ind], arena);
}
static void
-narenas_total_set(unsigned narenas)
-{
+narenas_total_set(unsigned narenas) {
atomic_write_u(&narenas_total, narenas);
}
static void
-narenas_total_inc(void)
-{
+narenas_total_inc(void) {
atomic_add_u(&narenas_total, 1);
}
unsigned
-narenas_total_get(void)
-{
+narenas_total_get(void) {
return (atomic_read_u(&narenas_total));
}
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
+arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena_t *arena;
assert(ind <= narenas_total_get());
- if (ind > MALLOCX_ARENA_MAX)
+ if (ind > MALLOCX_ARENA_MAX) {
return (NULL);
- if (ind == narenas_total_get())
+ }
+ if (ind == narenas_total_get()) {
narenas_total_inc();
+ }
/*
* Another thread may have already initialized arenas[ind] if it's an
@@ -418,8 +410,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
}
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
-{
+arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena_t *arena;
malloc_mutex_lock(tsdn, &arenas_lock);
@@ -429,25 +420,25 @@ arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks)
}
static void
-arena_bind(tsd_t *tsd, unsigned ind, bool internal)
-{
+arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
arena_t *arena;
- if (!tsd_nominal(tsd))
+ if (!tsd_nominal(tsd)) {
return;
+ }
arena = arena_get(tsd_tsdn(tsd), ind, false);
arena_nthreads_inc(arena, internal);
- if (internal)
+ if (internal) {
tsd_iarena_set(tsd, arena);
- else
+ } else {
tsd_arena_set(tsd, arena);
+ }
}
void
-arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
-{
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
arena_t *oldarena, *newarena;
oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
@@ -458,21 +449,20 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
}
static void
-arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
-{
+arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
arena_t *arena;
arena = arena_get(tsd_tsdn(tsd), ind, false);
arena_nthreads_dec(arena, internal);
- if (internal)
+ if (internal) {
tsd_iarena_set(tsd, NULL);
- else
+ } else {
tsd_arena_set(tsd, NULL);
+ }
}
arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
-{
+arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
arena_tdata_t *tdata, *arenas_tdata_old;
arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
unsigned narenas_tdata_old, i;
@@ -541,15 +531,15 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
/* Read the refreshed tdata array. */
tdata = &arenas_tdata[ind];
label_return:
- if (arenas_tdata_old != NULL)
+ if (arenas_tdata_old != NULL) {
a0dalloc(arenas_tdata_old);
+ }
return (tdata);
}
/* Slow path, called only by arena_choose(). */
arena_t *
-arena_choose_hard(tsd_t *tsd, bool internal)
-{
+arena_choose_hard(tsd_t *tsd, bool internal) {
arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (narenas_auto > 1) {
@@ -563,8 +553,9 @@ arena_choose_hard(tsd_t *tsd, bool internal)
* choose[1]: For internal metadata allocation.
*/
- for (j = 0; j < 2; j++)
+ for (j = 0; j < 2; j++) {
choose[j] = 0;
+ }
first_null = narenas_auto;
malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
@@ -580,8 +571,9 @@ arena_choose_hard(tsd_t *tsd, bool internal)
tsd_tsdn(tsd), i, false), !!j) <
arena_nthreads_get(arena_get(
tsd_tsdn(tsd), choose[j], false),
- !!j))
+ !!j)) {
choose[j] = i;
+ }
}
} else if (first_null == narenas_auto) {
/*
@@ -622,8 +614,9 @@ arena_choose_hard(tsd_t *tsd, bool internal)
&arenas_lock);
return (NULL);
}
- if (!!j == internal)
+ if (!!j == internal) {
ret = arena;
+ }
}
arena_bind(tsd, choose[j], !!j);
}
@@ -638,28 +631,27 @@ arena_choose_hard(tsd_t *tsd, bool internal)
}
void
-iarena_cleanup(tsd_t *tsd)
-{
+iarena_cleanup(tsd_t *tsd) {
arena_t *iarena;
iarena = tsd_iarena_get(tsd);
- if (iarena != NULL)
+ if (iarena != NULL) {
arena_unbind(tsd, arena_ind_get(iarena), true);
+ }
}
void
-arena_cleanup(tsd_t *tsd)
-{
+arena_cleanup(tsd_t *tsd) {
arena_t *arena;
arena = tsd_arena_get(tsd);
- if (arena != NULL)
+ if (arena != NULL) {
arena_unbind(tsd, arena_ind_get(arena), false);
+ }
}
void
-arenas_tdata_cleanup(tsd_t *tsd)
-{
+arenas_tdata_cleanup(tsd_t *tsd) {
arena_tdata_t *arenas_tdata;
/* Prevent tsd->arenas_tdata from being (re)created. */
@@ -673,8 +665,7 @@ arenas_tdata_cleanup(tsd_t *tsd)
}
static void
-stats_print_atexit(void)
-{
+stats_print_atexit(void) {
if (config_tcache && config_stats) {
tsdn_t *tsdn;
unsigned narenas, i;
@@ -720,19 +711,18 @@ stats_print_atexit(void)
#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
-secure_getenv(const char *name)
-{
+secure_getenv(const char *name) {
# ifdef JEMALLOC_HAVE_ISSETUGID
- if (issetugid() != 0)
+ if (issetugid() != 0) {
return (NULL);
+ }
# endif
return (getenv(name));
}
#endif
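
When the platform lacks secure_getenv(3), the fallback above refuses to read the environment when issetugid() reports elevated privileges. A portable sketch of the same intent; issetugid() is BSD-only, so this hypothetical variant compares real against effective IDs instead:

    #include <stdlib.h>
    #include <unistd.h>

    static char *
    secure_getenv_fallback(const char *name) {
    	/* Refuse when privileges were elevated: real != effective. */
    	if (getuid() != geteuid() || getgid() != getegid()) {
    		return NULL;
    	}
    	return getenv(name);
    }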
static unsigned
-malloc_ncpus(void)
-{
+malloc_ncpus(void) {
long result;
#ifdef _WIN32
@@ -761,8 +751,7 @@ malloc_ncpus(void)
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
- char const **v_p, size_t *vlen_p)
-{
+ char const **v_p, size_t *vlen_p) {
bool accept;
const char *opts = *opts_p;
@@ -837,15 +826,13 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
- size_t vlen)
-{
+ size_t vlen) {
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
(int)vlen, v);
}
static void
-malloc_slow_flag_init(void)
-{
+malloc_slow_flag_init(void) {
/*
* Combine the runtime options into malloc_slow for fast path. Called
* after processing all the options.
@@ -860,8 +847,7 @@ malloc_slow_flag_init(void)
}
static void
-malloc_conf_init(void)
-{
+malloc_conf_init(void) {
unsigned i;
char buf[PATH_MAX + 1];
const char *opts, *k, *v;
@@ -948,17 +934,18 @@ malloc_conf_init(void)
(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
if (CONF_MATCH(n)) { \
- if (CONF_MATCH_VALUE("true")) \
+ if (CONF_MATCH_VALUE("true")) { \
o = true; \
- else if (CONF_MATCH_VALUE("false")) \
+ } else if (CONF_MATCH_VALUE("false")) { \
o = false; \
- else { \
+ } else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
- if (cont) \
+ if (cont) { \
continue; \
+ } \
}
#define CONF_MIN_no(um, min) false
#define CONF_MIN_yes(um, min) ((um) < (min))
@@ -978,13 +965,15 @@ malloc_conf_init(void)
k, klen, v, vlen); \
} else if (clip) { \
if (CONF_MIN_##check_min(um, \
- (min))) \
+ (min))) { \
o = (t)(min); \
- else if (CONF_MAX_##check_max( \
- um, (max))) \
+ } else if ( \
+ CONF_MAX_##check_max(um, \
+ (max))) { \
o = (t)(max); \
- else \
+ } else { \
o = (t)um; \
+ } \
} else { \
if (CONF_MIN_##check_min(um, \
(min)) || \
@@ -994,8 +983,9 @@ malloc_conf_init(void)
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
- } else \
+ } else { \
o = (t)um; \
+ } \
} \
continue; \
}
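
The CONF_HANDLE_* machinery above either clamps a parsed option into [min, max] (clip mode) or rejects it with malloc_conf_error() (strict mode). The same logic, de-macroized into a hypothetical helper for readability:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static size_t
    conf_clip_size(size_t um, size_t min, size_t max, bool clip, bool *err) {
    	*err = false;
    	if (clip) {
    		/* Clip mode: silently clamp out-of-range values. */
    		if (um < min) {
    			return min;
    		}
    		if (um > max) {
    			return max;
    		}
    		return um;
    	}
    	/* Strict mode: flag out-of-range values; caller keeps the old one. */
    	if (um < min || um > max) {
    		fprintf(stderr, "Out-of-range conf value\n");
    		*err = true;
    	}
    	return um;
    }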
@@ -1023,8 +1013,9 @@ malloc_conf_init(void)
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
- } else \
+ } else { \
o = l; \
+ } \
continue; \
}
#define CONF_HANDLE_CHAR_P(o, n, d) \
@@ -1148,8 +1139,7 @@ malloc_conf_init(void)
}
static bool
-malloc_init_hard_needed(void)
-{
+malloc_init_hard_needed(void) {
if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
malloc_init_recursible)) {
/*
@@ -1177,35 +1167,42 @@ malloc_init_hard_needed(void)
}
static bool
-malloc_init_hard_a0_locked()
-{
+malloc_init_hard_a0_locked() {
malloc_initializer = INITIALIZER;
- if (config_prof)
+ if (config_prof) {
prof_boot0();
+ }
malloc_conf_init();
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
}
pages_boot();
- if (base_boot(TSDN_NULL))
+ if (base_boot(TSDN_NULL)) {
return (true);
- if (extent_boot())
+ }
+ if (extent_boot()) {
return (true);
- if (ctl_boot())
+ }
+ if (ctl_boot()) {
return (true);
- if (config_prof)
+ }
+ if (config_prof) {
prof_boot1();
+ }
arena_boot();
- if (config_tcache && tcache_boot(TSDN_NULL))
+ if (config_tcache && tcache_boot(TSDN_NULL)) {
return (true);
- if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
+ }
+ if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) {
return (true);
+ }
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
@@ -1218,9 +1215,10 @@ malloc_init_hard_a0_locked()
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) ==
- NULL)
+ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
+ == NULL) {
return (true);
+ }
malloc_init_state = malloc_init_a0_initialized;
@@ -1228,8 +1226,7 @@ malloc_init_hard_a0_locked()
}
static bool
-malloc_init_hard_a0(void)
-{
+malloc_init_hard_a0(void) {
bool ret;
malloc_mutex_lock(TSDN_NULL, &init_lock);
@@ -1240,8 +1237,7 @@ malloc_init_hard_a0(void)
/* Initialize data structures which may trigger recursive allocation. */
static bool
-malloc_init_hard_recursible(void)
-{
+malloc_init_hard_recursible(void) {
malloc_init_state = malloc_init_recursible;
ncpus = malloc_ncpus();
@@ -1253,8 +1249,9 @@ malloc_init_hard_recursible(void)
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
return (true);
}
#endif
@@ -1263,20 +1260,21 @@ malloc_init_hard_recursible(void)
}
static bool
-malloc_init_hard_finish(tsdn_t *tsdn)
-{
- if (malloc_mutex_boot())
+malloc_init_hard_finish(tsdn_t *tsdn) {
+ if (malloc_mutex_boot()) {
return (true);
+ }
if (opt_narenas == 0) {
/*
* For SMP systems, create more than one arena per CPU by
* default.
*/
- if (ncpus > 1)
+ if (ncpus > 1) {
opt_narenas = ncpus << 2;
- else
+ } else {
opt_narenas = 1;
+ }
}
narenas_auto = opt_narenas;
/*
@@ -1292,8 +1290,9 @@ malloc_init_hard_finish(tsdn_t *tsdn)
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(tsdn, a0->base, sizeof(arena_t *) *
(MALLOCX_ARENA_MAX+1), CACHELINE);
- if (arenas == NULL)
+ if (arenas == NULL) {
return (true);
+ }
/* Copy the pointer to the one arena that was already initialized. */
arena_set(0, a0);
@@ -1304,8 +1303,7 @@ malloc_init_hard_finish(tsdn_t *tsdn)
}
static bool
-malloc_init_hard(void)
-{
+malloc_init_hard(void) {
tsd_t *tsd;
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
@@ -1326,10 +1324,12 @@ malloc_init_hard(void)
malloc_mutex_unlock(TSDN_NULL, &init_lock);
/* Recursive allocation relies on functional tsd. */
tsd = malloc_tsd_boot0();
- if (tsd == NULL)
+ if (tsd == NULL) {
return (true);
- if (malloc_init_hard_recursible())
+ }
+ if (malloc_init_hard_recursible()) {
return (true);
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
if (config_prof && prof_boot2(tsd)) {
@@ -1616,7 +1616,6 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
-
/* If profiling is on, get our profiling context. */
if (config_prof && opt_prof) {
/*
@@ -1755,8 +1754,7 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_malloc(size_t size)
-{
+je_malloc(size_t size) {
void *ret;
static_opts_t sopts;
dynamic_opts_t dopts;
@@ -1780,8 +1778,7 @@ je_malloc(size_t size)
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
-je_posix_memalign(void **memptr, size_t alignment, size_t size)
-{
+je_posix_memalign(void **memptr, size_t alignment, size_t size) {
int ret;
static_opts_t sopts;
dynamic_opts_t dopts;
@@ -1808,8 +1805,7 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
-je_aligned_alloc(size_t alignment, size_t size)
-{
+je_aligned_alloc(size_t alignment, size_t size) {
void *ret;
static_opts_t sopts;
@@ -1839,8 +1835,7 @@ je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
-je_calloc(size_t num, size_t size)
-{
+je_calloc(size_t num, size_t size) {
void *ret;
static_opts_t sopts;
dynamic_opts_t dopts;
@@ -1865,29 +1860,30 @@ je_calloc(size_t num, size_t size)
static void *
irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
- size_t old_usize, size_t usize, prof_tctx_t *tctx)
-{
+ size_t old_usize, size_t usize, prof_tctx_t *tctx) {
void *p;
- if (tctx == NULL)
+ if (tctx == NULL) {
return (NULL);
+ }
if (usize <= SMALL_MAXCLASS) {
p = iralloc(tsd, extent, old_ptr, old_usize, LARGE_MINCLASS, 0,
false);
- if (p == NULL)
+ if (p == NULL) {
return (NULL);
+ }
arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
- } else
+ } else {
p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
+ }
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
- size_t usize)
-{
+ size_t usize) {
void *p;
extent_t *extent;
bool prof_active;
@@ -1915,8 +1911,7 @@ irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
}
JEMALLOC_INLINE_C void
-ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
-{
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
extent_t *extent;
size_t usize;
@@ -1929,42 +1924,46 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (config_prof && opt_prof) {
usize = isalloc(tsd_tsdn(tsd), extent, ptr);
prof_free(tsd, extent, ptr, usize);
- } else if (config_stats)
+ } else if (config_stats) {
usize = isalloc(tsd_tsdn(tsd), extent, ptr);
- if (config_stats)
+ }
+ if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
+ }
- if (likely(!slow_path))
+ if (likely(!slow_path)) {
idalloctm(tsd_tsdn(tsd), extent, ptr, tcache, false, false);
- else
+ } else {
idalloctm(tsd_tsdn(tsd), extent, ptr, tcache, false, true);
+ }
}
JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, extent_t *extent, void *ptr, size_t usize, tcache_t *tcache,
- bool slow_path)
-{
+ bool slow_path) {
witness_assert_lockless(tsd_tsdn(tsd));
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- if (config_prof && opt_prof)
+ if (config_prof && opt_prof) {
prof_free(tsd, extent, ptr, usize);
- if (config_stats)
+ }
+ if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
+ }
- if (likely(!slow_path))
+ if (likely(!slow_path)) {
isdalloct(tsd_tsdn(tsd), extent, ptr, usize, tcache, false);
- else
+ } else {
isdalloct(tsd_tsdn(tsd), extent, ptr, usize, tcache, true);
+ }
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
-je_realloc(void *ptr, size_t size)
-{
+je_realloc(void *ptr, size_t size) {
void *ret;
tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
@@ -2000,8 +1999,9 @@ je_realloc(void *ptr, size_t size)
NULL : irealloc_prof(tsd, extent, ptr, old_usize,
usize);
} else {
- if (config_stats)
+ if (config_stats) {
usize = s2u(size);
+ }
ret = iralloc(tsd, extent, ptr, old_usize, size, 0,
false);
}
@@ -2033,16 +2033,16 @@ je_realloc(void *ptr, size_t size)
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_free(void *ptr)
-{
+je_free(void *ptr) {
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
- if (likely(!malloc_slow))
+ if (likely(!malloc_slow)) {
ifree(tsd, ptr, tcache_get(tsd, false), false);
- else
+ } else {
ifree(tsd, ptr, tcache_get(tsd, false), true);
+ }
witness_assert_lockless(tsd_tsdn(tsd));
}
}
@@ -2059,8 +2059,7 @@ je_free(void *ptr)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
-je_memalign(size_t alignment, size_t size)
-{
+je_memalign(size_t alignment, size_t size) {
void *ret;
static_opts_t sopts;
dynamic_opts_t dopts;
@@ -2090,8 +2089,7 @@ je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
-je_valloc(size_t size)
-{
+je_valloc(size_t size) {
void *ret;
static_opts_t sopts;
@@ -2180,8 +2178,7 @@ int __posix_memalign(void** r, size_t a, size_t s)
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
-je_mallocx(size_t size, int flags)
-{
+je_mallocx(size_t size, int flags) {
void *ret;
static_opts_t sopts;
dynamic_opts_t dopts;
@@ -2225,17 +2222,18 @@ je_mallocx(size_t size, int flags)
static void *
irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
size_t old_usize, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx)
-{
+ tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx) {
void *p;
- if (tctx == NULL)
+ if (tctx == NULL) {
return (NULL);
+ }
if (usize <= SMALL_MAXCLASS) {
p = iralloct(tsdn, extent, old_ptr, old_usize, LARGE_MINCLASS,
alignment, zero, tcache, arena);
- if (p == NULL)
+ if (p == NULL) {
return (NULL);
+ }
arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
} else {
p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment,
@@ -2248,8 +2246,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
- arena_t *arena)
-{
+ arena_t *arena) {
void *p;
extent_t *extent;
bool prof_active;
@@ -2281,8 +2278,9 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
*/
extent = old_extent;
*usize = isalloc(tsd_tsdn(tsd), extent, p);
- } else
+ } else {
extent = iealloc(tsd_tsdn(tsd), p);
+ }
prof_realloc(tsd, extent, p, *usize, tctx, prof_active, false,
old_extent, old_ptr, old_usize, old_tctx);
@@ -2292,8 +2290,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
-je_rallocx(void *ptr, size_t size, int flags)
-{
+je_rallocx(void *ptr, size_t size, int flags) {
void *p;
tsd_t *tsd;
extent_t *extent;
@@ -2314,34 +2311,41 @@ je_rallocx(void *ptr, size_t size, int flags)
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
- if (unlikely(arena == NULL))
+ if (unlikely(arena == NULL)) {
goto label_oom;
- } else
+ }
+ } else {
arena = NULL;
+ }
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
- else
+ } else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- } else
+ }
+ } else {
tcache = tcache_get(tsd, true);
+ }
old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
goto label_oom;
+ }
p = irallocx_prof(tsd, extent, ptr, old_usize, size, alignment,
&usize, zero, tcache, arena);
- if (unlikely(p == NULL))
+ if (unlikely(p == NULL)) {
goto label_oom;
+ }
} else {
p = iralloct(tsd_tsdn(tsd), extent, ptr, old_usize, size,
alignment, zero, tcache, arena);
- if (unlikely(p == NULL))
+ if (unlikely(p == NULL)) {
goto label_oom;
+ }
if (config_stats) {
usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
p), p);
@@ -2368,12 +2372,13 @@ label_oom:
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
- size_t size, size_t extra, size_t alignment, bool zero)
-{
+ size_t size, size_t extra, size_t alignment, bool zero) {
size_t usize;
- if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, zero))
+ if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment,
+ zero)) {
return (old_usize);
+ }
usize = isalloc(tsdn, extent, ptr);
return (usize);
@@ -2382,12 +2387,12 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *ptr,
size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero,
- prof_tctx_t *tctx)
-{
+ prof_tctx_t *tctx) {
size_t usize;
- if (tctx == NULL)
+ if (tctx == NULL) {
return (old_usize);
+ }
usize = ixallocx_helper(tsdn, extent, ptr, old_usize, size, extra,
alignment, zero);
@@ -2396,8 +2401,7 @@ ixallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *ptr,
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize,
- size_t size, size_t extra, size_t alignment, bool zero)
-{
+ size_t size, size_t extra, size_t alignment, bool zero) {
size_t usize_max, usize;
bool prof_active;
prof_tctx_t *old_tctx, *tctx;
@@ -2445,8 +2449,7 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize,
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_xallocx(void *ptr, size_t size, size_t extra, int flags)
-{
+je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
tsd_t *tsd;
extent_t *extent;
size_t usize, old_usize;
@@ -2476,8 +2479,9 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
usize = old_usize;
goto label_not_resized;
}
- if (unlikely(LARGE_MAXCLASS - size < extra))
+ if (unlikely(LARGE_MAXCLASS - size < extra)) {
extra = LARGE_MAXCLASS - size;
+ }
if (config_prof && opt_prof) {
usize = ixallocx_prof(tsd, extent, ptr, old_usize, size, extra,
@@ -2486,8 +2490,9 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
usize = ixallocx_helper(tsd_tsdn(tsd), extent, ptr, old_usize,
size, extra, alignment, zero);
}
- if (unlikely(usize == old_usize))
+ if (unlikely(usize == old_usize)) {
goto label_not_resized;
+ }
if (config_stats) {
*tsd_thread_allocatedp_get(tsd) += usize;
@@ -2501,8 +2506,7 @@ label_not_resized:
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
-je_sallocx(const void *ptr, int flags)
-{
+je_sallocx(const void *ptr, int flags) {
size_t usize;
tsdn_t *tsdn;
@@ -2511,18 +2515,18 @@ je_sallocx(const void *ptr, int flags)
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
- if (config_ivsalloc)
+ if (config_ivsalloc) {
usize = ivsalloc(tsdn, ptr);
- else
+ } else {
usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);
+ }
witness_assert_lockless(tsdn);
return (usize);
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_dallocx(void *ptr, int flags)
-{
+je_dallocx(void *ptr, int flags) {
tsd_t *tsd;
tcache_t *tcache;
@@ -2532,39 +2536,41 @@ je_dallocx(void *ptr, int flags)
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
- else
+ } else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- } else
+ }
+ } else {
tcache = tcache_get(tsd, false);
+ }
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
+ if (likely(!malloc_slow)) {
ifree(tsd, ptr, tcache, false);
- else
+ } else {
ifree(tsd, ptr, tcache, true);
+ }
witness_assert_lockless(tsd_tsdn(tsd));
}
JEMALLOC_ALWAYS_INLINE_C size_t
-inallocx(tsdn_t *tsdn, size_t size, int flags)
-{
+inallocx(tsdn_t *tsdn, size_t size, int flags) {
size_t usize;
witness_assert_lockless(tsdn);
- if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
usize = s2u(size);
- else
+ } else {
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ }
witness_assert_lockless(tsdn);
return (usize);
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
-je_sdallocx(void *ptr, size_t size, int flags)
-{
+je_sdallocx(void *ptr, size_t size, int flags) {
tsd_t *tsd;
extent_t *extent;
size_t usize;
@@ -2579,39 +2585,43 @@ je_sdallocx(void *ptr, size_t size, int flags)
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
- else
+ } else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- } else
+ }
+ } else {
tcache = tcache_get(tsd, false);
+ }
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
+ if (likely(!malloc_slow)) {
isfree(tsd, extent, ptr, usize, tcache, false);
- else
+ } else {
isfree(tsd, extent, ptr, usize, tcache, true);
+ }
witness_assert_lockless(tsd_tsdn(tsd));
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
-je_nallocx(size_t size, int flags)
-{
+je_nallocx(size_t size, int flags) {
size_t usize;
tsdn_t *tsdn;
assert(size != 0);
- if (unlikely(malloc_init()))
+ if (unlikely(malloc_init())) {
return (0);
+ }
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
usize = inallocx(tsdn, size, flags);
- if (unlikely(usize > LARGE_MAXCLASS))
+ if (unlikely(usize > LARGE_MAXCLASS)) {
return (0);
+ }
witness_assert_lockless(tsdn);
return (usize);
@@ -2619,13 +2629,13 @@ je_nallocx(size_t size, int flags)
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
-{
+ size_t newlen) {
int ret;
tsd_t *tsd;
- if (unlikely(malloc_init()))
+ if (unlikely(malloc_init())) {
return (EAGAIN);
+ }
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
@@ -2635,13 +2645,13 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
int ret;
tsdn_t *tsdn;
- if (unlikely(malloc_init()))
+ if (unlikely(malloc_init())) {
return (EAGAIN);
+ }
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
@@ -2652,13 +2662,13 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+ void *newp, size_t newlen) {
int ret;
tsd_t *tsd;
- if (unlikely(malloc_init()))
+ if (unlikely(malloc_init())) {
return (EAGAIN);
+ }
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
@@ -2669,8 +2679,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
+ const char *opts) {
tsdn_t *tsdn;
tsdn = tsdn_fetch();
@@ -2680,8 +2689,7 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
-{
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
size_t ret;
tsdn_t *tsdn;
@@ -2690,9 +2698,9 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
- if (config_ivsalloc)
+ if (config_ivsalloc) {
ret = ivsalloc(tsdn, ptr);
- else {
+ } else {
ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr),
ptr);
}
@@ -2726,8 +2734,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
-jemalloc_constructor(void)
-{
+jemalloc_constructor(void) {
malloc_init();
}
#endif
@@ -2745,8 +2752,9 @@ _malloc_prefork(void)
arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (!malloc_initialized())
+ if (!malloc_initialized()) {
return;
+ }
#endif
assert(malloc_initialized());
@@ -2779,8 +2787,9 @@ _malloc_prefork(void)
}
}
for (i = 0; i < narenas; i++) {
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
arena_prefork3(tsd_tsdn(tsd), arena);
+ }
}
prof_prefork1(tsd_tsdn(tsd));
}
@@ -2797,8 +2806,9 @@ _malloc_postfork(void)
unsigned i, narenas;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (!malloc_initialized())
+ if (!malloc_initialized()) {
return;
+ }
#endif
assert(malloc_initialized());
@@ -2809,8 +2819,9 @@ _malloc_postfork(void)
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
arena_postfork_parent(tsd_tsdn(tsd), arena);
+ }
}
prof_postfork_parent(tsd_tsdn(tsd));
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
@@ -2818,8 +2829,7 @@ _malloc_postfork(void)
}
void
-jemalloc_postfork_child(void)
-{
+jemalloc_postfork_child(void) {
tsd_t *tsd;
unsigned i, narenas;
@@ -2832,8 +2842,9 @@ jemalloc_postfork_child(void)
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
arena_postfork_child(tsd_tsdn(tsd), arena);
+ }
}
prof_postfork_child(tsd_tsdn(tsd));
malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
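
The _malloc_prefork()/_malloc_postfork() hunks above keep the allocator fork-safe: every mutex is acquired before fork() and released (parent) or reinitialized (child) afterward. A minimal pthread_atfork() sketch of the same discipline, with a single lock standing in for jemalloc's arena and prof mutexes:

    #include <pthread.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    prefork(void) {
    	/* Quiesce: no thread may hold the lock across fork(). */
    	pthread_mutex_lock(&state_lock);
    }

    static void
    postfork_parent(void) {
    	pthread_mutex_unlock(&state_lock);
    }

    static void
    postfork_child(void) {
    	/* The child is single-threaded; reinitialize, as the diff does. */
    	pthread_mutex_init(&state_lock, NULL);
    }

    static void
    install_fork_handlers(void) {
    	pthread_atfork(prefork, postfork_parent, postfork_child);
    }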
diff --git a/src/jemalloc_cpp.cpp b/src/jemalloc_cpp.cpp
index 984c944..030ff99 100644
--- a/src/jemalloc_cpp.cpp
+++ b/src/jemalloc_cpp.cpp
@@ -33,8 +33,7 @@ void operator delete[](void *ptr, std::size_t size) noexcept;
template <bool IsNoExcept>
JEMALLOC_INLINE
void *
-newImpl(std::size_t size) noexcept(IsNoExcept)
-{
+newImpl(std::size_t size) noexcept(IsNoExcept) {
void *ptr = je_malloc(size);
if (likely(ptr != nullptr))
return (ptr);
@@ -67,65 +66,55 @@ newImpl(std::size_t size) noexcept(IsNoExcept)
}
void *
-operator new(std::size_t size)
-{
+operator new(std::size_t size) {
return (newImpl<false>(size));
}
void *
-operator new[](std::size_t size)
-{
+operator new[](std::size_t size) {
return (newImpl<false>(size));
}
void *
-operator new(std::size_t size, const std::nothrow_t &) noexcept
-{
+operator new(std::size_t size, const std::nothrow_t &) noexcept {
return (newImpl<true>(size));
}
void *
-operator new[](std::size_t size, const std::nothrow_t &) noexcept
-{
+operator new[](std::size_t size, const std::nothrow_t &) noexcept {
return (newImpl<true>(size));
}
void
-operator delete(void *ptr) noexcept
-{
+operator delete(void *ptr) noexcept {
je_free(ptr);
}
void
-operator delete[](void *ptr) noexcept
-{
+operator delete[](void *ptr) noexcept {
je_free(ptr);
}
void
-operator delete(void *ptr, const std::nothrow_t &) noexcept
-{
+operator delete(void *ptr, const std::nothrow_t &) noexcept {
je_free(ptr);
}
-void operator delete[](void *ptr, const std::nothrow_t &) noexcept
-{
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
je_free(ptr);
}
#if __cpp_sized_deallocation >= 201309
void
-operator delete(void *ptr, std::size_t size) noexcept
-{
+operator delete(void *ptr, std::size_t size) noexcept {
if (unlikely(ptr == nullptr)) {
return;
}
je_sdallocx(ptr, size, /*flags=*/0);
}
-void operator delete[](void *ptr, std::size_t size) noexcept
-{
+void operator delete[](void *ptr, std::size_t size) noexcept {
if (unlikely(ptr == nullptr)) {
return;
}
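
The sized operator delete overloads above forward the caller-supplied size to je_sdallocx(), sparing the allocator its size lookup. The same benefit is available from C via the public sdallocx() entry point; a sketch assuming the program links against jemalloc (free_sized() is a hypothetical wrapper):

    #include <stddef.h>

    /* Public jemalloc API; size must equal the original request. */
    void sdallocx(void *ptr, size_t size, int flags);

    static void
    free_sized(void *ptr, size_t size) {
    	if (ptr == NULL) {
    		return;	/* mirror the nullptr check in the diff */
    	}
    	sdallocx(ptr, size, 0);
    }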
diff --git a/src/large.c b/src/large.c
index 9936b23..0f2f176 100644
--- a/src/large.c
+++ b/src/large.c
@@ -4,8 +4,7 @@
/******************************************************************************/
void *
-large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
-{
+large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
assert(usize == s2u(usize));
return (large_palloc(tsdn, arena, usize, CACHELINE, zero));
@@ -13,8 +12,7 @@ large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero)
-{
+ bool zero) {
size_t ausize;
extent_t *extent;
bool is_zeroed;
@@ -23,27 +21,31 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sa2u(usize, alignment);
- if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS))
+ if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
return (NULL);
+ }
/*
* Copy zero into is_zeroed and pass the copy to extent_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- if (likely(!tsdn_null(tsdn)))
+ if (likely(!tsdn_null(tsdn))) {
arena = arena_choose(tsdn_tsd(tsdn), arena);
+ }
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
- arena, usize, alignment, &is_zeroed)) == NULL)
+ arena, usize, alignment, &is_zeroed)) == NULL) {
return (NULL);
+ }
/* Insert extent into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->large, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
- if (config_prof && arena_prof_accum(tsdn, arena, usize))
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
prof_idump(tsdn);
+ }
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed) {
@@ -64,8 +66,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
#define large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk)
#endif
void
-large_dalloc_junk(void *ptr, size_t usize)
-{
+large_dalloc_junk(void *ptr, size_t usize) {
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
@@ -79,15 +80,15 @@ large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk);
#define large_dalloc_maybe_junk JEMALLOC_N(n_large_dalloc_maybe_junk)
#endif
void
-large_dalloc_maybe_junk(void *ptr, size_t usize)
-{
+large_dalloc_maybe_junk(void *ptr, size_t usize) {
if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
* Only bother junk filling if the extent isn't about to be
* unmapped.
*/
- if (!config_munmap || (have_dss && extent_in_dss(ptr)))
+ if (!config_munmap || (have_dss && extent_in_dss(ptr))) {
large_dalloc_junk(ptr, usize);
+ }
}
}
#ifdef JEMALLOC_JET
@@ -98,8 +99,7 @@ large_dalloc_maybe_junk_t *large_dalloc_maybe_junk =
#endif
static bool
-large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
-{
+large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
arena_t *arena = extent_arena_get(extent);
size_t oldusize = extent_usize_get(extent);
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
@@ -107,16 +107,18 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
assert(oldusize > usize);
- if (extent_hooks->split == NULL)
+ if (extent_hooks->split == NULL) {
return (true);
+ }
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
&extent_hooks, extent, usize + large_pad, usize, diff,
diff);
- if (trail == NULL)
+ if (trail == NULL) {
return (true);
+ }
if (config_fill && unlikely(opt_junk_free)) {
large_dalloc_maybe_junk(extent_addr_get(trail),
@@ -133,8 +135,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
- bool zero)
-{
+ bool zero) {
arena_t *arena = extent_arena_get(extent);
size_t oldusize = extent_usize_get(extent);
bool is_zeroed_trail = false;
@@ -142,8 +143,9 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
size_t trailsize = usize - extent_usize_get(extent);
extent_t *trail;
- if (extent_hooks->merge == NULL)
+ if (extent_hooks->merge == NULL) {
return (true);
+ }
if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) ==
@@ -151,8 +153,9 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool commit = true;
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE,
- &is_zeroed_trail, &commit, false)) == NULL)
+ &is_zeroed_trail, &commit, false)) == NULL) {
return (true);
+ }
}
if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
@@ -193,8 +196,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
- size_t usize_max, bool zero)
-{
+ size_t usize_max, bool zero) {
assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent));
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
@@ -241,17 +243,16 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero)
-{
- if (alignment <= CACHELINE)
+ size_t alignment, bool zero) {
+ if (alignment <= CACHELINE) {
return (large_malloc(tsdn, arena, usize, zero));
+ }
return (large_palloc(tsdn, arena, usize, alignment, zero));
}
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache)
-{
+ size_t alignment, bool zero, tcache_t *tcache) {
void *ret;
size_t copysize;
@@ -262,8 +263,9 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
- if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero))
+ if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
return (extent_addr_get(extent));
+ }
/*
* usize and old size are different enough that we need to use a
@@ -271,8 +273,9 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
* space and copying.
*/
ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero);
- if (ret == NULL)
+ if (ret == NULL) {
return (NULL);
+ }
copysize = (usize < extent_usize_get(extent)) ? usize :
extent_usize_get(extent);
@@ -288,8 +291,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
* independent of these considerations.
*/
static void
-large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
-{
+large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) {
arena_t *arena;
arena = extent_arena_get(extent);
@@ -302,42 +304,37 @@ large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
}
arena_extent_dalloc_large(tsdn, arena, extent, junked_locked);
- if (!junked_locked)
+ if (!junked_locked) {
arena_decay_tick(tsdn, arena);
+ }
}
void
-large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
-{
+large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_impl(tsdn, extent, true);
}
void
-large_dalloc(tsdn_t *tsdn, extent_t *extent)
-{
+large_dalloc(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_impl(tsdn, extent, false);
}
size_t
-large_salloc(tsdn_t *tsdn, const extent_t *extent)
-{
+large_salloc(tsdn_t *tsdn, const extent_t *extent) {
return (extent_usize_get(extent));
}
prof_tctx_t *
-large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent)
-{
+large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
return (extent_prof_tctx_get(extent));
}
void
-large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx)
-{
+large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
extent_prof_tctx_set(extent, tctx);
}
void
-large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent)
-{
+large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}
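
large_ralloc() above attempts an in-place resize via extent split/merge and only then falls back to allocate-copy-free, leaving the old allocation intact when a new one cannot be obtained. A generic sketch of that ordering (try_resize_in_place() is a hypothetical stand-in for large_ralloc_no_move()):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    static bool
    try_resize_in_place(void *ptr, size_t old_size, size_t new_size) {
    	(void)ptr; (void)old_size; (void)new_size;
    	return false;	/* pretend in-place resizing never succeeds */
    }

    static void *
    ralloc_move_fallback(void *ptr, size_t old_size, size_t new_size) {
    	/* Cheap path: grow or shrink without moving the allocation. */
    	if (try_resize_in_place(ptr, old_size, new_size)) {
    		return ptr;
    	}
    	void *new_ptr = malloc(new_size);
    	if (new_ptr == NULL) {
    		return NULL;	/* old allocation stays valid, as in the diff */
    	}
    	size_t copysize = old_size < new_size ? old_size : new_size;
    	memcpy(new_ptr, ptr, copysize);
    	free(ptr);
    	return new_ptr;
    }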
diff --git a/src/mutex.c b/src/mutex.c
index bde536d..bc0869f 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -35,8 +35,7 @@ static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
static void
-pthread_create_once(void)
-{
+pthread_create_once(void) {
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
@@ -50,8 +49,7 @@ pthread_create_once(void)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
- void *__restrict arg)
-{
+ void *__restrict arg) {
static pthread_once_t once_control = PTHREAD_ONCE_INIT;
pthread_once(&once_control, pthread_create_once);
@@ -68,15 +66,16 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
#endif
bool
-malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
-{
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank) {
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
InitializeSRWLock(&mutex->lock);
# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
- _CRT_SPINCOUNT))
+ _CRT_SPINCOUNT)) {
return (true);
+ }
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mutex->lock = OS_UNFAIR_LOCK_INIT;
@@ -88,14 +87,16 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
postponed_mutexes = mutex;
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
- bootstrap_calloc) != 0)
+ bootstrap_calloc) != 0) {
return (true);
+ }
}
#else
pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0)
+ if (pthread_mutexattr_init(&attr) != 0) {
return (true);
+ }
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
@@ -103,26 +104,24 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
}
pthread_mutexattr_destroy(&attr);
#endif
- if (config_debug)
+ if (config_debug) {
witness_init(&mutex->witness, name, rank, NULL, NULL);
+ }
return (false);
}
void
-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_lock(tsdn, mutex);
}
void
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
malloc_mutex_unlock(tsdn, mutex);
}
void
-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(tsdn, mutex);
#else
@@ -130,21 +129,22 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
mutex->witness.rank)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
#endif
}
bool
-malloc_mutex_boot(void)
-{
+malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- bootstrap_calloc) != 0)
+ bootstrap_calloc) != 0) {
return (true);
+ }
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
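
malloc_mutex_boot() above drains a list of mutexes whose initialization had to be postponed, breaking the cycle between allocator bootstrap and _pthread_mutex_init_calloc_cb(). A simplified model of the postponed-list pattern (node layout and init calls are stand-ins, not jemalloc's malloc_mutex_t):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef struct boot_mutex_s {
    	pthread_mutex_t lock;
    	struct boot_mutex_s *postponed_next;
    } boot_mutex_t;

    static bool postpone_init = true;
    static boot_mutex_t *postponed_mutexes = NULL;

    static bool
    boot_mutex_init(boot_mutex_t *m) {
    	if (postpone_init) {
    		/* Too early to call into pthreads; remember it for later. */
    		m->postponed_next = postponed_mutexes;
    		postponed_mutexes = m;
    		return false;
    	}
    	return pthread_mutex_init(&m->lock, NULL) != 0;
    }

    static bool
    boot_mutex_boot(void) {
    	postpone_init = false;
    	while (postponed_mutexes != NULL) {
    		if (pthread_mutex_init(&postponed_mutexes->lock, NULL) != 0) {
    			return true;
    		}
    		postponed_mutexes = postponed_mutexes->postponed_next;
    	}
    	return false;
    }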
diff --git a/src/nstime.c b/src/nstime.c
index 57ebf2e..66989a0 100644
--- a/src/nstime.c
+++ b/src/nstime.c
@@ -3,66 +3,56 @@
#define BILLION UINT64_C(1000000000)
void
-nstime_init(nstime_t *time, uint64_t ns)
-{
+nstime_init(nstime_t *time, uint64_t ns) {
time->ns = ns;
}
void
-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
-{
+nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
time->ns = sec * BILLION + nsec;
}
uint64_t
-nstime_ns(const nstime_t *time)
-{
+nstime_ns(const nstime_t *time) {
return (time->ns);
}
uint64_t
-nstime_sec(const nstime_t *time)
-{
+nstime_sec(const nstime_t *time) {
return (time->ns / BILLION);
}
uint64_t
-nstime_nsec(const nstime_t *time)
-{
+nstime_nsec(const nstime_t *time) {
return (time->ns % BILLION);
}
void
-nstime_copy(nstime_t *time, const nstime_t *source)
-{
+nstime_copy(nstime_t *time, const nstime_t *source) {
*time = *source;
}
int
-nstime_compare(const nstime_t *a, const nstime_t *b)
-{
+nstime_compare(const nstime_t *a, const nstime_t *b) {
return ((a->ns > b->ns) - (a->ns < b->ns));
}
void
-nstime_add(nstime_t *time, const nstime_t *addend)
-{
+nstime_add(nstime_t *time, const nstime_t *addend) {
assert(UINT64_MAX - time->ns >= addend->ns);
time->ns += addend->ns;
}
void
-nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
-{
+nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
assert(nstime_compare(time, subtrahend) >= 0);
time->ns -= subtrahend->ns;
}
void
-nstime_imultiply(nstime_t *time, uint64_t multiplier)
-{
+nstime_imultiply(nstime_t *time, uint64_t multiplier) {
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
@@ -70,16 +60,14 @@ nstime_imultiply(nstime_t *time, uint64_t multiplier)
}
void
-nstime_idivide(nstime_t *time, uint64_t divisor)
-{
+nstime_idivide(nstime_t *time, uint64_t divisor) {
assert(divisor != 0);
time->ns /= divisor;
}
uint64_t
-nstime_divide(const nstime_t *time, const nstime_t *divisor)
-{
+nstime_divide(const nstime_t *time, const nstime_t *divisor) {
assert(divisor->ns != 0);
return (time->ns / divisor->ns);
@@ -88,8 +76,7 @@ nstime_divide(const nstime_t *time, const nstime_t *divisor)
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
FILETIME ft;
uint64_t ticks_100ns;
@@ -101,8 +88,7 @@ nstime_get(nstime_t *time)
#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
@@ -111,8 +97,7 @@ nstime_get(nstime_t *time)
#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
@@ -121,15 +106,13 @@ nstime_get(nstime_t *time)
#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
# define NSTIME_MONOTONIC true
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
nstime_init(time, mach_absolute_time());
}
#else
# define NSTIME_MONOTONIC false
static void
-nstime_get(nstime_t *time)
-{
+nstime_get(nstime_t *time) {
struct timeval tv;
gettimeofday(&tv, NULL);
@@ -142,8 +125,7 @@ nstime_get(nstime_t *time)
#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
#endif
bool
-nstime_monotonic(void)
-{
+nstime_monotonic(void) {
return (NSTIME_MONOTONIC);
#undef NSTIME_MONOTONIC
}
@@ -158,8 +140,7 @@ nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
#define nstime_update JEMALLOC_N(n_nstime_update)
#endif
bool
-nstime_update(nstime_t *time)
-{
+nstime_update(nstime_t *time) {
nstime_t old_time;
nstime_copy(&old_time, time);
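
The assertion in nstime_imultiply() above accepts the multiply if both operands fit in 32 bits (their bitwise OR has no high bits set) or if the wrapped product round-trips through division. Restated as a standalone predicate, with a guard added for a zero multiplier:

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    mul_u64_would_overflow(uint64_t a, uint64_t b) {
    	/* Both operands below 2^32: the product fits in 64 bits. */
    	if (((a | b) & (UINT64_MAX << 32)) == 0) {
    		return false;
    	}
    	/* Otherwise, overflow iff the wrapped product fails to round-trip. */
    	return b != 0 && (a * b) / b != a;
    }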
diff --git a/src/pages.c b/src/pages.c
index 7c26a28..c23dccd 100644
--- a/src/pages.c
+++ b/src/pages.c
@@ -18,14 +18,14 @@ static bool os_overcommits;
/******************************************************************************/
void *
-pages_map(void *addr, size_t size, bool *commit)
-{
+pages_map(void *addr, size_t size, bool *commit) {
void *ret;
assert(size != 0);
- if (os_overcommits)
+ if (os_overcommits) {
*commit = true;
+ }
#ifdef _WIN32
/*
@@ -46,9 +46,9 @@ pages_map(void *addr, size_t size, bool *commit)
}
assert(ret != NULL);
- if (ret == MAP_FAILED)
+ if (ret == MAP_FAILED) {
ret = NULL;
- else if (addr != NULL && ret != addr) {
+ } else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
@@ -62,8 +62,7 @@ pages_map(void *addr, size_t size, bool *commit)
}
void
-pages_unmap(void *addr, size_t size)
-{
+pages_unmap(void *addr, size_t size) {
#ifdef _WIN32
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
@@ -80,15 +79,15 @@ pages_unmap(void *addr, size_t size)
"munmap"
#endif
"(): %s\n", buf);
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
}
void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
- bool *commit)
-{
+ bool *commit) {
void *ret = (void *)((uintptr_t)addr + leadsize);
assert(alloc_size >= leadsize + size);
@@ -98,30 +97,34 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size, commit);
- if (new_addr == ret)
+ if (new_addr == ret) {
return (ret);
- if (new_addr)
+ }
+ if (new_addr) {
pages_unmap(new_addr, size);
+ }
return (NULL);
}
#else
{
size_t trailsize = alloc_size - leadsize - size;
- if (leadsize != 0)
+ if (leadsize != 0) {
pages_unmap(addr, leadsize);
- if (trailsize != 0)
+ }
+ if (trailsize != 0) {
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ }
return (ret);
}
#endif
}
static bool
-pages_commit_impl(void *addr, size_t size, bool commit)
-{
- if (os_overcommits)
+pages_commit_impl(void *addr, size_t size, bool commit) {
+ if (os_overcommits) {
return (true);
+ }
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
@@ -131,8 +134,9 @@ pages_commit_impl(void *addr, size_t size, bool commit)
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
-1, 0);
- if (result == MAP_FAILED)
+ if (result == MAP_FAILED) {
return (true);
+ }
if (result != addr) {
/*
* We succeeded in mapping memory, but not in the right
@@ -147,22 +151,20 @@ pages_commit_impl(void *addr, size_t size, bool commit)
}
bool
-pages_commit(void *addr, size_t size)
-{
+pages_commit(void *addr, size_t size) {
return (pages_commit_impl(addr, size, true));
}
bool
-pages_decommit(void *addr, size_t size)
-{
+pages_decommit(void *addr, size_t size) {
return (pages_commit_impl(addr, size, false));
}
bool
-pages_purge_lazy(void *addr, size_t size)
-{
- if (!pages_can_purge_lazy)
+pages_purge_lazy(void *addr, size_t size) {
+ if (!pages_can_purge_lazy) {
return (true);
+ }
#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
@@ -175,10 +177,10 @@ pages_purge_lazy(void *addr, size_t size)
}
bool
-pages_purge_forced(void *addr, size_t size)
-{
- if (!pages_can_purge_forced)
+pages_purge_forced(void *addr, size_t size) {
+ if (!pages_can_purge_forced) {
return (true);
+ }
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
return (madvise(addr, size, MADV_DONTNEED) != 0);
@@ -188,8 +190,7 @@ pages_purge_forced(void *addr, size_t size)
}
bool
-pages_huge(void *addr, size_t size)
-{
+pages_huge(void *addr, size_t size) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
@@ -201,8 +202,7 @@ pages_huge(void *addr, size_t size)
}
bool
-pages_nohuge(void *addr, size_t size)
-{
+pages_nohuge(void *addr, size_t size) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
@@ -215,14 +215,14 @@ pages_nohuge(void *addr, size_t size)
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
-os_overcommits_sysctl(void)
-{
+os_overcommits_sysctl(void) {
int vm_overcommit;
size_t sz;
sz = sizeof(vm_overcommit);
- if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
+ if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
return (false); /* Error. */
+ }
return ((vm_overcommit & 0x3) == 0);
}
@@ -235,8 +235,7 @@ os_overcommits_sysctl(void)
* wrappers.
*/
static bool
-os_overcommits_proc(void)
-{
+os_overcommits_proc(void) {
int fd;
char buf[1];
ssize_t nread;
@@ -246,8 +245,9 @@ os_overcommits_proc(void)
#else
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
#endif
- if (fd == -1)
+ if (fd == -1) {
return (false); /* Error. */
+ }
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
@@ -261,8 +261,9 @@ os_overcommits_proc(void)
close(fd);
#endif
- if (nread < 1)
+ if (nread < 1) {
return (false); /* Error. */
+ }
/*
* /proc/sys/vm/overcommit_memory meanings:
* 0: Heuristic overcommit.
@@ -274,8 +275,7 @@ os_overcommits_proc(void)
#endif
void
-pages_boot(void)
-{
+pages_boot(void) {
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
@@ -285,8 +285,9 @@ pages_boot(void)
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
- if (os_overcommits)
+ if (os_overcommits) {
mmap_flags |= MAP_NORESERVE;
+ }
# endif
#else
os_overcommits = false;
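
os_overcommits_proc() above reads a single byte of /proc/sys/vm/overcommit_memory to classify the kernel's overcommit policy. A compact sketch with plain open()/read(); the real code prefers raw syscalls where available to avoid re-entering the allocator during bootstrap:

    #include <fcntl.h>
    #include <stdbool.h>
    #include <unistd.h>

    static bool
    os_overcommits_proc_sketch(void) {
    	char buf[1];
    	int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
    	if (fd == -1) {
    		return false;	/* error: assume no overcommit */
    	}
    	ssize_t nread = read(fd, buf, sizeof(buf));
    	close(fd);
    	if (nread < 1) {
    		return false;	/* error */
    	}
    	/* 0: heuristic overcommit; 1: always overcommit; 2: never. */
    	return buf[0] == '0' || buf[0] == '1';
    }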
diff --git a/src/prof.c b/src/prof.c
index b161acf..ca01d8b 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -133,8 +133,7 @@ static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
/* Red-black trees. */
JEMALLOC_INLINE_C int
-prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
-{
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
uint64_t a_thr_uid = a->thr_uid;
uint64_t b_thr_uid = b->thr_uid;
int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
@@ -157,14 +156,14 @@ rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
tctx_link, prof_tctx_comp)
JEMALLOC_INLINE_C int
-prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
-{
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
unsigned a_len = a->bt.len;
unsigned b_len = b->bt.len;
unsigned comp_len = (a_len < b_len) ? a_len : b_len;
int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
- if (ret == 0)
+ if (ret == 0) {
ret = (a_len > b_len) - (a_len < b_len);
+ }
return (ret);
}
@@ -172,8 +171,7 @@ rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
prof_gctx_comp)
JEMALLOC_INLINE_C int
-prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
-{
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
int ret;
uint64_t a_uid = a->thr_uid;
uint64_t b_uid = b->thr_uid;
@@ -194,8 +192,7 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
/******************************************************************************/
void
-prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
-{
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
prof_tdata_t *tdata;
cassert(config_prof);
@@ -208,24 +205,25 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
* programs.
*/
tdata = prof_tdata_get(tsd, true);
- if (tdata != NULL)
+ if (tdata != NULL) {
prof_sample_threshold_update(tdata);
+ }
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
tctx->prepared = false;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
prof_tctx_destroy(tsd, tctx);
- else
+ } else {
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
}
}
void
prof_malloc_sample_object(tsdn_t *tsdn, extent_t *extent, const void *ptr,
- size_t usize, prof_tctx_t *tctx)
-{
+ size_t usize, prof_tctx_t *tctx) {
prof_tctx_set(tsdn, extent, ptr, usize, tctx);
malloc_mutex_lock(tsdn, tctx->tdata->lock);
@@ -240,23 +238,22 @@ prof_malloc_sample_object(tsdn_t *tsdn, extent_t *extent, const void *ptr,
}
void
-prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
-{
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
prof_tctx_destroy(tsd, tctx);
- else
+ } else {
malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
}
void
-bt_init(prof_bt_t *bt, void **vec)
-{
+bt_init(prof_bt_t *bt, void **vec) {
cassert(config_prof);
bt->vec = vec;
@@ -264,8 +261,7 @@ bt_init(prof_bt_t *bt, void **vec)
}
JEMALLOC_INLINE_C void
-prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
-{
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
@@ -278,8 +274,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
}
JEMALLOC_INLINE_C void
-prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
-{
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
@@ -295,17 +290,18 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
gdump = tdata->enq_gdump;
tdata->enq_gdump = false;
- if (idump)
+ if (idump) {
prof_idump(tsd_tsdn(tsd));
- if (gdump)
+ }
+ if (gdump) {
prof_gdump(tsd_tsdn(tsd));
+ }
}
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
-prof_backtrace(prof_bt_t *bt)
-{
+prof_backtrace(prof_bt_t *bt) {
int nframes;
cassert(config_prof);
@@ -313,41 +309,41 @@ prof_backtrace(prof_bt_t *bt)
assert(bt->vec != NULL);
nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
- if (nframes <= 0)
+ if (nframes <= 0) {
return;
+ }
bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
-{
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
cassert(config_prof);
return (_URC_NO_REASON);
}
static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg)
-{
+prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
void *ip;
cassert(config_prof);
ip = (void *)_Unwind_GetIP(context);
- if (ip == NULL)
+ if (ip == NULL) {
return (_URC_END_OF_STACK);
+ }
data->bt->vec[data->bt->len] = ip;
data->bt->len++;
- if (data->bt->len == data->max)
+ if (data->bt->len == data->max) {
return (_URC_END_OF_STACK);
+ }
return (_URC_NO_REASON);
}
void
-prof_backtrace(prof_bt_t *bt)
-{
+prof_backtrace(prof_bt_t *bt) {
prof_unwind_data_t data = {bt, PROF_BT_MAX};
cassert(config_prof);
@@ -356,20 +352,22 @@ prof_backtrace(prof_bt_t *bt)
}
#elif (defined(JEMALLOC_PROF_GCC))
void
-prof_backtrace(prof_bt_t *bt)
-{
+prof_backtrace(prof_bt_t *bt) {
#define BT_FRAME(i) \
if ((i) < PROF_BT_MAX) { \
void *p; \
- if (__builtin_frame_address(i) == 0) \
+ if (__builtin_frame_address(i) == 0) { \
return; \
+ } \
p = __builtin_return_address(i); \
- if (p == NULL) \
+ if (p == NULL) { \
return; \
+ } \
bt->vec[(i)] = p; \
bt->len = (i) + 1; \
- } else \
- return;
+ } else { \
+ return; \
+ }
cassert(config_prof);
@@ -517,30 +515,26 @@ prof_backtrace(prof_bt_t *bt)
}
#else
void
-prof_backtrace(prof_bt_t *bt)
-{
+prof_backtrace(prof_bt_t *bt) {
cassert(config_prof);
not_reached();
}
#endif
static malloc_mutex_t *
-prof_gctx_mutex_choose(void)
-{
+prof_gctx_mutex_choose(void) {
unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}
static malloc_mutex_t *
-prof_tdata_mutex_choose(uint64_t thr_uid)
-{
+prof_tdata_mutex_choose(uint64_t thr_uid) {
return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
}
static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
-{
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
/*
* Create a single allocation that has space for a vec of length bt->len.
*/
@@ -548,8 +542,9 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
true);
- if (gctx == NULL)
+ if (gctx == NULL) {
return (NULL);
+ }
gctx->lock = prof_gctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
@@ -566,8 +561,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
- prof_tdata_t *tdata)
-{
+ prof_tdata_t *tdata) {
cassert(config_prof);
/*
@@ -582,8 +576,9 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
- if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
not_reached();
+ }
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
@@ -601,34 +596,37 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
}
static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
-{
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
- if (opt_prof_accum)
+ if (opt_prof_accum) {
return (false);
- if (tctx->cnts.curobjs != 0)
+ }
+ if (tctx->cnts.curobjs != 0) {
return (false);
- if (tctx->prepared)
+ }
+ if (tctx->prepared) {
return (false);
+ }
return (true);
}
static bool
-prof_gctx_should_destroy(prof_gctx_t *gctx)
-{
- if (opt_prof_accum)
+prof_gctx_should_destroy(prof_gctx_t *gctx) {
+ if (opt_prof_accum) {
return (false);
- if (!tctx_tree_empty(&gctx->tctxs))
+ }
+ if (!tctx_tree_empty(&gctx->tctxs)) {
return (false);
- if (gctx->nlimbo != 0)
+ }
+ if (gctx->nlimbo != 0) {
return (false);
+ }
return (true);
}
static void
-prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
-{
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
prof_tdata_t *tdata = tctx->tdata;
prof_gctx_t *gctx = tctx->gctx;
bool destroy_tdata, destroy_tctx, destroy_gctx;
@@ -667,8 +665,9 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
*/
gctx->nlimbo++;
destroy_gctx = true;
- } else
+ } else {
destroy_gctx = false;
+ }
break;
case prof_tctx_state_dumping:
/*
@@ -693,18 +692,19 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
- if (destroy_tdata)
+ if (destroy_tdata) {
prof_tdata_destroy(tsd, tdata, false);
+ }
- if (destroy_tctx)
+ if (destroy_tctx) {
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tctx), tctx,
NULL, true, true);
+ }
}
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
- void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
-{
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
union {
prof_gctx_t *p;
void *v;
@@ -751,8 +751,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
}
prof_tctx_t *
-prof_lookup(tsd_t *tsd, prof_bt_t *bt)
-{
+prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
union {
prof_tctx_t *p;
void *v;
@@ -763,13 +762,15 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
cassert(config_prof);
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
+ if (tdata == NULL) {
return (NULL);
+ }
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
- if (!not_found) /* Note double negative! */
+ if (!not_found) { /* Note double negative! */
ret.p->prepared = true;
+ }
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (not_found) {
void *btkey;
@@ -781,16 +782,18 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
* cache.
*/
if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
- &new_gctx))
+ &new_gctx)) {
return (NULL);
+ }
/* Link a prof_tctx_t into gctx for this thread. */
ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
size2index(sizeof(prof_tctx_t)), false, NULL, true,
arena_ichoose(tsd, NULL), true);
if (ret.p == NULL) {
- if (new_gctx)
+ if (new_gctx) {
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ }
return (NULL);
}
ret.p->tdata = tdata;
@@ -805,8 +808,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (error) {
- if (new_gctx)
+ if (new_gctx) {
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ }
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v),
ret.v, NULL, true, true);
return (NULL);
@@ -835,14 +839,14 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
* -mno-sse) in order for the workaround to be complete.
*/
void
-prof_sample_threshold_update(prof_tdata_t *tdata)
-{
+prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
- if (!config_prof)
+ if (!config_prof) {
return;
+ }
if (lg_prof_sample == 0) {
tdata->bytes_until_sample = 0;
@@ -877,8 +881,8 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
#ifdef JEMALLOC_JET
static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
size_t *tdata_count = (size_t *)arg;
(*tdata_count)++;
@@ -887,8 +891,7 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
}
size_t
-prof_tdata_count(void)
-{
+prof_tdata_count(void) {
size_t tdata_count = 0;
tsdn_t *tsdn;
@@ -904,16 +907,16 @@ prof_tdata_count(void)
#ifdef JEMALLOC_JET
size_t
-prof_bt_count(void)
-{
+prof_bt_count(void) {
size_t bt_count;
tsd_t *tsd;
prof_tdata_t *tdata;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
+ if (tdata == NULL) {
return (0);
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
@@ -928,16 +931,16 @@ prof_bt_count(void)
#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
-prof_dump_open(bool propagate_err, const char *filename)
-{
+prof_dump_open(bool propagate_err, const char *filename) {
int fd;
fd = creat(filename, 0644);
if (fd == -1 && !propagate_err) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
return (fd);
@@ -949,8 +952,7 @@ prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif
static bool
-prof_dump_flush(bool propagate_err)
-{
+prof_dump_flush(bool propagate_err) {
bool ret = false;
ssize_t err;
@@ -961,8 +963,9 @@ prof_dump_flush(bool propagate_err)
if (!propagate_err) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
ret = true;
}
@@ -972,8 +975,7 @@ prof_dump_flush(bool propagate_err)
}
static bool
-prof_dump_close(bool propagate_err)
-{
+prof_dump_close(bool propagate_err) {
bool ret;
assert(prof_dump_fd != -1);
@@ -985,8 +987,7 @@ prof_dump_close(bool propagate_err)
}
static bool
-prof_dump_write(bool propagate_err, const char *s)
-{
+prof_dump_write(bool propagate_err, const char *s) {
size_t i, slen, n;
cassert(config_prof);
@@ -995,9 +996,11 @@ prof_dump_write(bool propagate_err, const char *s)
slen = strlen(s);
while (i < slen) {
/* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
- if (prof_dump_flush(propagate_err) && propagate_err)
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+ if (prof_dump_flush(propagate_err) && propagate_err) {
return (true);
+ }
+ }
if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
/* Finish writing. */
@@ -1016,8 +1019,7 @@ prof_dump_write(bool propagate_err, const char *s)
JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
-prof_dump_printf(bool propagate_err, const char *format, ...)
-{
+prof_dump_printf(bool propagate_err, const char *format, ...) {
bool ret;
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
@@ -1031,8 +1033,7 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
}
static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
-{
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
malloc_mutex_lock(tsdn, tctx->gctx->lock);
@@ -1063,8 +1064,7 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
}
static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
-{
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
malloc_mutex_assert_owner(tsdn, gctx->lock);
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
@@ -1076,8 +1076,7 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
}
static prof_tctx_t *
-prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
-{
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
@@ -1103,8 +1102,7 @@ struct prof_tctx_dump_iter_arg_s {
};
static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
-{
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
struct prof_tctx_dump_iter_arg_s *arg =
(struct prof_tctx_dump_iter_arg_s *)opaque;
@@ -1121,8 +1119,9 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
- tctx->dump_cnts.accumbytes))
+ tctx->dump_cnts.accumbytes)) {
return (tctx);
+ }
break;
default:
not_reached();
@@ -1131,8 +1130,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
}
static prof_tctx_t *
-prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
-{
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret;
@@ -1158,8 +1156,7 @@ label_return:
}
static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
-{
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
cassert(config_prof);
malloc_mutex_lock(tsdn, gctx->lock);
@@ -1183,24 +1180,23 @@ struct prof_gctx_merge_iter_arg_s {
};
static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
-{
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
struct prof_gctx_merge_iter_arg_s *arg =
(struct prof_gctx_merge_iter_arg_s *)opaque;
malloc_mutex_lock(arg->tsdn, gctx->lock);
tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
(void *)arg->tsdn);
- if (gctx->cnt_summed.curobjs != 0)
+ if (gctx->cnt_summed.curobjs != 0) {
arg->leak_ngctx++;
+ }
malloc_mutex_unlock(arg->tsdn, gctx->lock);
return (NULL);
}
static void
-prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
-{
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
prof_tdata_t *tdata = prof_tdata_get(tsd, false);
prof_gctx_t *gctx;
@@ -1230,8 +1226,9 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
idalloctm(tsd_tsdn(tsd),
iealloc(tsd_tsdn(tsd), to_destroy),
to_destroy, NULL, true, true);
- } else
+ } else {
next = NULL;
+ }
} while (next != NULL);
}
gctx->nlimbo--;
@@ -1239,8 +1236,9 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
gctx->nlimbo++;
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- } else
+ } else {
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
}
}
@@ -1251,8 +1249,7 @@ struct prof_tdata_merge_iter_arg_s {
static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *opaque)
-{
+ void *opaque) {
struct prof_tdata_merge_iter_arg_s *arg =
(struct prof_tdata_merge_iter_arg_s *)opaque;
@@ -1267,8 +1264,9 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
tdata->dumping = true;
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
- &tctx.v);)
+ &tctx.v);) {
prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ }
arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
@@ -1276,20 +1274,22 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
}
- } else
+ } else {
tdata->dumping = false;
+ }
malloc_mutex_unlock(arg->tsdn, tdata->lock);
return (NULL);
}
static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
bool propagate_err = *(bool *)arg;
- if (!tdata->dumping)
+ if (!tdata->dumping) {
return (NULL);
+ }
if (prof_dump_printf(propagate_err,
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
@@ -1297,8 +1297,9 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
tdata->cnt_summed.accumbytes,
(tdata->thread_name != NULL) ? " " : "",
- (tdata->thread_name != NULL) ? tdata->thread_name : ""))
+ (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
return (tdata);
+ }
return (NULL);
}
@@ -1307,16 +1308,16 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
-prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
-{
+prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) {
bool ret;
if (prof_dump_printf(propagate_err,
"heap_v2/%"FMTu64"\n"
" t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
- cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
return (true);
+ }
malloc_mutex_lock(tsdn, &tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
@@ -1332,8 +1333,7 @@ prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
- const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
-{
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
bool ret;
unsigned i;
struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
@@ -1389,8 +1389,7 @@ label_return:
#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
-prof_open_maps(const char *format, ...)
-{
+prof_open_maps(const char *format, ...) {
int mfd;
va_list ap;
char filename[PATH_MAX + 1];
@@ -1405,8 +1404,7 @@ prof_open_maps(const char *format, ...)
#endif
static int
-prof_getpid(void)
-{
+prof_getpid(void) {
#ifdef _WIN32
return (GetCurrentProcessId());
#else
@@ -1415,8 +1413,7 @@ prof_getpid(void)
}
static bool
-prof_dump_maps(bool propagate_err)
-{
+prof_dump_maps(bool propagate_err) {
bool ret;
int mfd;
@@ -1430,8 +1427,9 @@ prof_dump_maps(bool propagate_err)
int pid = prof_getpid();
mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
- if (mfd == -1)
+ if (mfd == -1) {
mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
}
#endif
if (mfd != -1) {
@@ -1463,8 +1461,9 @@ prof_dump_maps(bool propagate_err)
ret = false;
label_return:
- if (mfd != -1)
+ if (mfd != -1) {
close(mfd);
+ }
return (ret);
}
@@ -1474,8 +1473,7 @@ label_return:
*/
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
- const char *filename)
-{
+ const char *filename) {
#ifdef JEMALLOC_PROF
/*
* Scaling is equivalent to AdjustSamples() in jeprof, but the result may
@@ -1510,8 +1508,7 @@ struct prof_gctx_dump_iter_arg_s {
};
static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
-{
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
prof_gctx_t *ret;
struct prof_gctx_dump_iter_arg_s *arg =
(struct prof_gctx_dump_iter_arg_s *)opaque;
@@ -1534,8 +1531,7 @@ static void
prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
- prof_gctx_tree_t *gctxs)
-{
+ prof_gctx_tree_t *gctxs) {
size_t tabind;
union {
prof_gctx_t *p;
@@ -1579,8 +1575,7 @@ prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
- prof_gctx_tree_t *gctxs)
-{
+ prof_gctx_tree_t *gctxs) {
/* Create dump file. */
if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
return true;
@@ -1616,8 +1611,8 @@ label_write_error:
}
static bool
-prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
-{
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck) {
prof_tdata_t *tdata;
struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
@@ -1657,8 +1652,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
#ifdef JEMALLOC_JET
void
prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
- uint64_t *accumbytes)
-{
+ uint64_t *accumbytes) {
tsd_t *tsd;
prof_tdata_t *tdata;
struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
@@ -1705,8 +1699,7 @@ prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
-prof_dump_filename(char *filename, char v, uint64_t vseq)
-{
+prof_dump_filename(char *filename, char v, uint64_t vseq) {
cassert(config_prof);
if (vseq != VSEQ_INVALID) {
@@ -1724,8 +1717,7 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
}
static void
-prof_fdump(void)
-{
+prof_fdump(void) {
tsd_t *tsd;
char filename[DUMP_FILENAME_BUFSIZE];
@@ -1733,8 +1725,9 @@ prof_fdump(void)
assert(opt_prof_final);
assert(opt_prof_prefix[0] != '\0');
- if (!prof_booted)
+ if (!prof_booted) {
return;
+ }
tsd = tsd_fetch();
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
@@ -1744,19 +1737,20 @@ prof_fdump(void)
}
void
-prof_idump(tsdn_t *tsdn)
-{
+prof_idump(tsdn_t *tsdn) {
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted || tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
+ if (tdata == NULL) {
return;
+ }
if (tdata->enq) {
tdata->enq_idump = true;
return;
@@ -1773,19 +1767,20 @@ prof_idump(tsdn_t *tsdn)
}
bool
-prof_mdump(tsd_t *tsd, const char *filename)
-{
+prof_mdump(tsd_t *tsd, const char *filename) {
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
- if (!opt_prof || !prof_booted)
+ if (!opt_prof || !prof_booted) {
return (true);
+ }
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
- if (opt_prof_prefix[0] == '\0')
+ if (opt_prof_prefix[0] == '\0') {
return (true);
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
@@ -1796,19 +1791,20 @@ prof_mdump(tsd_t *tsd, const char *filename)
}
void
-prof_gdump(tsdn_t *tsdn)
-{
+prof_gdump(tsdn_t *tsdn) {
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted || tsdn_null(tsdn)) {
return;
+ }
tsd = tsdn_tsd(tsdn);
tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL)
+ if (tdata == NULL) {
return;
+ }
if (tdata->enq) {
tdata->enq_gdump = true;
return;
@@ -1825,8 +1821,7 @@ prof_gdump(tsdn_t *tsdn)
}
static void
-prof_bt_hash(const void *key, size_t r_hash[2])
-{
+prof_bt_hash(const void *key, size_t r_hash[2]) {
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
@@ -1835,21 +1830,20 @@ prof_bt_hash(const void *key, size_t r_hash[2])
}
static bool
-prof_bt_keycomp(const void *k1, const void *k2)
-{
+prof_bt_keycomp(const void *k1, const void *k2) {
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
cassert(config_prof);
- if (bt1->len != bt2->len)
+ if (bt1->len != bt2->len) {
return (false);
+ }
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
JEMALLOC_INLINE_C uint64_t
-prof_thr_uid_alloc(tsdn_t *tsdn)
-{
+prof_thr_uid_alloc(tsdn_t *tsdn) {
uint64_t thr_uid;
malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
@@ -1862,8 +1856,7 @@ prof_thr_uid_alloc(tsdn_t *tsdn)
static prof_tdata_t *
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
- char *thread_name, bool active)
-{
+ char *thread_name, bool active) {
prof_tdata_t *tdata;
cassert(config_prof);
@@ -1872,8 +1865,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
size2index(sizeof(prof_tdata_t)), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
- if (tdata == NULL)
+ if (tdata == NULL) {
return (NULL);
+ }
tdata->lock = prof_tdata_mutex_choose(thr_uid);
tdata->thr_uid = thr_uid;
@@ -1908,26 +1902,25 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
}
prof_tdata_t *
-prof_tdata_init(tsd_t *tsd)
-{
+prof_tdata_init(tsd_t *tsd) {
return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
}
static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
-{
- if (tdata->attached && !even_if_attached)
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
+ if (tdata->attached && !even_if_attached) {
return (false);
- if (ckh_count(&tdata->bt2tctx) != 0)
+ }
+ if (ckh_count(&tdata->bt2tctx) != 0) {
return (false);
+ }
return (true);
}
static bool
prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached)
-{
+ bool even_if_attached) {
malloc_mutex_assert_owner(tsdn, tdata->lock);
return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
@@ -1935,8 +1928,7 @@ prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached)
-{
+ bool even_if_attached) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
tdata_tree_remove(&tdatas, tdata);
@@ -1953,16 +1945,14 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
}
static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
-{
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
}
static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
-{
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
bool destroy_tdata;
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
@@ -1973,19 +1963,21 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
* Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata.
*/
- if (!destroy_tdata)
+ if (!destroy_tdata) {
tdata->attached = false;
+ }
tsd_prof_tdata_set(tsd, NULL);
- } else
+ } else {
destroy_tdata = false;
+ }
malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (destroy_tdata)
+ if (destroy_tdata) {
prof_tdata_destroy(tsd, tdata, true);
+ }
}
prof_tdata_t *
-prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
-{
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
@@ -1998,8 +1990,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
}
static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
-{
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
bool destroy_tdata;
malloc_mutex_lock(tsdn, tdata->lock);
@@ -2007,24 +1998,24 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
tdata->expired = true;
destroy_tdata = tdata->attached ? false :
prof_tdata_should_destroy(tsdn, tdata, false);
- } else
+ } else {
destroy_tdata = false;
+ }
malloc_mutex_unlock(tsdn, tdata->lock);
return (destroy_tdata);
}
static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
-{
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
tsdn_t *tsdn = (tsdn_t *)arg;
return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
}
void
-prof_reset(tsd_t *tsd, size_t lg_sample)
-{
+prof_reset(tsd_t *tsd, size_t lg_sample) {
prof_tdata_t *next;
assert(lg_sample < (sizeof(uint64_t) << 3));
@@ -2041,8 +2032,9 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy);
prof_tdata_destroy_locked(tsd, to_destroy, false);
- } else
+ } else {
next = NULL;
+ }
} while (next != NULL);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
@@ -2050,21 +2042,21 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
}
void
-prof_tdata_cleanup(tsd_t *tsd)
-{
+prof_tdata_cleanup(tsd_t *tsd) {
prof_tdata_t *tdata;
- if (!config_prof)
+ if (!config_prof) {
return;
+ }
tdata = tsd_prof_tdata_get(tsd);
- if (tdata != NULL)
+ if (tdata != NULL) {
prof_tdata_detach(tsd, tdata);
+ }
}
bool
-prof_active_get(tsdn_t *tsdn)
-{
+prof_active_get(tsdn_t *tsdn) {
bool prof_active_current;
malloc_mutex_lock(tsdn, &prof_active_mtx);
@@ -2074,8 +2066,7 @@ prof_active_get(tsdn_t *tsdn)
}
bool
-prof_active_set(tsdn_t *tsdn, bool active)
-{
+prof_active_set(tsdn_t *tsdn, bool active) {
bool prof_active_old;
malloc_mutex_lock(tsdn, &prof_active_mtx);
@@ -2086,97 +2077,102 @@ prof_active_set(tsdn_t *tsdn, bool active)
}
const char *
-prof_thread_name_get(tsd_t *tsd)
-{
+prof_thread_name_get(tsd_t *tsd) {
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
+ if (tdata == NULL) {
return ("");
+ }
return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
-{
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
char *ret;
size_t size;
- if (thread_name == NULL)
+ if (thread_name == NULL) {
return (NULL);
+ }
size = strlen(thread_name) + 1;
- if (size == 1)
+ if (size == 1) {
return ("");
+ }
ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
- if (ret == NULL)
+ if (ret == NULL) {
return (NULL);
+ }
memcpy(ret, thread_name, size);
return (ret);
}
int
-prof_thread_name_set(tsd_t *tsd, const char *thread_name)
-{
+prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
prof_tdata_t *tdata;
unsigned i;
char *s;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
+ if (tdata == NULL) {
return (EAGAIN);
+ }
/* Validate input. */
- if (thread_name == NULL)
+ if (thread_name == NULL) {
return (EFAULT);
+ }
for (i = 0; thread_name[i] != '\0'; i++) {
char c = thread_name[i];
- if (!isgraph(c) && !isblank(c))
+ if (!isgraph(c) && !isblank(c)) {
return (EFAULT);
+ }
}
s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
- if (s == NULL)
+ if (s == NULL) {
return (EAGAIN);
+ }
if (tdata->thread_name != NULL) {
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
tdata->thread_name), tdata->thread_name, NULL, true, true);
tdata->thread_name = NULL;
}
- if (strlen(s) > 0)
+ if (strlen(s) > 0) {
tdata->thread_name = s;
+ }
return (0);
}
bool
-prof_thread_active_get(tsd_t *tsd)
-{
+prof_thread_active_get(tsd_t *tsd) {
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
+ if (tdata == NULL) {
return (false);
+ }
return (tdata->active);
}
bool
-prof_thread_active_set(tsd_t *tsd, bool active)
-{
+prof_thread_active_set(tsd_t *tsd, bool active) {
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL)
+ if (tdata == NULL) {
return (true);
+ }
tdata->active = active;
return (false);
}
bool
-prof_thread_active_init_get(tsdn_t *tsdn)
-{
+prof_thread_active_init_get(tsdn_t *tsdn) {
bool active_init;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
@@ -2186,8 +2182,7 @@ prof_thread_active_init_get(tsdn_t *tsdn)
}
bool
-prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
-{
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
bool active_init_old;
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
@@ -2198,8 +2193,7 @@ prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
}
bool
-prof_gdump_get(tsdn_t *tsdn)
-{
+prof_gdump_get(tsdn_t *tsdn) {
bool prof_gdump_current;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
@@ -2209,8 +2203,7 @@ prof_gdump_get(tsdn_t *tsdn)
}
bool
-prof_gdump_set(tsdn_t *tsdn, bool gdump)
-{
+prof_gdump_set(tsdn_t *tsdn, bool gdump) {
bool prof_gdump_old;
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
@@ -2221,8 +2214,7 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump)
}
void
-prof_boot0(void)
-{
+prof_boot0(void) {
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
@@ -2230,8 +2222,7 @@ prof_boot0(void)
}
void
-prof_boot1(void)
-{
+prof_boot1(void) {
cassert(config_prof);
/*
@@ -2255,8 +2246,7 @@ prof_boot1(void)
}
bool
-prof_boot2(tsd_t *tsd)
-{
+prof_boot2(tsd_t *tsd) {
cassert(config_prof);
if (opt_prof) {
@@ -2266,71 +2256,85 @@ prof_boot2(tsd_t *tsd)
prof_active = opt_prof_active;
if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE))
+ WITNESS_RANK_PROF_ACTIVE)) {
return (true);
+ }
prof_gdump_val = opt_prof_gdump;
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP))
+ WITNESS_RANK_PROF_GDUMP)) {
return (true);
+ }
prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx,
"prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) {
return (true);
+ }
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp))
+ prof_bt_keycomp)) {
return (true);
+ }
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX))
+ WITNESS_RANK_PROF_BT2GCTX)) {
return (true);
+ }
tdata_tree_new(&tdatas);
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS))
+ WITNESS_RANK_PROF_TDATAS)) {
return (true);
+ }
next_thr_uid = 0;
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID))
+ WITNESS_RANK_PROF_NEXT_THR_UID)) {
return (true);
+ }
if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ))
+ WITNESS_RANK_PROF_DUMP_SEQ)) {
return (true);
+ }
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP))
+ WITNESS_RANK_PROF_DUMP)) {
return (true);
+ }
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
CACHELINE);
- if (gctx_locks == NULL)
+ if (gctx_locks == NULL) {
return (true);
+ }
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
- WITNESS_RANK_PROF_GCTX))
+ WITNESS_RANK_PROF_GCTX)) {
return (true);
+ }
}
tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
CACHELINE);
- if (tdata_locks == NULL)
+ if (tdata_locks == NULL) {
return (true);
+ }
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
- WITNESS_RANK_PROF_TDATA))
+ WITNESS_RANK_PROF_TDATA)) {
return (true);
+ }
}
}
@@ -2348,24 +2352,24 @@ prof_boot2(tsd_t *tsd)
}
void
-prof_prefork0(tsdn_t *tsdn)
-{
+prof_prefork0(tsdn_t *tsdn) {
if (opt_prof) {
unsigned i;
malloc_mutex_prefork(tsdn, &prof_dump_mtx);
malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
malloc_mutex_prefork(tsdn, &tdatas_mtx);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &tdata_locks[i]);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ }
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &gctx_locks[i]);
+ }
}
}
void
-prof_prefork1(tsdn_t *tsdn)
-{
+prof_prefork1(tsdn_t *tsdn) {
if (opt_prof) {
malloc_mutex_prefork(tsdn, &prof_active_mtx);
malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
@@ -2376,8 +2380,7 @@ prof_prefork1(tsdn_t *tsdn)
}
void
-prof_postfork_parent(tsdn_t *tsdn)
-{
+prof_postfork_parent(tsdn_t *tsdn) {
if (opt_prof) {
unsigned i;
@@ -2387,10 +2390,12 @@ prof_postfork_parent(tsdn_t *tsdn)
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+ }
malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
@@ -2398,8 +2403,7 @@ prof_postfork_parent(tsdn_t *tsdn)
}
void
-prof_postfork_child(tsdn_t *tsdn)
-{
+prof_postfork_child(tsdn_t *tsdn) {
if (opt_prof) {
unsigned i;
@@ -2408,10 +2412,12 @@ prof_postfork_child(tsdn_t *tsdn)
malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+ }
malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
diff --git a/src/rtree.c b/src/rtree.c
index 43f2165..de3e596 100644
--- a/src/rtree.c
+++ b/src/rtree.c
@@ -2,8 +2,7 @@
#include "jemalloc/internal/jemalloc_internal.h"
static unsigned
-hmin(unsigned ha, unsigned hb)
-{
+hmin(unsigned ha, unsigned hb) {
return (ha < hb ? ha : hb);
}
@@ -12,8 +11,7 @@ hmin(unsigned ha, unsigned hb)
* used.
*/
bool
-rtree_new(rtree_t *rtree, unsigned bits)
-{
+rtree_new(rtree_t *rtree, unsigned bits) {
unsigned bits_in_leaf, height, i;
assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
@@ -24,10 +22,12 @@ rtree_new(rtree_t *rtree, unsigned bits)
: (bits % RTREE_BITS_PER_LEVEL);
if (bits > bits_in_leaf) {
height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
- if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
+ if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) {
height++;
- } else
+ }
+ } else {
height = 1;
+ }
assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
rtree->height = height;
@@ -68,8 +68,7 @@ rtree_new(rtree_t *rtree, unsigned bits)
#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc_impl)
#endif
static rtree_elm_t *
-rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
-{
+rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return ((rtree_elm_t *)base_alloc(tsdn, b0get(), nelms *
sizeof(rtree_elm_t), CACHELINE));
}
@@ -84,8 +83,7 @@ rtree_node_alloc_t *rtree_node_alloc = JEMALLOC_N(rtree_node_alloc_impl);
#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc_impl)
#endif
UNUSED static void
-rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node)
-{
+rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) {
/* Nodes are never deleted during normal operation. */
not_reached();
}
@@ -98,8 +96,7 @@ rtree_node_dalloc_t *rtree_node_dalloc = JEMALLOC_N(rtree_node_dalloc_impl);
#ifdef JEMALLOC_JET
static void
rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node,
- unsigned level)
-{
+ unsigned level) {
if (level + 1 < rtree->height) {
size_t nchildren, i;
@@ -116,22 +113,21 @@ rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node,
}
void
-rtree_delete(tsdn_t *tsdn, rtree_t *rtree)
-{
+rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
unsigned i;
for (i = 0; i < rtree->height; i++) {
rtree_elm_t *subtree = rtree->levels[i].subtree;
- if (subtree != NULL)
+ if (subtree != NULL) {
rtree_delete_subtree(tsdn, rtree, subtree, i);
+ }
}
}
#endif
static rtree_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
- rtree_elm_t **elmp)
-{
+ rtree_elm_t **elmp) {
rtree_elm_t *node;
malloc_mutex_lock(tsdn, &rtree->init_lock);
@@ -151,23 +147,20 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
}
rtree_elm_t *
-rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level)
-{
+rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level) {
return (rtree_node_init(tsdn, rtree, level,
&rtree->levels[level].subtree));
}
rtree_elm_t *
rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm,
- unsigned level)
-{
+ unsigned level) {
return (rtree_node_init(tsdn, rtree, level+1, &elm->child));
}
static int
rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b,
- void *ob)
-{
+ void *ob) {
uintptr_t ka = (uintptr_t)oa;
uintptr_t kb = (uintptr_t)ob;
@@ -178,8 +171,7 @@ rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b,
}
static witness_t *
-rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm)
-{
+rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm) {
witness_t *witness;
size_t i;
rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
@@ -204,8 +196,7 @@ rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm)
}
static witness_t *
-rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm)
-{
+rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm) {
size_t i;
rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
@@ -213,15 +204,16 @@ rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm)
i++) {
rtree_elm_witness_t *rew = &witnesses->witnesses[i];
- if (rew->elm == elm)
+ if (rew->elm == elm) {
return (&rew->witness);
+ }
}
not_reached();
}
static void
-rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, const rtree_elm_t *elm)
-{
+rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness,
+ const rtree_elm_t *elm) {
size_t i;
rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
@@ -242,12 +234,12 @@ rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, const rtree_elm_t *elm)
void
rtree_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree, uintptr_t key,
- const rtree_elm_t *elm)
-{
+ const rtree_elm_t *elm) {
witness_t *witness;
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
witness = rtree_elm_witness_alloc(tsdn_tsd(tsdn), key, elm);
witness_lock(tsdn, witness);
@@ -255,12 +247,12 @@ rtree_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree, uintptr_t key,
void
rtree_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
- const rtree_elm_t *elm)
-{
+ const rtree_elm_t *elm) {
witness_t *witness;
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm);
witness_assert_owner(tsdn, witness);
@@ -268,12 +260,12 @@ rtree_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
void
rtree_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
- const rtree_elm_t *elm)
-{
+ const rtree_elm_t *elm) {
witness_t *witness;
- if (tsdn_null(tsdn))
+ if (tsdn_null(tsdn)) {
return;
+ }
witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm);
witness_unlock(tsdn, witness);
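
Aside on rtree_new() above: it sizes the tree so that each non-leaf level resolves RTREE_BITS_PER_LEVEL key bits and the leaf level absorbs the remainder, so no key bit is left unresolved. A worked sketch of that arithmetic, under a hypothetical 4-bits-per-level, 30-bit-key configuration:

	/*
	 * With bits == 30 and bits_per_level == 4:
	 *   bits_in_leaf = 30 % 4 = 2        (leaf resolves 2 bits)
	 *   height       = 1 + (30 - 2) / 4  = 8 levels total
	 *   check: (8 - 1) * 4 + 2 == 30     (all key bits consumed)
	 */
	static unsigned
	rtree_height(unsigned bits, unsigned bits_per_level) {
		unsigned bits_in_leaf = (bits % bits_per_level == 0) ?
		    bits_per_level : bits % bits_per_level;
		return (bits > bits_in_leaf) ?
		    1 + (bits - bits_in_leaf) / bits_per_level : 1;
	}
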
diff --git a/src/stats.c b/src/stats.c
index 020d56b..b0a7fca 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -34,8 +34,7 @@ bool opt_stats_print = false;
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool large, unsigned i)
-{
+ bool json, bool large, unsigned i) {
size_t page;
bool in_gap, in_gap_prev;
unsigned nbins, j;
@@ -144,8 +143,9 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
} else if (milli < 1000) {
malloc_snprintf(util, sizeof(util), "0.%zu",
milli);
- } else
+ } else {
malloc_snprintf(util, sizeof(util), "1");
+ }
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
@@ -183,8 +183,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
static void
stats_arena_lextents_print(void (*write_cb)(void *, const char *),
- void *cbopaque, bool json, unsigned i)
-{
+ void *cbopaque, bool json, unsigned i) {
unsigned nbins, nlextents, j;
bool in_gap, in_gap_prev;
@@ -248,8 +247,7 @@ stats_arena_lextents_print(void (*write_cb)(void *, const char *),
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, unsigned i, bool bins, bool large)
-{
+ bool json, unsigned i, bool bins, bool large) {
unsigned nthreads;
const char *dss;
ssize_t decay_time;
@@ -290,8 +288,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (decay_time >= 0) {
malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n",
decay_time);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n");
+ }
}
CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
@@ -445,16 +444,17 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"resident: %12zu\n", resident);
}
- if (bins)
+ if (bins) {
stats_arena_bins_print(write_cb, cbopaque, json, large, i);
- if (large)
+ }
+ if (large) {
stats_arena_lextents_print(write_cb, cbopaque, json, i);
+ }
}
static void
stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool more)
-{
+ bool json, bool more) {
const char *cpv;
bool bv;
unsigned uv;
@@ -473,8 +473,9 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"version\": \"%s\",\n", cpv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+ }
/* config. */
#define CONFIG_WRITE_BOOL_JSON(n, c) \
@@ -655,8 +656,9 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"narenas\": %u,\n", uv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
+ }
CTL_GET("arenas.decay_time", &ssv, ssize_t);
if (json) {
@@ -672,15 +674,17 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"quantum\": %zu,\n", sv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+ }
CTL_GET("arenas.page", &sv, size_t);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"page\": %zu,\n", sv);
- } else
+ } else {
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+ }
if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
if (json) {
@@ -787,8 +791,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
static void
stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool merged, bool destroyed, bool unmerged, bool bins,
- bool large)
-{
+ bool large) {
size_t allocated, active, metadata, resident, mapped, retained;
CTL_GET("stats.allocated", &allocated, size_t);
@@ -846,8 +849,9 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
sz = sizeof(bool);
xmallctlbymib(mib, miblen, &initialized[i], &sz,
NULL, 0);
- if (initialized[i])
+ if (initialized[i]) {
ninitialized++;
+ }
}
mib[1] = MALLCTL_ARENAS_DESTROYED;
sz = sizeof(bool);
@@ -934,8 +938,7 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
+ const char *opts) {
int err;
uint64_t epoch;
size_t u64sz;
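
Aside on the utilization formatting in stats_arena_bins_print() above: per-bin utilization is tracked in milli-units (0..1000) and printed with three decimal places without touching floating point. The smaller-cutoff branches sit just above the hunk shown; a standalone sketch of the full zero-padding ladder, with a hypothetical helper name:

	#include <stdio.h>

	static void
	format_milli(char *buf, size_t buflen, size_t milli) {
		if (milli < 10) {
			snprintf(buf, buflen, "0.00%zu", milli);
		} else if (milli < 100) {
			snprintf(buf, buflen, "0.0%zu", milli);
		} else if (milli < 1000) {
			snprintf(buf, buflen, "0.%zu", milli);
		} else {
			snprintf(buf, buflen, "1");	/* Fully utilized. */
		}
	}
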
diff --git a/src/tcache.c b/src/tcache.c
index d132341..bb6a5a7 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -24,14 +24,12 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
size_t
-tcache_salloc(tsdn_t *tsdn, const void *ptr)
-{
+tcache_salloc(tsdn_t *tsdn, const void *ptr) {
return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr));
}
void
-tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
-{
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
szind_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
@@ -52,33 +50,36 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
* Reduce fill count by 2X. Limit lg_fill_div such that the
* fill count is always at least 1.
*/
- if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
+ if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) {
tbin->lg_fill_div++;
+ }
} else if (tbin->low_water < 0) {
/*
* Increase fill count by 2X. Make sure lg_fill_div stays
* greater than 0.
*/
- if (tbin->lg_fill_div > 1)
+ if (tbin->lg_fill_div > 1) {
tbin->lg_fill_div--;
+ }
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins)
+ if (tcache->next_gc_bin == nhbins) {
tcache->next_gc_bin = 0;
+ }
}
void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
-{
+ tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
void *ret;
arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
- if (config_prof)
+ if (config_prof) {
tcache->prof_accumbytes = 0;
+ }
ret = tcache_alloc_easy(tbin, tcache_success);
return (ret);
@@ -86,8 +87,7 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- szind_t binind, unsigned rem)
-{
+ szind_t binind, unsigned rem) {
arena_t *arena;
void *ptr;
unsigned i, nflush, ndeferred;
@@ -106,8 +106,9 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
if (config_prof && bin_arena == arena) {
if (arena_prof_accum(tsd_tsdn(tsd), arena,
- tcache->prof_accumbytes))
+ tcache->prof_accumbytes)) {
prof_idump(tsd_tsdn(tsd));
+ }
tcache->prof_accumbytes = 0;
}
@@ -158,14 +159,14 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
+ if ((int)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
+ }
}
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache)
-{
+ unsigned rem, tcache_t *tcache) {
arena_t *arena;
void *ptr;
unsigned i, nflush, ndeferred;
@@ -182,8 +183,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
arena_t *locked_arena = extent_arena_get(extent);
UNUSED bool idump;
- if (config_prof)
+ if (config_prof) {
idump = false;
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
@@ -220,8 +222,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
- if (config_prof && idump)
+ if (config_prof && idump) {
prof_idump(tsd_tsdn(tsd));
+ }
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
}
@@ -241,13 +244,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
+ if ((int)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
+ }
}
static void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -258,8 +261,7 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
}
static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->lock);
@@ -282,31 +284,30 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
- arena_t *newarena)
-{
+ arena_t *newarena) {
tcache_arena_dissociate(tsdn, tcache, oldarena);
tcache_arena_associate(tsdn, tcache, newarena);
}
tcache_t *
-tcache_get_hard(tsd_t *tsd)
-{
+tcache_get_hard(tsd_t *tsd) {
arena_t *arena;
if (!tcache_enabled_get()) {
- if (tsd_nominal(tsd))
+ if (tsd_nominal(tsd)) {
tcache_enabled_set(false); /* Memoize. */
+ }
return (NULL);
}
arena = arena_choose(tsd, NULL);
- if (unlikely(arena == NULL))
+ if (unlikely(arena == NULL)) {
return (NULL);
+ }
return (tcache_create(tsd_tsdn(tsd), arena));
}
tcache_t *
-tcache_create(tsdn_t *tsdn, arena_t *arena)
-{
+tcache_create(tsdn_t *tsdn, arena_t *arena) {
tcache_t *tcache;
size_t size, stack_offset;
unsigned i;
@@ -321,8 +322,9 @@ tcache_create(tsdn_t *tsdn, arena_t *arena)
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
- if (tcache == NULL)
+ if (tcache == NULL) {
return (NULL);
+ }
tcache_arena_associate(tsdn, tcache, arena);
@@ -345,8 +347,7 @@ tcache_create(tsdn_t *tsdn, arena_t *arena)
}
static void
-tcache_destroy(tsd_t *tsd, tcache_t *tcache)
-{
+tcache_destroy(tsd_t *tsd, tcache_t *tcache) {
arena_t *arena;
unsigned i;
@@ -372,20 +373,21 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
+ arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) {
prof_idump(tsd_tsdn(tsd));
+ }
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tcache), tcache, NULL,
true, true);
}
void
-tcache_cleanup(tsd_t *tsd)
-{
+tcache_cleanup(tsd_t *tsd) {
tcache_t *tcache;
- if (!config_tcache)
+ if (!config_tcache) {
return;
+ }
if ((tcache = tsd_tcache_get(tsd)) != NULL) {
tcache_destroy(tsd, tcache);
@@ -394,8 +396,7 @@ tcache_cleanup(tsd_t *tsd)
}
void
-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
-{
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
unsigned i;
cassert(config_stats);
@@ -422,8 +423,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
}
bool
-tcaches_create(tsd_t *tsd, unsigned *r_ind)
-{
+tcaches_create(tsd_t *tsd, unsigned *r_ind) {
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
@@ -431,18 +431,22 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches == NULL) {
tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
* (MALLOCX_TCACHE_MAX+1), CACHELINE);
- if (tcaches == NULL)
+ if (tcaches == NULL) {
return (true);
+ }
}
- if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+ if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
return (true);
+ }
arena = arena_ichoose(tsd, NULL);
- if (unlikely(arena == NULL))
+ if (unlikely(arena == NULL)) {
return (true);
+ }
tcache = tcache_create(tsd_tsdn(tsd), arena);
- if (tcache == NULL)
+ if (tcache == NULL) {
return (true);
+ }
if (tcaches_avail != NULL) {
elm = tcaches_avail;
@@ -460,23 +464,21 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
}
static void
-tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
-{
- if (elm->tcache == NULL)
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) {
+ if (elm->tcache == NULL) {
return;
+ }
tcache_destroy(tsd, elm->tcache);
elm->tcache = NULL;
}
void
-tcaches_flush(tsd_t *tsd, unsigned ind)
-{
+tcaches_flush(tsd_t *tsd, unsigned ind) {
tcaches_elm_flush(tsd, &tcaches[ind]);
}
void
-tcaches_destroy(tsd_t *tsd, unsigned ind)
-{
+tcaches_destroy(tsd_t *tsd, unsigned ind) {
tcaches_t *elm = &tcaches[ind];
tcaches_elm_flush(tsd, elm);
elm->next = tcaches_avail;
@@ -484,23 +486,25 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
bool
-tcache_boot(tsdn_t *tsdn)
-{
+tcache_boot(tsdn_t *tsdn) {
unsigned i;
/* If necessary, clamp opt_lg_tcache_max. */
- if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
+ SMALL_MAXCLASS) {
tcache_maxclass = SMALL_MAXCLASS;
- else
+ } else {
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ }
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
* sizeof(tcache_bin_info_t), CACHELINE);
- if (tcache_bin_info == NULL)
+ if (tcache_bin_info == NULL) {
return (true);
+ }
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
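The tcaches_create()/tcaches_flush()/tcaches_destroy() functions restyled above appear to back the "tcache.create", "tcache.flush", and "tcache.destroy" mallctl entries. A minimal sketch of exercising an explicit thread cache from application code, assuming the documented jemalloc mallctl()/mallocx() interface (error handling mostly elided):

	unsigned tc;
	size_t sz = sizeof(tc);
	/* Reserve an explicit cache index (tcaches_create() under the hood). */
	if (mallctl("tcache.create", (void *)&tc, &sz, NULL, 0) != 0) {
		/* ... */
	}
	/* Route allocations through it. */
	void *p = mallocx(64, MALLOCX_TCACHE(tc));
	dallocx(p, MALLOCX_TCACHE(tc));
	/* Flush cached objects, then release the index. */
	mallctl("tcache.flush", NULL, NULL, (void *)&tc, sizeof(tc));
	mallctl("tcache.destroy", NULL, NULL, (void *)&tc, sizeof(tc));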
diff --git a/src/tsd.c b/src/tsd.c
index b4d7e79..f02fc28 100644
--- a/src/tsd.c
+++ b/src/tsd.c
@@ -12,20 +12,17 @@ malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
/******************************************************************************/
void *
-malloc_tsd_malloc(size_t size)
-{
+malloc_tsd_malloc(size_t size) {
return (a0malloc(CACHELINE_CEILING(size)));
}
void
-malloc_tsd_dalloc(void *wrapper)
-{
+malloc_tsd_dalloc(void *wrapper) {
a0dalloc(wrapper);
}
void
-malloc_tsd_no_cleanup(void *arg)
-{
+malloc_tsd_no_cleanup(void *arg) {
not_reached();
}
@@ -34,21 +31,22 @@ malloc_tsd_no_cleanup(void *arg)
JEMALLOC_EXPORT
#endif
void
-_malloc_thread_cleanup(void)
-{
+_malloc_thread_cleanup(void) {
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
- for (i = 0; i < ncleanups; i++)
+ for (i = 0; i < ncleanups; i++) {
pending[i] = true;
+ }
do {
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i]();
- if (pending[i])
+ if (pending[i]) {
again = true;
+ }
}
}
} while (again);
@@ -56,16 +54,14 @@ _malloc_thread_cleanup(void)
#endif
void
-malloc_tsd_cleanup_register(bool (*f)(void))
-{
+malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
void
-tsd_cleanup(void *arg)
-{
+tsd_cleanup(void *arg) {
tsd_t *tsd = (tsd_t *)arg;
switch (tsd->state) {
@@ -108,29 +104,27 @@ MALLOC_TSD
}
tsd_t *
-malloc_tsd_boot0(void)
-{
+malloc_tsd_boot0(void) {
tsd_t *tsd;
ncleanups = 0;
- if (tsd_boot0())
+ if (tsd_boot0()) {
return (NULL);
+ }
tsd = tsd_fetch();
*tsd_arenas_tdata_bypassp_get(tsd) = true;
return (tsd);
}
void
-malloc_tsd_boot1(void)
-{
+malloc_tsd_boot1(void) {
tsd_boot1();
*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
}
#ifdef _WIN32
static BOOL WINAPI
-_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
-{
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
@@ -164,8 +158,7 @@ BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *
-tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
-{
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
pthread_t self = pthread_self();
tsd_init_block_t *iter;
@@ -186,8 +179,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
}
void
-tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
-{
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
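_malloc_thread_cleanup() above keeps re-running the registered callbacks until none reports pending work, so one cleanup may safely depend on state that another has yet to tear down. A sketch of a conforming callback, matching the bool (*)(void) shape taken by malloc_tsd_cleanup_register() (my_cleanup is an illustrative name, not part of the source):

	static bool
	my_cleanup(void) {
		/* Tear down per-thread state here. */
		/* Return true to request another pass (work still pending). */
		return (false);
	}

	/* During bootstrap: */
	malloc_tsd_cleanup_register(&my_cleanup);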
diff --git a/src/util.c b/src/util.c
index c6ac4e1..a959539 100644
--- a/src/util.c
+++ b/src/util.c
@@ -46,8 +46,7 @@ static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
/* malloc_message() setup. */
static void
-wrtmessage(void *cbopaque, const char *s)
-{
+wrtmessage(void *cbopaque, const char *s) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
@@ -71,12 +70,12 @@ JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
* je_malloc_message(...) throughout the code.
*/
void
-malloc_write(const char *s)
-{
- if (je_malloc_message != NULL)
+malloc_write(const char *s) {
+ if (je_malloc_message != NULL) {
je_malloc_message(NULL, s);
- else
+ } else {
wrtmessage(NULL, s);
+ }
}
/*
@@ -84,8 +83,7 @@ malloc_write(const char *s)
* provide a wrapper.
*/
int
-buferror(int err, char *buf, size_t buflen)
-{
+buferror(int err, char *buf, size_t buflen) {
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL);
@@ -103,8 +101,7 @@ buferror(int err, char *buf, size_t buflen)
}
uintmax_t
-malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
-{
+malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
uintmax_t ret, digit;
unsigned b;
bool neg;
@@ -149,10 +146,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
switch (p[1]) {
case '0': case '1': case '2': case '3': case '4': case '5':
case '6': case '7':
- if (b == 0)
+ if (b == 0) {
b = 8;
- if (b == 8)
+ }
+ if (b == 8) {
p++;
+ }
break;
case 'X': case 'x':
switch (p[2]) {
@@ -162,10 +161,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
case 'F':
case 'a': case 'b': case 'c': case 'd': case 'e':
case 'f':
- if (b == 0)
+ if (b == 0) {
b = 16;
- if (b == 16)
+ }
+ if (b == 16) {
p += 2;
+ }
break;
default:
break;
@@ -177,8 +178,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
goto label_return;
}
}
- if (b == 0)
+ if (b == 0) {
b = 10;
+ }
/* Convert. */
ret = 0;
@@ -196,8 +198,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
}
p++;
}
- if (neg)
+ if (neg) {
ret = (uintmax_t)(-((intmax_t)ret));
+ }
if (p == ns) {
/* No conversion performed. */
@@ -211,15 +214,15 @@ label_return:
if (p == ns) {
/* No characters were converted. */
*endptr = (char *)nptr;
- } else
+ } else {
*endptr = (char *)p;
+ }
}
return (ret);
}
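With base 0, malloc_strtoumax() infers the radix from the prefix exactly as the switch above encodes it: "0x"/"0X" followed by a hex digit selects 16, a bare leading '0' selects 8, and anything else falls back to 10. For example:

	char *end;
	malloc_strtoumax("0x1f", &end, 0);	/* "0x" prefix -> base 16 -> 31 */
	malloc_strtoumax("017", &end, 0);	/* leading '0' -> base 8 -> 15 */
	malloc_strtoumax("17", &end, 0);	/* no prefix -> base 10 -> 17 */
	malloc_strtoumax("-5", &end, 10);	/* negated result, as per strtoumax(3) */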
static char *
-u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
-{
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
unsigned i;
i = U2S_BUFSIZE - 1;
@@ -261,19 +264,21 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
}
static char *
-d2s(intmax_t x, char sign, char *s, size_t *slen_p)
-{
+d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
bool neg;
- if ((neg = (x < 0)))
+ if ((neg = (x < 0))) {
x = -x;
+ }
s = u2s(x, 10, false, s, slen_p);
- if (neg)
+ if (neg) {
sign = '-';
+ }
switch (sign) {
case '-':
- if (!neg)
+ if (!neg) {
break;
+ }
/* Fall through. */
case ' ':
case '+':
@@ -287,8 +292,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p)
}
static char *
-o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
-{
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
s = u2s(x, 8, false, s, slen_p);
if (alt_form && *s != '0') {
s--;
@@ -299,8 +303,7 @@ o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
}
static char *
-x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
-{
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
s = u2s(x, 16, uppercase, s, slen_p);
if (alt_form) {
s -= 2;
@@ -311,14 +314,14 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
}
size_t
-malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
-{
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
size_t i;
const char *f;
#define APPEND_C(c) do { \
- if (i < size) \
+ if (i < size) { \
str[i] = (c); \
+ } \
i++; \
} while (0)
#define APPEND_S(s, slen) do { \
@@ -334,16 +337,18 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
(size_t)width - slen : 0); \
if (!left_justify && pad_len != 0) { \
size_t j; \
- for (j = 0; j < pad_len; j++) \
+ for (j = 0; j < pad_len; j++) { \
APPEND_C(' '); \
+ } \
} \
/* Value. */ \
APPEND_S(s, slen); \
/* Right padding. */ \
if (left_justify && pad_len != 0) { \
size_t j; \
- for (j = 0; j < pad_len; j++) \
+ for (j = 0; j < pad_len; j++) { \
APPEND_C(' '); \
+ } \
} \
} while (0)
#define GET_ARG_NUMERIC(val, len) do { \
@@ -454,10 +459,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
break;
}
/* Width/precision separator. */
- if (*f == '.')
+ if (*f == '.') {
f++;
- else
+ } else {
goto label_length;
+ }
/* Precision. */
switch (*f) {
case '*':
@@ -484,8 +490,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
if (*f == 'l') {
len = 'q';
f++;
- } else
+ } else {
len = 'l';
+ }
break;
case 'q': case 'j': case 't': case 'z':
len = *f;
@@ -576,10 +583,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}}
}
label_out:
- if (i < size)
+ if (i < size) {
str[i] = '\0';
- else
+ } else {
str[size - 1] = '\0';
+ }
#undef APPEND_C
#undef APPEND_S
@@ -590,8 +598,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
JEMALLOC_FORMAT_PRINTF(3, 4)
size_t
-malloc_snprintf(char *str, size_t size, const char *format, ...)
-{
+malloc_snprintf(char *str, size_t size, const char *format, ...) {
size_t ret;
va_list ap;
@@ -604,8 +611,7 @@ malloc_snprintf(char *str, size_t size, const char *format, ...)
void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap)
-{
+ const char *format, va_list ap) {
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
@@ -630,8 +636,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
JEMALLOC_FORMAT_PRINTF(3, 4)
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...)
-{
+ const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -642,8 +647,7 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
/* Print to stderr in such a way as to avoid memory allocation. */
JEMALLOC_FORMAT_PRINTF(1, 2)
void
-malloc_printf(const char *format, ...)
-{
+malloc_printf(const char *format, ...) {
va_list ap;
va_start(ap, format);
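Note the contract that malloc_vsnprintf()'s label_out epilogue (above) implies for all of these wrappers: the output buffer is always NUL-terminated, while the return value counts the characters that would have been emitted given unlimited space, so truncation is detectable:

	char buf[8];
	size_t n = malloc_snprintf(buf, sizeof(buf), "pid: %d", 1234567);
	if (n >= sizeof(buf)) {
		/* Output was truncated; buf still ends in '\0'. */
	}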
diff --git a/src/witness.c b/src/witness.c
index ffc7e24..f8d6621 100644
--- a/src/witness.c
+++ b/src/witness.c
@@ -3,8 +3,7 @@
void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp, void *opaque)
-{
+ witness_comp_t *comp, void *opaque) {
witness->name = name;
witness->rank = rank;
witness->comp = comp;
@@ -16,8 +15,7 @@ witness_init(witness_t *witness, const char *name, witness_rank_t rank,
#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
#endif
void
-witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
-{
+witness_lock_error(const witness_list_t *witnesses, const witness_t *witness) {
witness_t *w;
malloc_printf("<jemalloc>: Lock rank order reversal:");
@@ -38,8 +36,7 @@ witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
#endif
void
-witness_owner_error(const witness_t *witness)
-{
+witness_owner_error(const witness_t *witness) {
malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
abort();
@@ -55,8 +52,7 @@ witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
#endif
void
-witness_not_owner_error(const witness_t *witness)
-{
+witness_not_owner_error(const witness_t *witness) {
malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
witness->rank);
abort();
@@ -73,8 +69,7 @@ witness_not_owner_error_t *witness_not_owner_error =
#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
#endif
void
-witness_lockless_error(const witness_list_t *witnesses)
-{
+witness_lockless_error(const witness_list_t *witnesses) {
witness_t *w;
malloc_printf("<jemalloc>: Should not own any locks:");
@@ -92,28 +87,24 @@ witness_lockless_error_t *witness_lockless_error =
#endif
void
-witnesses_cleanup(tsd_t *tsd)
-{
+witnesses_cleanup(tsd_t *tsd) {
witness_assert_lockless(tsd_tsdn(tsd));
/* Do nothing. */
}
void
-witness_prefork(tsd_t *tsd)
-{
+witness_prefork(tsd_t *tsd) {
tsd_witness_fork_set(tsd, true);
}
void
-witness_postfork_parent(tsd_t *tsd)
-{
+witness_postfork_parent(tsd_t *tsd) {
tsd_witness_fork_set(tsd, false);
}
void
-witness_postfork_child(tsd_t *tsd)
-{
+witness_postfork_child(tsd_t *tsd) {
#ifndef JEMALLOC_MUTEX_INIT_CB
witness_list_t *witnesses;
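For context, witnesses are the debug-build lock-order checker: each lock is tagged with a rank at initialization, and acquiring locks out of rank order trips witness_lock_error() above. A hedged sketch of tagging a lock (MY_WITNESS_RANK is an illustrative constant, not part of the source):

	witness_t w;
	/* NULL comparator: rank ordering alone is enforced. */
	witness_init(&w, "my_lock", (witness_rank_t)MY_WITNESS_RANK, NULL, NULL);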
diff --git a/src/zone.c b/src/zone.c
index c54f4a4..8e10663 100644
--- a/src/zone.c
+++ b/src/zone.c
@@ -125,8 +125,7 @@ static void zone_reinit_lock(malloc_zone_t *zone);
*/
static size_t
-zone_size(malloc_zone_t *zone, const void *ptr)
-{
+zone_size(malloc_zone_t *zone, const void *ptr) {
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
@@ -140,20 +139,17 @@ zone_size(malloc_zone_t *zone, const void *ptr)
}
static void *
-zone_malloc(malloc_zone_t *zone, size_t size)
-{
+zone_malloc(malloc_zone_t *zone, size_t size) {
return (je_malloc(size));
}
static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
-{
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
return (je_calloc(num, size));
}
static void *
-zone_valloc(malloc_zone_t *zone, size_t size)
-{
+zone_valloc(malloc_zone_t *zone, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
@@ -162,8 +158,7 @@ zone_valloc(malloc_zone_t *zone, size_t size)
}
static void
-zone_free(malloc_zone_t *zone, void *ptr)
-{
+zone_free(malloc_zone_t *zone, void *ptr) {
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
je_free(ptr);
return;
@@ -173,17 +168,16 @@ zone_free(malloc_zone_t *zone, void *ptr)
}
static void *
-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
-{
- if (ivsalloc(tsdn_fetch(), ptr) != 0)
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
return (je_realloc(ptr, size));
+ }
return (realloc(ptr, size));
}
static void *
-zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
-{
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
@@ -192,8 +186,7 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
}
static void
-zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
-{
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
size_t alloc_size;
alloc_size = ivsalloc(tsdn_fetch(), ptr);
@@ -207,16 +200,14 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
}
static void
-zone_destroy(malloc_zone_t *zone)
-{
+zone_destroy(malloc_zone_t *zone) {
/* This function should never be called. */
not_reached();
}
static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
- unsigned num_requested)
-{
+ unsigned num_requested) {
unsigned i;
for (i = 0; i < num_requested; i++) {
@@ -230,8 +221,7 @@ zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
- unsigned num_to_be_freed)
-{
+ unsigned num_to_be_freed) {
unsigned i;
for (i = 0; i < num_to_be_freed; i++) {
@@ -241,53 +231,47 @@ zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
}
static size_t
-zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal)
-{
+zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
return 0;
}
static size_t
-zone_good_size(malloc_zone_t *zone, size_t size)
-{
- if (size == 0)
+zone_good_size(malloc_zone_t *zone, size_t size) {
+ if (size == 0) {
size = 1;
+ }
return (s2u(size));
}
static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
- vm_range_recorder_t recorder)
-{
+ vm_range_recorder_t recorder) {
return KERN_SUCCESS;
}
static boolean_t
-zone_check(malloc_zone_t *zone)
-{
+zone_check(malloc_zone_t *zone) {
return true;
}
static void
-zone_print(malloc_zone_t *zone, boolean_t verbose)
-{
+zone_print(malloc_zone_t *zone, boolean_t verbose) {
}
static void
-zone_log(malloc_zone_t *zone, void *address)
-{
+zone_log(malloc_zone_t *zone, void *address) {
}
static void
-zone_force_lock(malloc_zone_t *zone)
-{
- if (isthreaded)
+zone_force_lock(malloc_zone_t *zone) {
+ if (isthreaded) {
jemalloc_prefork();
+ }
}
static void
-zone_force_unlock(malloc_zone_t *zone)
-{
+zone_force_unlock(malloc_zone_t *zone) {
/*
* Call jemalloc_postfork_child() rather than
* jemalloc_postfork_parent(), because this function is executed by both
@@ -295,13 +279,13 @@ zone_force_unlock(malloc_zone_t *zone)
* reinitialized, but the child cannot unlock mutexes that were locked
* by the parent.
*/
- if (isthreaded)
+ if (isthreaded) {
jemalloc_postfork_child();
+ }
}
static void
-zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
-{
+zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
/* We make no effort to actually fill the values */
stats->blocks_in_use = 0;
stats->size_in_use = 0;
@@ -310,23 +294,20 @@ zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
}
static boolean_t
-zone_locked(malloc_zone_t *zone)
-{
+zone_locked(malloc_zone_t *zone) {
/* Pretend no lock is being held */
return false;
}
static void
-zone_reinit_lock(malloc_zone_t *zone)
-{
+zone_reinit_lock(malloc_zone_t *zone) {
/* As of OSX 10.12, this function is only used when force_unlock would
* be used if the zone version were < 9. So just use force_unlock. */
zone_force_unlock(zone);
}
static void
-zone_init(void)
-{
+zone_init(void) {
jemalloc_zone.size = zone_size;
jemalloc_zone.malloc = zone_malloc;
jemalloc_zone.calloc = zone_calloc;
@@ -364,8 +345,7 @@ zone_init(void)
}
static malloc_zone_t *
-zone_default_get(void)
-{
+zone_default_get(void) {
malloc_zone_t **zones = NULL;
unsigned int num_zones = 0;
@@ -387,16 +367,16 @@ zone_default_get(void)
num_zones = 0;
}
- if (num_zones)
+ if (num_zones) {
return (zones[0]);
+ }
return (malloc_default_zone());
}
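zone_default_get() above walks the zone list and treats zones[0] as the effective default. The promotion that follows rests on Darwin's registration primitives; in essence (a sketch, assuming the malloc/malloc.h zone API):

	/* Re-register the default zone so that jemalloc_zone precedes it. */
	malloc_zone_register(&jemalloc_zone);
	malloc_zone_unregister(default_zone);
	malloc_zone_register(default_zone);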
/* As written, this function can only promote jemalloc_zone. */
static void
-zone_promote(void)
-{
+zone_promote(void) {
malloc_zone_t *zone;
do {
@@ -433,16 +413,16 @@ zone_promote(void)
JEMALLOC_ATTR(constructor)
void
-zone_register(void)
-{
+zone_register(void) {
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
default_zone = zone_default_get();
if (!default_zone->zone_name || strcmp(default_zone->zone_name,
- "DefaultMallocZone") != 0)
+ "DefaultMallocZone") != 0) {
return;
+ }
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
diff --git a/test/include/test/SFMT.h b/test/include/test/SFMT.h
index 09c1607..4ad7484 100644
--- a/test/include/test/SFMT.h
+++ b/test/include/test/SFMT.h
@@ -97,75 +97,65 @@ double genrand_res53_mix(sfmt_t *ctx);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
/* These real versions are due to Isaku Wada */
/** generates a random number on [0,1]-real-interval */
-JEMALLOC_INLINE double to_real1(uint32_t v)
-{
+JEMALLOC_INLINE double to_real1(uint32_t v) {
return v * (1.0/4294967295.0);
/* divided by 2^32-1 */
}
/** generates a random number on [0,1]-real-interval */
-JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx)
-{
+JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) {
return to_real1(gen_rand32(ctx));
}
/** generates a random number on [0,1)-real-interval */
-JEMALLOC_INLINE double to_real2(uint32_t v)
-{
+JEMALLOC_INLINE double to_real2(uint32_t v) {
return v * (1.0/4294967296.0);
/* divided by 2^32 */
}
/** generates a random number on [0,1)-real-interval */
-JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx)
-{
+JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) {
return to_real2(gen_rand32(ctx));
}
/** generates a random number on (0,1)-real-interval */
-JEMALLOC_INLINE double to_real3(uint32_t v)
-{
+JEMALLOC_INLINE double to_real3(uint32_t v) {
return (((double)v) + 0.5)*(1.0/4294967296.0);
/* divided by 2^32 */
}
/** generates a random number on (0,1)-real-interval */
-JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx)
-{
+JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) {
return to_real3(gen_rand32(ctx));
}
/** These real versions are due to Isaku Wada */
/** generates a random number on [0,1) with 53-bit resolution*/
-JEMALLOC_INLINE double to_res53(uint64_t v)
-{
+JEMALLOC_INLINE double to_res53(uint64_t v) {
return v * (1.0/18446744073709551616.0L);
}
/** generates a random number on [0,1) with 53-bit resolution from two
* 32 bit integers */
-JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y)
-{
+JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) {
return to_res53(x | ((uint64_t)y << 32));
}
/** generates a random number on [0,1) with 53-bit resolution
*/
-JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx)
-{
+JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) {
return to_res53(gen_rand64(ctx));
-} 
+}
/** generates a random number on [0,1) with 53-bit resolution
using 32bit integer.
*/
-JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx)
-{
+JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) {
uint32_t x, y;
x = gen_rand32(ctx);
y = gen_rand32(ctx);
return to_res53_mix(x, y);
-} 
+}
#endif
#endif
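The to_real* helpers map uniformly distributed integers onto unit intervals: to_real1() divides by 2^32-1 so the interval is closed at 1.0, while to_real2() divides by 2^32 to keep it half-open. Typical use, assuming SFMT's usual init_gen_rand()/fini_gen_rand() entry points (declared in the portion of this header not shown):

	sfmt_t *ctx = init_gen_rand(5489);	/* arbitrary seed */
	double u = genrand_real2(ctx);		/* uniform on [0,1) */
	double r = genrand_res53(ctx);		/* [0,1) at full 53-bit resolution */
	fini_gen_rand(ctx);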
diff --git a/test/include/test/btalloc.h b/test/include/test/btalloc.h
index c3f9d4d..98366af 100644
--- a/test/include/test/btalloc.h
+++ b/test/include/test/btalloc.h
@@ -8,13 +8,12 @@ btalloc_n_proto(1)
#define btalloc_n_gen(n) \
void * \
-btalloc_##n(size_t size, unsigned bits) \
-{ \
+btalloc_##n(size_t size, unsigned bits) { \
void *p; \
\
- if (bits == 0) \
+ if (bits == 0) { \
p = mallocx(size, 0); \
- else { \
+ } else { \
switch (bits & 0x1U) { \
case 0: \
p = (btalloc_0(size, bits >> 1)); \
diff --git a/test/include/test/extent_hooks.h b/test/include/test/extent_hooks.h
index f50747d..a664c43 100644
--- a/test/include/test/extent_hooks.h
+++ b/test/include/test/extent_hooks.h
@@ -73,8 +73,7 @@ static bool did_merge;
static void *
extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind)
-{
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
void *ret;
TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, "
@@ -86,8 +85,9 @@ extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook,
"Wrong hook function");
called_alloc = true;
- if (!try_alloc)
+ if (!try_alloc) {
return (NULL);
+ }
ret = default_hooks->alloc(default_hooks, new_addr, size, alignment,
zero, commit, 0);
did_alloc = (ret != NULL);
@@ -96,8 +96,7 @@ extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
static bool
extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind)
-{
+ bool committed, unsigned arena_ind) {
bool err;
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
@@ -108,8 +107,9 @@ extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook,
"Wrong hook function");
called_dalloc = true;
- if (!try_dalloc)
+ if (!try_dalloc) {
return (true);
+ }
err = default_hooks->dalloc(default_hooks, addr, size, committed, 0);
did_dalloc = !err;
return (err);
@@ -117,8 +117,7 @@ extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
static bool
extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t offset, size_t length, unsigned arena_ind) {
bool err;
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
@@ -129,8 +128,9 @@ extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert_ptr_eq(extent_hooks->commit, extent_commit_hook,
"Wrong hook function");
called_commit = true;
- if (!try_commit)
+ if (!try_commit) {
return (true);
+ }
err = default_hooks->commit(default_hooks, addr, size, offset, length,
0);
did_commit = !err;
@@ -139,8 +139,7 @@ extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
static bool
extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t offset, size_t length, unsigned arena_ind) {
bool err;
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
@@ -151,8 +150,9 @@ extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook,
"Wrong hook function");
called_decommit = true;
- if (!try_decommit)
+ if (!try_decommit) {
return (true);
+ }
err = default_hooks->decommit(default_hooks, addr, size, offset, length,
0);
did_decommit = !err;
@@ -161,8 +161,7 @@ extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
static bool
extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t offset, size_t length, unsigned arena_ind) {
bool err;
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
@@ -173,8 +172,9 @@ extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook,
"Wrong hook function");
called_purge_lazy = true;
- if (!try_purge_lazy)
+ if (!try_purge_lazy) {
return (true);
+ }
err = default_hooks->purge_lazy == NULL ||
default_hooks->purge_lazy(default_hooks, addr, size, offset, length,
0);
@@ -184,8 +184,7 @@ extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
static bool
extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind)
-{
+ size_t offset, size_t length, unsigned arena_ind) {
bool err;
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
@@ -196,8 +195,9 @@ extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook,
"Wrong hook function");
called_purge_forced = true;
- if (!try_purge_forced)
+ if (!try_purge_forced) {
return (true);
+ }
err = default_hooks->purge_forced == NULL ||
default_hooks->purge_forced(default_hooks, addr, size, offset,
length, 0);
@@ -207,8 +207,7 @@ extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
static bool
extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t size_a, size_t size_b, bool committed, unsigned arena_ind)
-{
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
bool err;
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, "
@@ -220,8 +219,9 @@ extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert_ptr_eq(extent_hooks->split, extent_split_hook,
"Wrong hook function");
called_split = true;
- if (!try_split)
+ if (!try_split) {
return (true);
+ }
err = (default_hooks->split == NULL ||
default_hooks->split(default_hooks, addr, size, size_a, size_b,
committed, 0));
@@ -231,8 +231,7 @@ extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
static bool
extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- void *addr_b, size_t size_b, bool committed, unsigned arena_ind)
-{
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
bool err;
TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p "
@@ -244,8 +243,9 @@ extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
assert_ptr_eq(extent_hooks->merge, extent_merge_hook,
"Wrong hook function");
called_merge = true;
- if (!try_merge)
+ if (!try_merge) {
return (true);
+ }
err = (default_hooks->merge == NULL ||
default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b,
committed, 0));
@@ -254,8 +254,7 @@ extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
}
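Each wrapper above records that it was called, optionally vetoes the operation via its try_* flag, and otherwise defers to the saved default hooks. Installing such a table on an arena goes through the arena.<i>.extent_hooks mallctl; a sketch, where hooks stands for a locally defined extent_hooks_t wired to these wrappers and arena_ind is the target arena:

	extent_hooks_t *new_hooks = &hooks;
	char cmd[128];
	malloc_snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_ind);
	assert_d_eq(mallctl(cmd, NULL, NULL, (void *)&new_hooks,
	    sizeof(extent_hooks_t *)), 0, "Unexpected mallctl() failure");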
static void
-extent_hooks_prep(void)
-{
+extent_hooks_prep(void) {
size_t sz;
sz = sizeof(default_hooks);
diff --git a/test/include/test/jemalloc_test.h.in b/test/include/test/jemalloc_test.h.in
index 2dd0cde..a0b9474 100644
--- a/test/include/test/jemalloc_test.h.in
+++ b/test/include/test/jemalloc_test.h.in
@@ -159,8 +159,9 @@ static const bool config_debug =
} while (0)
#define assert_not_implemented(e) do { \
- if (!(e)) \
+ if (!(e)) { \
not_implemented(); \
+ } \
} while (0)
#ifdef __cplusplus
diff --git a/test/include/test/math.h b/test/include/test/math.h
index 1728d60..08be69f 100644
--- a/test/include/test/math.h
+++ b/test/include/test/math.h
@@ -16,8 +16,7 @@ double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
* [S14]. Communications of the ACM 9(9):684.
*/
JEMALLOC_INLINE double
-ln_gamma(double x)
-{
+ln_gamma(double x) {
double f, z;
assert(x > 0.0);
@@ -31,8 +30,9 @@ ln_gamma(double x)
}
x = z;
f = -log(f);
- } else
+ } else {
f = 0.0;
+ }
z = 1.0 / (x * x);
@@ -51,8 +51,7 @@ ln_gamma(double x)
* Applied Statistics 19:285-287.
*/
JEMALLOC_INLINE double
-i_gamma(double x, double p, double ln_gamma_p)
-{
+i_gamma(double x, double p, double ln_gamma_p) {
double acu, factor, oflo, gin, term, rn, a, b, an, dif;
double pn[6];
unsigned i;
@@ -60,8 +59,9 @@ i_gamma(double x, double p, double ln_gamma_p)
assert(p > 0.0);
assert(x >= 0.0);
- if (x == 0.0)
+ if (x == 0.0) {
return (0.0);
+ }
acu = 1.0e-10;
oflo = 1.0e30;
@@ -99,8 +99,9 @@ i_gamma(double x, double p, double ln_gamma_p)
b += 2.0;
term += 1.0;
an = a * term;
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 2; i++) {
pn[i+4] = b * pn[i+2] - an * pn[i];
+ }
if (pn[5] != 0.0) {
rn = pn[4] / pn[5];
dif = fabs(gin - rn);
@@ -110,12 +111,14 @@ i_gamma(double x, double p, double ln_gamma_p)
}
gin = rn;
}
- for (i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++) {
pn[i] = pn[i+2];
+ }
if (fabs(pn[4]) >= oflo) {
- for (i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++) {
pn[i] /= oflo;
+ }
}
}
}
@@ -132,8 +135,7 @@ i_gamma(double x, double p, double ln_gamma_p)
* distribution. Applied Statistics 37(3):477-484.
*/
JEMALLOC_INLINE double
-pt_norm(double p)
-{
+pt_norm(double p) {
double q, r, ret;
assert(p > 0.0 && p < 1.0);
@@ -153,10 +155,11 @@ pt_norm(double p)
r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
* r + 1.0));
} else {
- if (q < 0.0)
+ if (q < 0.0) {
r = p;
- else
+ } else {
r = 1.0 - p;
+ }
assert(r > 0.0);
r = sqrt(-log(r));
@@ -198,8 +201,9 @@ pt_norm(double p)
5.99832206555887937690e-1)
* r + 1.0));
}
- if (q < 0.0)
+ if (q < 0.0) {
ret = -ret;
+ }
return (ret);
}
}
@@ -219,8 +223,7 @@ pt_norm(double p)
* points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
*/
JEMALLOC_INLINE double
-pt_chi2(double p, double df, double ln_gamma_df_2)
-{
+pt_chi2(double p, double df, double ln_gamma_df_2) {
double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
unsigned i;
@@ -236,8 +239,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2)
if (df < -1.24 * log(p)) {
/* Starting approximation for small Chi^2. */
ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
- if (ch - e < 0.0)
+ if (ch - e < 0.0) {
return (ch);
+ }
} else {
if (df > 0.32) {
x = pt_norm(p);
@@ -263,8 +267,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2)
* (13.32 + 3.0 * ch)) / p2;
ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
c * aa) * p2 / p1) / t;
- if (fabs(q / ch - 1.0) - 0.01 <= 0.0)
+ if (fabs(q / ch - 1.0) - 0.01 <= 0.0) {
break;
+ }
}
}
}
@@ -273,8 +278,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2)
/* Calculation of seven-term Taylor series. */
q = ch;
p1 = 0.5 * ch;
- if (p1 < 0.0)
+ if (p1 < 0.0) {
return (-1.0);
+ }
p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
b = t / ch;
@@ -290,8 +296,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2)
s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
- b * (s4 - b * (s5 - b * s6))))));
- if (fabs(q / ch - 1.0) <= e)
+ if (fabs(q / ch - 1.0) <= e) {
break;
+ }
}
return (ch);
@@ -303,8 +310,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2)
* p.
*/
JEMALLOC_INLINE double
-pt_gamma(double p, double shape, double scale, double ln_gamma_shape)
-{
+pt_gamma(double p, double shape, double scale, double ln_gamma_shape) {
return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale);
}
#endif
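pt_gamma() composes the pieces above: a Gamma(shape, scale) quantile is a chi-square quantile with df = 2*shape, scaled by 0.5*scale. For instance, the 99th percentile of a Gamma(2, 3) distribution:

	double shape = 2.0, scale = 3.0;
	double q99 = pt_gamma(0.99, shape, scale, ln_gamma(shape));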
diff --git a/test/include/test/mq.h b/test/include/test/mq.h
index a974eb9..fd66de9 100644
--- a/test/include/test/mq.h
+++ b/test/include/test/mq.h
@@ -37,20 +37,19 @@ typedef struct { \
a_attr bool \
a_prefix##init(a_mq_type *mq) { \
\
- if (mtx_init(&mq->lock)) \
+ if (mtx_init(&mq->lock)) { \
return (true); \
+ } \
ql_new(&mq->msgs); \
mq->count = 0; \
return (false); \
} \
a_attr void \
-a_prefix##fini(a_mq_type *mq) \
-{ \
+a_prefix##fini(a_mq_type *mq) { \
mtx_fini(&mq->lock); \
} \
a_attr unsigned \
-a_prefix##count(a_mq_type *mq) \
-{ \
+a_prefix##count(a_mq_type *mq) { \
unsigned count; \
\
mtx_lock(&mq->lock); \
@@ -59,8 +58,7 @@ a_prefix##count(a_mq_type *mq) \
return (count); \
} \
a_attr a_mq_msg_type * \
-a_prefix##tryget(a_mq_type *mq) \
-{ \
+a_prefix##tryget(a_mq_type *mq) { \
a_mq_msg_type *msg; \
\
mtx_lock(&mq->lock); \
@@ -73,32 +71,33 @@ a_prefix##tryget(a_mq_type *mq) \
return (msg); \
} \
a_attr a_mq_msg_type * \
-a_prefix##get(a_mq_type *mq) \
-{ \
+a_prefix##get(a_mq_type *mq) { \
a_mq_msg_type *msg; \
unsigned ns; \
\
msg = a_prefix##tryget(mq); \
- if (msg != NULL) \
+ if (msg != NULL) { \
return (msg); \
+ } \
\
ns = 1; \
while (true) { \
mq_nanosleep(ns); \
msg = a_prefix##tryget(mq); \
- if (msg != NULL) \
+ if (msg != NULL) { \
return (msg); \
+ } \
if (ns < 1000*1000*1000) { \
/* Double sleep time, up to max 1 second. */ \
ns <<= 1; \
- if (ns > 1000*1000*1000) \
+ if (ns > 1000*1000*1000) { \
ns = 1000*1000*1000; \
+ } \
} \
} \
} \
a_attr void \
-a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \
-{ \
+a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \
\
mtx_lock(&mq->lock); \
ql_elm_new(msg, a_field); \
diff --git a/test/include/test/test.h b/test/include/test/test.h
index 8c69fc2..a1b6f72 100644
--- a/test/include/test/test.h
+++ b/test/include/test/test.h
@@ -298,8 +298,7 @@ typedef void (test_t)(void);
#define TEST_BEGIN(f) \
static void \
-f(void) \
-{ \
+f(void) { \
p_test_init(#f);
#define TEST_END \
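After the restyle, both the macro body and its call sites put the opening brace on the same line; the call-site pattern, used throughout the integration tests below, is:

	TEST_BEGIN(test_example) {
		assert_true(true, "Tautology");
	}
	TEST_END

	int
	main(void) {
		return (test(
		    test_example));
	}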
diff --git a/test/integration/MALLOCX_ARENA.c b/test/integration/MALLOCX_ARENA.c
index 1d9e423..f706e5a 100644
--- a/test/integration/MALLOCX_ARENA.c
+++ b/test/integration/MALLOCX_ARENA.c
@@ -11,8 +11,7 @@ static bool have_dss =
;
void *
-thd_start(void *arg)
-{
+thd_start(void *arg) {
unsigned thread_ind = (unsigned)(uintptr_t)arg;
unsigned arena_ind;
void *p;
@@ -45,8 +44,7 @@ thd_start(void *arg)
return (NULL);
}
-TEST_BEGIN(test_MALLOCX_ARENA)
-{
+TEST_BEGIN(test_MALLOCX_ARENA) {
thd_t thds[NTHREADS];
unsigned i;
@@ -55,14 +53,14 @@ TEST_BEGIN(test_MALLOCX_ARENA)
(void *)(uintptr_t)i);
}
- for (i = 0; i < NTHREADS; i++)
+ for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
+ }
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_MALLOCX_ARENA));
}
diff --git a/test/integration/aligned_alloc.c b/test/integration/aligned_alloc.c
index 52b69ac..8a3ad6b 100644
--- a/test/integration/aligned_alloc.c
+++ b/test/integration/aligned_alloc.c
@@ -8,14 +8,12 @@
* potential OOM on e.g. 32-bit Windows.
*/
static void
-purge(void)
-{
+purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
-TEST_BEGIN(test_alignment_errors)
-{
+TEST_BEGIN(test_alignment_errors) {
size_t alignment;
void *p;
@@ -36,8 +34,7 @@ TEST_BEGIN(test_alignment_errors)
}
TEST_END
-TEST_BEGIN(test_oom_errors)
-{
+TEST_BEGIN(test_oom_errors) {
size_t alignment, size;
void *p;
@@ -81,15 +78,15 @@ TEST_BEGIN(test_oom_errors)
}
TEST_END
-TEST_BEGIN(test_alignment_and_size)
-{
+TEST_BEGIN(test_alignment_and_size) {
#define NITER 4
size_t alignment, size, total;
unsigned i;
void *ps[NITER];
- for (i = 0; i < NITER; i++)
+ for (i = 0; i < NITER; i++) {
ps[i] = NULL;
+ }
for (alignment = 8;
alignment <= MAXALIGN;
@@ -110,8 +107,9 @@ TEST_BEGIN(test_alignment_and_size)
alignment, size, size, buf);
}
total += malloc_usable_size(ps[i]);
- if (total >= (MAXALIGN << 1))
+ if (total >= (MAXALIGN << 1)) {
break;
+ }
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
@@ -127,8 +125,7 @@ TEST_BEGIN(test_alignment_and_size)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_alignment_errors,
test_oom_errors,
diff --git a/test/integration/allocated.c b/test/integration/allocated.c
index 7570c52..555d40a 100644
--- a/test/integration/allocated.c
+++ b/test/integration/allocated.c
@@ -9,8 +9,7 @@ static const bool config_stats =
;
void *
-thd_start(void *arg)
-{
+thd_start(void *arg) {
int err;
void *p;
uint64_t a0, a1, d0, d1;
@@ -19,15 +18,17 @@ thd_start(void *arg)
sz = sizeof(a0);
if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
- if (err == ENOENT)
+ if (err == ENOENT) {
goto label_ENOENT;
+ }
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(ap0);
if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
- if (err == ENOENT)
+ if (err == ENOENT) {
goto label_ENOENT;
+ }
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
@@ -37,16 +38,18 @@ thd_start(void *arg)
sz = sizeof(d0);
if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
- if (err == ENOENT)
+ if (err == ENOENT) {
goto label_ENOENT;
+ }
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(dp0);
if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
0))) {
- if (err == ENOENT)
+ if (err == ENOENT) {
goto label_ENOENT;
+ }
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
@@ -96,14 +99,12 @@ label_ENOENT:
return (NULL);
}
-TEST_BEGIN(test_main_thread)
-{
+TEST_BEGIN(test_main_thread) {
thd_start(NULL);
}
TEST_END
-TEST_BEGIN(test_subthread)
-{
+TEST_BEGIN(test_subthread) {
thd_t thd;
thd_create(&thd, thd_start, NULL);
@@ -112,8 +113,7 @@ TEST_BEGIN(test_subthread)
TEST_END
int
-main(void)
-{
+main(void) {
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
diff --git a/test/integration/cpp/basic.cpp b/test/integration/cpp/basic.cpp
index b208e1d..fe8874f 100644
--- a/test/integration/cpp/basic.cpp
+++ b/test/integration/cpp/basic.cpp
@@ -1,8 +1,7 @@
#include <memory>
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_basic)
-{
+TEST_BEGIN(test_basic) {
auto foo = new long(4);
assert_ptr_not_null(foo, "Unexpected new[] failure");
delete foo;
@@ -20,8 +19,7 @@ TEST_BEGIN(test_basic)
TEST_END
int
-main()
-{
+main() {
return (test(
test_basic));
}
diff --git a/test/integration/extent.c b/test/integration/extent.c
index 30849b0..d12c123 100644
--- a/test/integration/extent.c
+++ b/test/integration/extent.c
@@ -7,8 +7,7 @@ const char *malloc_conf = "junk:false";
#include "test/extent_hooks.h"
static void
-test_extent_body(unsigned arena_ind)
-{
+test_extent_body(unsigned arena_ind) {
void *p;
size_t large0, large1, large2, sz;
size_t purge_mib[3];
@@ -67,15 +66,17 @@ test_extent_body(unsigned arena_ind)
xallocx_success_b = (xallocx(p, large0, 0, flags) == large0);
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
0, "Unexpected arena.%u.purge error", arena_ind);
- if (xallocx_success_b)
+ if (xallocx_success_b) {
assert_true(did_split, "Expected split");
+ }
xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2);
if (did_split) {
assert_b_eq(did_decommit, did_commit,
"Expected decommit/commit match");
}
- if (xallocx_success_b && xallocx_success_c)
+ if (xallocx_success_b && xallocx_success_c) {
assert_true(did_merge, "Expected merge");
+ }
dallocx(p, flags);
try_dalloc = true;
try_decommit = false;
@@ -86,8 +87,7 @@ test_extent_body(unsigned arena_ind)
dallocx(p, flags);
}
-TEST_BEGIN(test_extent_manual_hook)
-{
+TEST_BEGIN(test_extent_manual_hook) {
unsigned arena_ind;
size_t old_size, new_size, sz;
size_t hooks_mib[3];
@@ -155,8 +155,7 @@ TEST_BEGIN(test_extent_manual_hook)
}
TEST_END
-TEST_BEGIN(test_extent_auto_hook)
-{
+TEST_BEGIN(test_extent_auto_hook) {
unsigned arena_ind;
size_t new_size, sz;
extent_hooks_t *new_hooks;
@@ -174,8 +173,7 @@ TEST_BEGIN(test_extent_auto_hook)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_extent_manual_hook,
test_extent_auto_hook));
diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c
index 7617b1b..ec04c39 100644
--- a/test/integration/mallocx.c
+++ b/test/integration/mallocx.c
@@ -5,8 +5,7 @@ const char *malloc_conf = "junk:false";
#endif
static unsigned
-get_nsizes_impl(const char *cmd)
-{
+get_nsizes_impl(const char *cmd) {
unsigned ret;
size_t z;
@@ -18,14 +17,12 @@ get_nsizes_impl(const char *cmd)
}
static unsigned
-get_nlarge(void)
-{
+get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
}
static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
+get_size_impl(const char *cmd, size_t ind) {
size_t ret;
size_t z;
size_t mib[4];
@@ -43,8 +40,7 @@ get_size_impl(const char *cmd, size_t ind)
}
static size_t
-get_large_size(size_t ind)
-{
+get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
}
@@ -54,14 +50,12 @@ get_large_size(size_t ind)
* potential OOM on e.g. 32-bit Windows.
*/
static void
-purge(void)
-{
+purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
-TEST_BEGIN(test_overflow)
-{
+TEST_BEGIN(test_overflow) {
size_t largemax;
largemax = get_large_size(get_nlarge()-1);
@@ -81,8 +75,7 @@ TEST_BEGIN(test_overflow)
}
TEST_END
-TEST_BEGIN(test_oom)
-{
+TEST_BEGIN(test_oom) {
size_t largemax;
bool oom;
void *ptrs[3];
@@ -96,15 +89,17 @@ TEST_BEGIN(test_oom)
oom = false;
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
ptrs[i] = mallocx(largemax, 0);
- if (ptrs[i] == NULL)
+ if (ptrs[i] == NULL) {
oom = true;
+ }
}
assert_true(oom,
"Expected OOM during series of calls to mallocx(size=%zu, 0)",
largemax);
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
- if (ptrs[i] != NULL)
+ if (ptrs[i] != NULL) {
dallocx(ptrs[i], 0);
+ }
}
purge();
@@ -122,8 +117,7 @@ TEST_BEGIN(test_oom)
}
TEST_END
-TEST_BEGIN(test_basic)
-{
+TEST_BEGIN(test_basic) {
#define MAXSZ (((size_t)1) << 23)
size_t sz;
@@ -160,16 +154,16 @@ TEST_BEGIN(test_basic)
}
TEST_END
-TEST_BEGIN(test_alignment_and_size)
-{
+TEST_BEGIN(test_alignment_and_size) {
#define MAXALIGN (((size_t)1) << 23)
#define NITER 4
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
- for (i = 0; i < NITER; i++)
+ for (i = 0; i < NITER; i++) {
ps[i] = NULL;
+ }
for (alignment = 8;
alignment <= MAXALIGN;
@@ -202,8 +196,9 @@ TEST_BEGIN(test_alignment_and_size)
" alignment=%zu, size=%zu", ps[i],
alignment, sz);
total += rsz;
- if (total >= (MAXALIGN << 1))
+ if (total >= (MAXALIGN << 1)) {
break;
+ }
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
@@ -220,8 +215,7 @@ TEST_BEGIN(test_alignment_and_size)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_overflow,
test_oom,
diff --git a/test/integration/overflow.c b/test/integration/overflow.c
index ad867e7..a7f4b51 100644
--- a/test/integration/overflow.c
+++ b/test/integration/overflow.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_overflow)
-{
+TEST_BEGIN(test_overflow) {
unsigned nlextents;
size_t mib[4];
size_t sz, miblen, max_size_class;
@@ -41,8 +40,7 @@ TEST_BEGIN(test_overflow)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_overflow));
}
diff --git a/test/integration/posix_memalign.c b/test/integration/posix_memalign.c
index dace10f..6bbf183 100644
--- a/test/integration/posix_memalign.c
+++ b/test/integration/posix_memalign.c
@@ -8,14 +8,12 @@
* potential OOM on e.g. 32-bit Windows.
*/
static void
-purge(void)
-{
+purge(void) {
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl error");
}
-TEST_BEGIN(test_alignment_errors)
-{
+TEST_BEGIN(test_alignment_errors) {
size_t alignment;
void *p;
@@ -34,8 +32,7 @@ TEST_BEGIN(test_alignment_errors)
}
TEST_END
-TEST_BEGIN(test_oom_errors)
-{
+TEST_BEGIN(test_oom_errors) {
size_t alignment, size;
void *p;
@@ -73,16 +70,16 @@ TEST_BEGIN(test_oom_errors)
}
TEST_END
-TEST_BEGIN(test_alignment_and_size)
-{
+TEST_BEGIN(test_alignment_and_size) {
#define NITER 4
size_t alignment, size, total;
unsigned i;
int err;
void *ps[NITER];
- for (i = 0; i < NITER; i++)
+ for (i = 0; i < NITER; i++) {
ps[i] = NULL;
+ }
for (alignment = 8;
alignment <= MAXALIGN;
@@ -104,8 +101,9 @@ TEST_BEGIN(test_alignment_and_size)
alignment, size, size, buf);
}
total += malloc_usable_size(ps[i]);
- if (total >= (MAXALIGN << 1))
+ if (total >= (MAXALIGN << 1)) {
break;
+ }
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
@@ -121,8 +119,7 @@ TEST_BEGIN(test_alignment_and_size)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_alignment_errors,
test_oom_errors,
diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c
index 0a8b50c..176b995 100644
--- a/test/integration/rallocx.c
+++ b/test/integration/rallocx.c
@@ -1,8 +1,7 @@
#include "test/jemalloc_test.h"
static unsigned
-get_nsizes_impl(const char *cmd)
-{
+get_nsizes_impl(const char *cmd) {
unsigned ret;
size_t z;
@@ -14,14 +13,12 @@ get_nsizes_impl(const char *cmd)
}
static unsigned
-get_nlarge(void)
-{
+get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
}
static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
+get_size_impl(const char *cmd, size_t ind) {
size_t ret;
size_t z;
size_t mib[4];
@@ -39,13 +36,11 @@ get_size_impl(const char *cmd, size_t ind)
}
static size_t
-get_large_size(size_t ind)
-{
+get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
}
-TEST_BEGIN(test_grow_and_shrink)
-{
+TEST_BEGIN(test_grow_and_shrink) {
void *p, *q;
size_t tsz;
#define NCYCLES 3
@@ -90,8 +85,7 @@ TEST_BEGIN(test_grow_and_shrink)
TEST_END
static bool
-validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
-{
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
bool ret = false;
const uint8_t *buf = (const uint8_t *)p;
size_t i;
@@ -109,8 +103,7 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
return (ret);
}
-TEST_BEGIN(test_zero)
-{
+TEST_BEGIN(test_zero) {
void *p, *q;
size_t psz, qsz, i, j;
size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
@@ -154,8 +147,7 @@ TEST_BEGIN(test_zero)
}
TEST_END
-TEST_BEGIN(test_align)
-{
+TEST_BEGIN(test_align) {
void *p, *q;
size_t align;
#define MAX_ALIGN (ZU(1) << 25)
@@ -179,8 +171,7 @@ TEST_BEGIN(test_align)
}
TEST_END
-TEST_BEGIN(test_lg_align_and_zero)
-{
+TEST_BEGIN(test_lg_align_and_zero) {
void *p, *q;
unsigned lg_align;
size_t sz;
@@ -217,8 +208,7 @@ TEST_BEGIN(test_lg_align_and_zero)
}
TEST_END
-TEST_BEGIN(test_overflow)
-{
+TEST_BEGIN(test_overflow) {
size_t largemax;
void *p;
@@ -245,8 +235,7 @@ TEST_BEGIN(test_overflow)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_grow_and_shrink,
test_zero,
diff --git a/test/integration/sdallocx.c b/test/integration/sdallocx.c
index 5d0a8f8..bf2fd2c 100644
--- a/test/integration/sdallocx.c
+++ b/test/integration/sdallocx.c
@@ -3,21 +3,20 @@
#define MAXALIGN (((size_t)1) << 22)
#define NITER 3
-TEST_BEGIN(test_basic)
-{
+TEST_BEGIN(test_basic) {
void *ptr = mallocx(64, 0);
sdallocx(ptr, 64, 0);
}
TEST_END
-TEST_BEGIN(test_alignment_and_size)
-{
+TEST_BEGIN(test_alignment_and_size) {
size_t nsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
- for (i = 0; i < NITER; i++)
+ for (i = 0; i < NITER; i++) {
ps[i] = NULL;
+ }
for (alignment = 8;
alignment <= MAXALIGN;
@@ -32,8 +31,9 @@ TEST_BEGIN(test_alignment_and_size)
ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
MALLOCX_ZERO);
total += nsz;
- if (total >= (MAXALIGN << 1))
+ if (total >= (MAXALIGN << 1)) {
break;
+ }
}
for (i = 0; i < NITER; i++) {
if (ps[i] != NULL) {
@@ -48,8 +48,7 @@ TEST_BEGIN(test_alignment_and_size)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_basic,
test_alignment_and_size));
diff --git a/test/integration/thread_arena.c b/test/integration/thread_arena.c
index cf8240d..5adb5ce 100644
--- a/test/integration/thread_arena.c
+++ b/test/integration/thread_arena.c
@@ -3,8 +3,7 @@
#define NTHREADS 10
void *
-thd_start(void *arg)
-{
+thd_start(void *arg) {
unsigned main_arena_ind = *(unsigned *)arg;
void *p;
unsigned arena_ind;
@@ -38,8 +37,7 @@ thd_start(void *arg)
return (NULL);
}
-TEST_BEGIN(test_thread_arena)
-{
+TEST_BEGIN(test_thread_arena) {
void *p;
unsigned arena_ind;
size_t size;
@@ -73,8 +71,7 @@ TEST_BEGIN(test_thread_arena)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_thread_arena));
}
diff --git a/test/integration/thread_tcache_enabled.c b/test/integration/thread_tcache_enabled.c
index 1394371..117d06b 100644
--- a/test/integration/thread_tcache_enabled.c
+++ b/test/integration/thread_tcache_enabled.c
@@ -9,8 +9,7 @@ static const bool config_tcache =
;
void *
-thd_start(void *arg)
-{
+thd_start(void *arg) {
int err;
size_t sz;
bool e0, e1;
@@ -84,14 +83,12 @@ label_ENOENT:
return (NULL);
}
-TEST_BEGIN(test_main_thread)
-{
+TEST_BEGIN(test_main_thread) {
thd_start(NULL);
}
TEST_END
-TEST_BEGIN(test_subthread)
-{
+TEST_BEGIN(test_subthread) {
thd_t thd;
thd_create(&thd, thd_start, NULL);
@@ -100,8 +97,7 @@ TEST_BEGIN(test_subthread)
TEST_END
int
-main(void)
-{
+main(void) {
/* Run tests multiple times to check for bad interactions. */
return (test(
test_main_thread,
diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c
index 647404a..9b4b68e 100644
--- a/test/integration/xallocx.c
+++ b/test/integration/xallocx.c
@@ -10,8 +10,7 @@ const char *malloc_conf = "junk:false";
* xallocx() would ordinarily be able to extend.
*/
static unsigned
-arena_ind(void)
-{
+arena_ind(void) {
static unsigned ind = 0;
if (ind == 0) {
@@ -23,8 +22,7 @@ arena_ind(void)
return (ind);
}
-TEST_BEGIN(test_same_size)
-{
+TEST_BEGIN(test_same_size) {
void *p;
size_t sz, tsz;
@@ -39,8 +37,7 @@ TEST_BEGIN(test_same_size)
}
TEST_END
-TEST_BEGIN(test_extra_no_move)
-{
+TEST_BEGIN(test_extra_no_move) {
void *p;
size_t sz, tsz;
@@ -55,8 +52,7 @@ TEST_BEGIN(test_extra_no_move)
}
TEST_END
-TEST_BEGIN(test_no_move_fail)
-{
+TEST_BEGIN(test_no_move_fail) {
void *p;
size_t sz, tsz;
@@ -72,8 +68,7 @@ TEST_BEGIN(test_no_move_fail)
TEST_END
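xallocx() only ever resizes in place and returns the resulting usable size, which is what lets test_no_move_fail above assert that a doomed request leaves the allocation where it was. The canonical calling pattern:

	void *p = mallocx(64, 0);
	size_t usz = xallocx(p, 128, 0, 0);
	if (usz < 128) {
		/* Could not extend in place; rallocx() would move it instead. */
	}
	dallocx(p, 0);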
static unsigned
-get_nsizes_impl(const char *cmd)
-{
+get_nsizes_impl(const char *cmd) {
unsigned ret;
size_t z;
@@ -85,20 +80,17 @@ get_nsizes_impl(const char *cmd)
}
static unsigned
-get_nsmall(void)
-{
+get_nsmall(void) {
return (get_nsizes_impl("arenas.nbins"));
}
static unsigned
-get_nlarge(void)
-{
+get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
}
static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
+get_size_impl(const char *cmd, size_t ind) {
size_t ret;
size_t z;
size_t mib[4];
@@ -116,19 +108,16 @@ get_size_impl(const char *cmd, size_t ind)
}
static size_t
-get_small_size(size_t ind)
-{
+get_small_size(size_t ind) {
return (get_size_impl("arenas.bin.0.size", ind));
}
static size_t
-get_large_size(size_t ind)
-{
+get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
}
-TEST_BEGIN(test_size)
-{
+TEST_BEGIN(test_size) {
size_t small0, largemax;
void *p;
@@ -157,8 +146,7 @@ TEST_BEGIN(test_size)
}
TEST_END
-TEST_BEGIN(test_size_extra_overflow)
-{
+TEST_BEGIN(test_size_extra_overflow) {
size_t small0, largemax;
void *p;
@@ -189,8 +177,7 @@ TEST_BEGIN(test_size_extra_overflow)
}
TEST_END
-TEST_BEGIN(test_extra_small)
-{
+TEST_BEGIN(test_extra_small) {
size_t small0, small1, largemax;
void *p;
@@ -221,8 +208,7 @@ TEST_BEGIN(test_extra_small)
}
TEST_END
-TEST_BEGIN(test_extra_large)
-{
+TEST_BEGIN(test_extra_large) {
int flags = MALLOCX_ARENA(arena_ind());
size_t smallmax, large1, large2, large3, largemax;
void *p;
@@ -292,8 +278,7 @@ TEST_BEGIN(test_extra_large)
TEST_END
static void
-print_filled_extents(const void *p, uint8_t c, size_t len)
-{
+print_filled_extents(const void *p, uint8_t c, size_t len) {
const uint8_t *pc = (const uint8_t *)p;
size_t i, range0;
uint8_t c0;
@@ -312,26 +297,26 @@ print_filled_extents(const void *p, uint8_t c, size_t len)
}
static bool
-validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
-{
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
const uint8_t *pc = (const uint8_t *)p;
bool err;
size_t i;
for (i = offset, err = false; i < offset+len; i++) {
- if (pc[i] != c)
+ if (pc[i] != c) {
err = true;
+ }
}
- if (err)
+ if (err) {
print_filled_extents(p, c, offset + len);
+ }
return (err);
}
static void
-test_zero(size_t szmin, size_t szmax)
-{
+test_zero(size_t szmin, size_t szmax) {
int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
size_t sz, nsz;
void *p;
@@ -378,8 +363,7 @@ test_zero(size_t szmin, size_t szmax)
dallocx(p, flags);
}
-TEST_BEGIN(test_zero_large)
-{
+TEST_BEGIN(test_zero_large) {
size_t large0, large1;
/* Get size classes. */
@@ -391,8 +375,7 @@ TEST_BEGIN(test_zero_large)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_same_size,
test_extra_no_move,
diff --git a/test/src/btalloc.c b/test/src/btalloc.c
index a78cb89..bc31f9b 100644
--- a/test/src/btalloc.c
+++ b/test/src/btalloc.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
void *
-btalloc(size_t size, unsigned bits)
-{
+btalloc(size_t size, unsigned bits) {
return (btalloc_0(size, bits));
}
diff --git a/test/src/mq.c b/test/src/mq.c
index 47f362c..9b5f672 100644
--- a/test/src/mq.c
+++ b/test/src/mq.c
@@ -5,8 +5,7 @@
* time is guaranteed.
*/
void
-mq_nanosleep(unsigned ns)
-{
+mq_nanosleep(unsigned ns) {
assert(ns <= 1000*1000*1000);
#ifdef _WIN32
diff --git a/test/src/mtx.c b/test/src/mtx.c
index bbfec4a..924ba28 100644
--- a/test/src/mtx.c
+++ b/test/src/mtx.c
@@ -5,11 +5,12 @@
#endif
bool
-mtx_init(mtx_t *mtx)
-{
+mtx_init(mtx_t *mtx) {
#ifdef _WIN32
- if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT))
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->lock,
+ _CRT_SPINCOUNT)) {
return (true);
+ }
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mtx->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
@@ -17,8 +18,9 @@ mtx_init(mtx_t *mtx)
#else
pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0)
+ if (pthread_mutexattr_init(&attr) != 0) {
return (true);
+ }
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
@@ -30,8 +32,7 @@ mtx_init(mtx_t *mtx)
}
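mtx_t is the test harness's portable mutex shim over CRITICAL_SECTION, os_unfair_lock, OSSpinLock, and pthread_mutex_t; note that mtx_init() follows the jemalloc convention of returning true on failure. Usage is uniform across platforms:

	mtx_t lock;
	if (mtx_init(&lock)) {
		test_fail("Error in mtx_init()\n");
	}
	mtx_lock(&lock);
	/* ... critical section ... */
	mtx_unlock(&lock);
	mtx_fini(&lock);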
void
-mtx_fini(mtx_t *mtx)
-{
+mtx_fini(mtx_t *mtx) {
#ifdef _WIN32
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#elif (defined(JEMALLOC_OSSPIN))
@@ -41,8 +42,7 @@ mtx_fini(mtx_t *mtx)
}
void
-mtx_lock(mtx_t *mtx)
-{
+mtx_lock(mtx_t *mtx) {
#ifdef _WIN32
EnterCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
@@ -55,8 +55,7 @@ mtx_lock(mtx_t *mtx)
}
void
-mtx_unlock(mtx_t *mtx)
-{
+mtx_unlock(mtx_t *mtx) {
#ifdef _WIN32
LeaveCriticalSection(&mtx->lock);
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
diff --git a/test/src/test.c b/test/src/test.c
index 345cc1c..1155326 100644
--- a/test/src/test.c
+++ b/test/src/test.c
@@ -7,8 +7,7 @@ static const char * test_name = "";
JEMALLOC_FORMAT_PRINTF(1, 2)
void
-test_skip(const char *format, ...)
-{
+test_skip(const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -20,8 +19,7 @@ test_skip(const char *format, ...)
JEMALLOC_FORMAT_PRINTF(1, 2)
void
-test_fail(const char *format, ...)
-{
+test_fail(const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -32,8 +30,7 @@ test_fail(const char *format, ...)
}
static const char *
-test_status_string(test_status_t test_status)
-{
+test_status_string(test_status_t test_status) {
switch (test_status) {
case test_status_pass: return "pass";
case test_status_skip: return "skip";
@@ -43,23 +40,20 @@ test_status_string(test_status_t test_status)
}
void
-p_test_init(const char *name)
-{
+p_test_init(const char *name) {
test_count++;
test_status = test_status_pass;
test_name = name;
}
void
-p_test_fini(void)
-{
+p_test_fini(void) {
test_counts[test_status]++;
malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
}
static test_status_t
-p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
-{
+p_test_impl(bool do_malloc_init, test_t *t, va_list ap) {
test_status_t ret;
if (do_malloc_init) {
@@ -78,8 +72,9 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
ret = test_status_pass;
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
- if (test_status > ret)
+ if (test_status > ret) {
ret = test_status;
+ }
}
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
@@ -94,8 +89,7 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
}
test_status_t
-p_test(test_t *t, ...)
-{
+p_test(test_t *t, ...) {
test_status_t ret;
va_list ap;
@@ -108,8 +102,7 @@ p_test(test_t *t, ...)
}
test_status_t
-p_test_no_malloc_init(test_t *t, ...)
-{
+p_test_no_malloc_init(test_t *t, ...) {
test_status_t ret;
va_list ap;
@@ -122,8 +115,7 @@ p_test_no_malloc_init(test_t *t, ...)
}
void
-p_test_fail(const char *prefix, const char *message)
-{
+p_test_fail(const char *prefix, const char *message) {
malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
test_status = test_status_fail;
}
diff --git a/test/src/thd.c b/test/src/thd.c
index e316708..9a15eab 100644
--- a/test/src/thd.c
+++ b/test/src/thd.c
@@ -2,17 +2,16 @@
#ifdef _WIN32
void
-thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
-{
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
*thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
- if (*thd == NULL)
+ if (*thd == NULL) {
test_fail("Error in CreateThread()\n");
+ }
}
void
-thd_join(thd_t thd, void **ret)
-{
+thd_join(thd_t thd, void **ret) {
if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
DWORD exit_code;
GetExitCodeThread(thd, (LPDWORD) &exit_code);
@@ -22,15 +21,14 @@ thd_join(thd_t thd, void **ret)
#else
void
-thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
-{
- if (pthread_create(thd, NULL, proc, arg) != 0)
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
+ if (pthread_create(thd, NULL, proc, arg) != 0) {
test_fail("Error in pthread_create()\n");
+ }
}
void
-thd_join(thd_t thd, void **ret)
-{
+thd_join(thd_t thd, void **ret) {
pthread_join(thd, ret);
}
#endif
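
Annotation: the thd_create()/thd_join() wrappers above give the test suite a
single thread API across Win32 and pthreads. Typical usage, following the
signatures shown (worker() is a hypothetical test function):

	static void *
	worker(void *arg) {
		/* ... exercise the allocator ... */
		return (NULL);
	}

	thd_t thd;
	thd_create(&thd, worker, NULL);	/* Calls test_fail() on failure. */
	thd_join(thd, NULL);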
diff --git a/test/src/timer.c b/test/src/timer.c
index 82f69d0..1b18633 100644
--- a/test/src/timer.c
+++ b/test/src/timer.c
@@ -1,22 +1,19 @@
#include "test/jemalloc_test.h"
void
-timer_start(timedelta_t *timer)
-{
+timer_start(timedelta_t *timer) {
nstime_init(&timer->t0, 0);
nstime_update(&timer->t0);
}
void
-timer_stop(timedelta_t *timer)
-{
+timer_stop(timedelta_t *timer) {
nstime_copy(&timer->t1, &timer->t0);
nstime_update(&timer->t1);
}
uint64_t
-timer_usec(const timedelta_t *timer)
-{
+timer_usec(const timedelta_t *timer) {
nstime_t delta;
nstime_copy(&delta, &timer->t1);
@@ -25,8 +22,7 @@ timer_usec(const timedelta_t *timer)
}
void
-timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
-{
+timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) {
uint64_t t0 = timer_usec(a);
uint64_t t1 = timer_usec(b);
uint64_t mult;
@@ -36,11 +32,13 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
/* Whole. */
n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
i += n;
- if (i >= buflen)
+ if (i >= buflen) {
return;
+ }
mult = 1;
- for (j = 0; j < n; j++)
+ for (j = 0; j < n; j++) {
mult *= 10;
+ }
/* Decimal. */
n = malloc_snprintf(&buf[i], buflen-i, ".");
diff --git a/test/stress/microbench.c b/test/stress/microbench.c
index c599d9d..3b7e966 100644
--- a/test/stress/microbench.c
+++ b/test/stress/microbench.c
@@ -2,22 +2,22 @@
JEMALLOC_INLINE_C void
time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
- void (*func)(void))
-{
+ void (*func)(void)) {
uint64_t i;
- for (i = 0; i < nwarmup; i++)
+ for (i = 0; i < nwarmup; i++) {
func();
+ }
timer_start(timer);
- for (i = 0; i < niter; i++)
+ for (i = 0; i < niter; i++) {
func();
+ }
timer_stop(timer);
}
void
compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
- void (*func_a), const char *name_b, void (*func_b))
-{
+ void (*func_a), const char *name_b, void (*func_b)) {
timedelta_t timer_a, timer_b;
char ratio_buf[6];
void *p;
@@ -41,8 +41,7 @@ compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
}
static void
-malloc_free(void)
-{
+malloc_free(void) {
/* The compiler can optimize away free(malloc(1))! */
void *p = malloc(1);
if (p == NULL) {
@@ -53,8 +52,7 @@ malloc_free(void)
}
static void
-mallocx_free(void)
-{
+mallocx_free(void) {
void *p = mallocx(1, 0);
if (p == NULL) {
test_fail("Unexpected mallocx() failure");
@@ -63,16 +61,14 @@ mallocx_free(void)
free(p);
}
-TEST_BEGIN(test_malloc_vs_mallocx)
-{
+TEST_BEGIN(test_malloc_vs_mallocx) {
compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
malloc_free, "mallocx", mallocx_free);
}
TEST_END
static void
-malloc_dallocx(void)
-{
+malloc_dallocx(void) {
void *p = malloc(1);
if (p == NULL) {
test_fail("Unexpected malloc() failure");
@@ -82,8 +78,7 @@ malloc_dallocx(void)
}
static void
-malloc_sdallocx(void)
-{
+malloc_sdallocx(void) {
void *p = malloc(1);
if (p == NULL) {
test_fail("Unexpected malloc() failure");
@@ -92,23 +87,20 @@ malloc_sdallocx(void)
sdallocx(p, 1, 0);
}
-TEST_BEGIN(test_free_vs_dallocx)
-{
+TEST_BEGIN(test_free_vs_dallocx) {
compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
"dallocx", malloc_dallocx);
}
TEST_END
-TEST_BEGIN(test_dallocx_vs_sdallocx)
-{
+TEST_BEGIN(test_dallocx_vs_sdallocx) {
compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
"sdallocx", malloc_sdallocx);
}
TEST_END
static void
-malloc_mus_free(void)
-{
+malloc_mus_free(void) {
void *p;
p = malloc(1);
@@ -121,8 +113,7 @@ malloc_mus_free(void)
}
static void
-malloc_sallocx_free(void)
-{
+malloc_sallocx_free(void) {
void *p;
p = malloc(1);
@@ -130,21 +121,20 @@ malloc_sallocx_free(void)
test_fail("Unexpected malloc() failure");
return;
}
- if (sallocx(p, 0) < 1)
+ if (sallocx(p, 0) < 1) {
test_fail("Unexpected sallocx() failure");
+ }
free(p);
}
-TEST_BEGIN(test_mus_vs_sallocx)
-{
+TEST_BEGIN(test_mus_vs_sallocx) {
compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
malloc_mus_free, "sallocx", malloc_sallocx_free);
}
TEST_END
static void
-malloc_nallocx_free(void)
-{
+malloc_nallocx_free(void) {
void *p;
p = malloc(1);
@@ -152,21 +142,20 @@ malloc_nallocx_free(void)
test_fail("Unexpected malloc() failure");
return;
}
- if (nallocx(1, 0) < 1)
+ if (nallocx(1, 0) < 1) {
test_fail("Unexpected nallocx() failure");
+ }
free(p);
}
-TEST_BEGIN(test_sallocx_vs_nallocx)
-{
+TEST_BEGIN(test_sallocx_vs_nallocx) {
compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
malloc_sallocx_free, "nallocx", malloc_nallocx_free);
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_malloc_vs_mallocx,
test_free_vs_dallocx,
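
Annotation: one microbenchmark subtlety visible above; as the in-source
comment says, the compiler can optimize away free(malloc(1)) when the pointer
is never observed, so each benchmark body branches on the result. A sketch of
the hazard the null checks avoid (hypothetical, not part of this diff):

	static void
	malloc_free_naive(void) {
		/*
		 * A sufficiently aggressive compiler may drop this pair
		 * entirely, leaving the timed loop measuring nothing.
		 */
		free(malloc(1));
	}

Branching on p, and calling test_fail() when it is NULL, keeps the allocation
observable, so the timed loops measure real allocator work.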
diff --git a/test/unit/SFMT.c b/test/unit/SFMT.c
index cf52670..b1bcf3d 100644
--- a/test/unit/SFMT.c
+++ b/test/unit/SFMT.c
@@ -1449,8 +1449,7 @@ static const uint64_t init_by_array_64_expected[] = {
KQU(15570163926716513029), KQU(13356980519185762498)
};
-TEST_BEGIN(test_gen_rand_32)
-{
+TEST_BEGIN(test_gen_rand_32) {
uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
int i;
@@ -1484,8 +1483,7 @@ TEST_BEGIN(test_gen_rand_32)
}
TEST_END
-TEST_BEGIN(test_by_array_32)
-{
+TEST_BEGIN(test_by_array_32) {
uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
int i;
@@ -1520,8 +1518,7 @@ TEST_BEGIN(test_by_array_32)
}
TEST_END
-TEST_BEGIN(test_gen_rand_64)
-{
+TEST_BEGIN(test_gen_rand_64) {
uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
int i;
@@ -1556,8 +1553,7 @@ TEST_BEGIN(test_gen_rand_64)
}
TEST_END
-TEST_BEGIN(test_by_array_64)
-{
+TEST_BEGIN(test_by_array_64) {
uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
int i;
@@ -1594,8 +1590,7 @@ TEST_BEGIN(test_by_array_64)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_gen_rand_32,
test_by_array_32,
diff --git a/test/unit/a0.c b/test/unit/a0.c
index 87f7e52..c7ce8cf 100644
--- a/test/unit/a0.c
+++ b/test/unit/a0.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_a0)
-{
+TEST_BEGIN(test_a0) {
void *p;
p = a0malloc(1);
@@ -11,8 +10,7 @@ TEST_BEGIN(test_a0)
TEST_END
int
-main(void)
-{
+main(void) {
return (test_no_malloc_init(
test_a0));
}
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index 257f972..710aaf5 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -5,8 +5,7 @@
#include "test/extent_hooks.h"
static unsigned
-get_nsizes_impl(const char *cmd)
-{
+get_nsizes_impl(const char *cmd) {
unsigned ret;
size_t z;
@@ -18,20 +17,17 @@ get_nsizes_impl(const char *cmd)
}
static unsigned
-get_nsmall(void)
-{
+get_nsmall(void) {
return (get_nsizes_impl("arenas.nbins"));
}
static unsigned
-get_nlarge(void)
-{
+get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
}
static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
+get_size_impl(const char *cmd, size_t ind) {
size_t ret;
size_t z;
size_t mib[4];
@@ -49,35 +45,33 @@ get_size_impl(const char *cmd, size_t ind)
}
static size_t
-get_small_size(size_t ind)
-{
+get_small_size(size_t ind) {
return (get_size_impl("arenas.bin.0.size", ind));
}
static size_t
-get_large_size(size_t ind)
-{
+get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
}
/* Like ivsalloc(), but safe to call on discarded allocations. */
static size_t
-vsalloc(tsdn_t *tsdn, const void *ptr)
-{
+vsalloc(tsdn_t *tsdn, const void *ptr) {
extent_t *extent;
extent = extent_lookup(tsdn, ptr, false);
- if (extent == NULL)
+ if (extent == NULL) {
return (0);
- if (!extent_active_get(extent))
+ }
+ if (!extent_active_get(extent)) {
return (0);
+ }
return (isalloc(tsdn, extent, ptr));
}
static unsigned
-do_arena_create(extent_hooks_t *h)
-{
+do_arena_create(extent_hooks_t *h) {
unsigned arena_ind;
size_t sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
@@ -87,8 +81,7 @@ do_arena_create(extent_hooks_t *h)
}
static void
-do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs)
-{
+do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
#define NLARGE 32
unsigned nsmall, nlarge, i;
size_t sz;
@@ -127,8 +120,7 @@ do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs)
}
static void
-do_arena_reset_post(void **ptrs, unsigned nptrs)
-{
+do_arena_reset_post(void **ptrs, unsigned nptrs) {
tsdn_t *tsdn;
unsigned i;
@@ -144,8 +136,7 @@ do_arena_reset_post(void **ptrs, unsigned nptrs)
}
static void
-do_arena_reset_destroy(const char *name, unsigned arena_ind)
-{
+do_arena_reset_destroy(const char *name, unsigned arena_ind) {
size_t mib[3];
size_t miblen;
@@ -158,19 +149,16 @@ do_arena_reset_destroy(const char *name, unsigned arena_ind)
}
static void
-do_arena_reset(unsigned arena_ind)
-{
+do_arena_reset(unsigned arena_ind) {
do_arena_reset_destroy("arena.0.reset", arena_ind);
}
static void
-do_arena_destroy(unsigned arena_ind)
-{
+do_arena_destroy(unsigned arena_ind) {
do_arena_reset_destroy("arena.0.destroy", arena_ind);
}
-TEST_BEGIN(test_arena_reset)
-{
+TEST_BEGIN(test_arena_reset) {
unsigned arena_ind;
void **ptrs;
unsigned nptrs;
@@ -183,8 +171,7 @@ TEST_BEGIN(test_arena_reset)
TEST_END
static bool
-arena_i_initialized(unsigned arena_ind, bool refresh)
-{
+arena_i_initialized(unsigned arena_ind, bool refresh) {
bool initialized;
size_t mib[3];
size_t miblen, sz;
@@ -206,15 +193,13 @@ arena_i_initialized(unsigned arena_ind, bool refresh)
return (initialized);
}
-TEST_BEGIN(test_arena_destroy_initial)
-{
+TEST_BEGIN(test_arena_destroy_initial) {
assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should not be initialized");
}
TEST_END
-TEST_BEGIN(test_arena_destroy_hooks_default)
-{
+TEST_BEGIN(test_arena_destroy_hooks_default) {
unsigned arena_ind, arena_ind_another, arena_ind_prev;
void **ptrs;
unsigned nptrs;
@@ -260,8 +245,7 @@ TEST_END
*/
static bool
extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind)
-{
+ bool committed, unsigned arena_ind) {
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
"arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
"true" : "false", arena_ind);
@@ -270,8 +254,9 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
"Wrong hook function");
called_dalloc = true;
- if (!try_dalloc)
+ if (!try_dalloc) {
return (true);
+ }
pages_unmap(addr, size);
did_dalloc = true;
return (false);
@@ -290,8 +275,7 @@ static extent_hooks_t hooks_unmap = {
extent_merge_hook
};
-TEST_BEGIN(test_arena_destroy_hooks_unmap)
-{
+TEST_BEGIN(test_arena_destroy_hooks_unmap) {
unsigned arena_ind;
void **ptrs;
unsigned nptrs;
@@ -328,8 +312,7 @@ TEST_BEGIN(test_arena_destroy_hooks_unmap)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_arena_reset,
test_arena_destroy_initial,
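
Annotation: the custom hook above, extent_dalloc_unmap(), follows the
extent_hooks_t return convention visible in the hunk: returning true declines
the deallocation (the extent stays under jemalloc's management), while
returning false reports that the hook handled it. Condensed from the code
above:

	called_dalloc = true;
	if (!try_dalloc) {
		return (true);	/* Decline; jemalloc retains the extent. */
	}
	pages_unmap(addr, size);
	did_dalloc = true;
	return (false);	/* Hook performed the unmap. */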
diff --git a/test/unit/atomic.c b/test/unit/atomic.c
index 1d14368..3e36acd 100644
--- a/test/unit/atomic.c
+++ b/test/unit/atomic.c
@@ -66,8 +66,7 @@ typedef struct p##_test_s p##_test_t;
} while (0)
TEST_STRUCT(u64, uint64_t)
-TEST_BEGIN(test_atomic_u64)
-{
+TEST_BEGIN(test_atomic_u64) {
#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
test_skip("64-bit atomic operations not supported");
#else
@@ -77,36 +76,31 @@ TEST_BEGIN(test_atomic_u64)
TEST_END
TEST_STRUCT(u32, uint32_t)
-TEST_BEGIN(test_atomic_u32)
-{
+TEST_BEGIN(test_atomic_u32) {
TEST_BODY(u32, uint32_t, uint32_t, u32, "#"FMTx32);
}
TEST_END
TEST_STRUCT(p, void *)
-TEST_BEGIN(test_atomic_p)
-{
+TEST_BEGIN(test_atomic_p) {
TEST_BODY(p, void *, uintptr_t, ptr, "p");
}
TEST_END
TEST_STRUCT(zu, size_t)
-TEST_BEGIN(test_atomic_zu)
-{
+TEST_BEGIN(test_atomic_zu) {
TEST_BODY(zu, size_t, size_t, zu, "#zx");
}
TEST_END
TEST_STRUCT(u, unsigned)
-TEST_BEGIN(test_atomic_u)
-{
+TEST_BEGIN(test_atomic_u) {
TEST_BODY(u, unsigned, unsigned, u, "#x");
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_atomic_u64,
test_atomic_u32,
diff --git a/test/unit/base.c b/test/unit/base.c
index 76e96da..65cf980 100644
--- a/test/unit/base.c
+++ b/test/unit/base.c
@@ -24,8 +24,7 @@ static extent_hooks_t hooks_not_null = {
NULL /* merge */
};
-TEST_BEGIN(test_base_hooks_default)
-{
+TEST_BEGIN(test_base_hooks_default) {
tsdn_t *tsdn;
base_t *base;
size_t allocated0, allocated1, resident, mapped;
@@ -52,8 +51,7 @@ TEST_BEGIN(test_base_hooks_default)
}
TEST_END
-TEST_BEGIN(test_base_hooks_null)
-{
+TEST_BEGIN(test_base_hooks_null) {
extent_hooks_t hooks_orig;
tsdn_t *tsdn;
base_t *base;
@@ -92,8 +90,7 @@ TEST_BEGIN(test_base_hooks_null)
}
TEST_END
-TEST_BEGIN(test_base_hooks_not_null)
-{
+TEST_BEGIN(test_base_hooks_not_null) {
extent_hooks_t hooks_orig;
tsdn_t *tsdn;
base_t *base;
@@ -214,8 +211,7 @@ TEST_BEGIN(test_base_hooks_not_null)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_base_hooks_default,
test_base_hooks_null,
diff --git a/test/unit/bitmap.c b/test/unit/bitmap.c
index b502bfe..6dfa72f 100644
--- a/test/unit/bitmap.c
+++ b/test/unit/bitmap.c
@@ -93,8 +93,7 @@
NB(16384) \
static void
-test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits)
-{
+test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) {
bitmap_info_t binfo_dyn;
bitmap_info_init(&binfo_dyn, nbits);
@@ -124,8 +123,7 @@ test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits)
#endif
}
-TEST_BEGIN(test_bitmap_initializer)
-{
+TEST_BEGIN(test_bitmap_initializer) {
#define NB(nbits) { \
if (nbits <= BITMAP_MAXBITS) { \
bitmap_info_t binfo = \
@@ -140,8 +138,7 @@ TEST_END
static size_t
test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
- size_t prev_size)
-{
+ size_t prev_size) {
size_t size = bitmap_size(binfo);
assert_zu_ge(size, (nbits >> 3),
"Bitmap size is smaller than expected");
@@ -149,8 +146,7 @@ test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
return (size);
}
-TEST_BEGIN(test_bitmap_size)
-{
+TEST_BEGIN(test_bitmap_size) {
size_t nbits, prev_size;
prev_size = 0;
@@ -171,8 +167,7 @@ TEST_BEGIN(test_bitmap_size)
TEST_END
static void
-test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits)
-{
+test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
@@ -185,8 +180,7 @@ test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits)
free(bitmap);
}
-TEST_BEGIN(test_bitmap_init)
-{
+TEST_BEGIN(test_bitmap_init) {
size_t nbits;
for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
@@ -204,21 +198,20 @@ TEST_BEGIN(test_bitmap_init)
TEST_END
static void
-test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits)
-{
+test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo);
- for (i = 0; i < nbits; i++)
+ for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
+ }
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
free(bitmap);
}
-TEST_BEGIN(test_bitmap_set)
-{
+TEST_BEGIN(test_bitmap_set) {
size_t nbits;
for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
@@ -236,26 +229,27 @@ TEST_BEGIN(test_bitmap_set)
TEST_END
static void
-test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits)
-{
+test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo);
- for (i = 0; i < nbits; i++)
+ for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
+ }
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
- for (i = 0; i < nbits; i++)
+ for (i = 0; i < nbits; i++) {
bitmap_unset(bitmap, binfo, i);
- for (i = 0; i < nbits; i++)
+ }
+ for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
+ }
assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
free(bitmap);
}
-TEST_BEGIN(test_bitmap_unset)
-{
+TEST_BEGIN(test_bitmap_unset) {
size_t nbits;
for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
@@ -273,8 +267,7 @@ TEST_BEGIN(test_bitmap_unset)
TEST_END
static void
-test_bitmap_sfu_body(const bitmap_info_t *binfo, size_t nbits)
-{
+test_bitmap_sfu_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
@@ -317,8 +310,7 @@ test_bitmap_sfu_body(const bitmap_info_t *binfo, size_t nbits)
free(bitmap);
}
-TEST_BEGIN(test_bitmap_sfu)
-{
+TEST_BEGIN(test_bitmap_sfu) {
size_t nbits;
for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
@@ -336,8 +328,7 @@ TEST_BEGIN(test_bitmap_sfu)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_bitmap_initializer,
test_bitmap_size,
diff --git a/test/unit/ckh.c b/test/unit/ckh.c
index 1f57668..0638cb3 100644
--- a/test/unit/ckh.c
+++ b/test/unit/ckh.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_new_delete)
-{
+TEST_BEGIN(test_new_delete) {
tsd_t *tsd;
ckh_t ckh;
@@ -17,8 +16,7 @@ TEST_BEGIN(test_new_delete)
}
TEST_END
-TEST_BEGIN(test_count_insert_search_remove)
-{
+TEST_BEGIN(test_count_insert_search_remove) {
tsd_t *tsd;
ckh_t ckh;
const char *strs[] = {
@@ -105,8 +103,7 @@ TEST_BEGIN(test_count_insert_search_remove)
}
TEST_END
-TEST_BEGIN(test_insert_iter_remove)
-{
+TEST_BEGIN(test_insert_iter_remove) {
#define NITEMS ZU(1000)
tsd_t *tsd;
ckh_t ckh;
@@ -174,10 +171,12 @@ TEST_BEGIN(test_insert_iter_remove)
}
}
- for (j = 0; j < i + 1; j++)
+ for (j = 0; j < i + 1; j++) {
assert_true(seen[j], "Item %zu not seen", j);
- for (; j < NITEMS; j++)
+ }
+ for (; j < NITEMS; j++) {
assert_false(seen[j], "Item %zu seen", j);
+ }
}
}
@@ -204,8 +203,7 @@ TEST_BEGIN(test_insert_iter_remove)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_new_delete,
test_count_insert_search_remove,
diff --git a/test/unit/decay.c b/test/unit/decay.c
index b3b1dd9..d6334cd 100644
--- a/test/unit/decay.c
+++ b/test/unit/decay.c
@@ -10,22 +10,20 @@ static nstime_t time_mock;
static bool monotonic_mock;
static bool
-nstime_monotonic_mock(void)
-{
+nstime_monotonic_mock(void) {
return (monotonic_mock);
}
static bool
-nstime_update_mock(nstime_t *time)
-{
+nstime_update_mock(nstime_t *time) {
nupdates_mock++;
- if (monotonic_mock)
+ if (monotonic_mock) {
nstime_copy(time, &time_mock);
+ }
return (!monotonic_mock);
}
-TEST_BEGIN(test_decay_ticks)
-{
+TEST_BEGIN(test_decay_ticks) {
ticker_t *decay_ticker;
unsigned tick0, tick1;
size_t sz, large0;
@@ -197,8 +195,7 @@ TEST_BEGIN(test_decay_ticks)
}
TEST_END
-TEST_BEGIN(test_decay_ticker)
-{
+TEST_BEGIN(test_decay_ticker) {
#define NPS 1024
int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
void *ps[NPS];
@@ -284,14 +281,14 @@ TEST_BEGIN(test_decay_ticker)
nstime_update(&time);
} while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
- if (config_stats)
+ if (config_stats) {
assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
+ }
#undef NPS
}
TEST_END
-TEST_BEGIN(test_decay_nonmonotonic)
-{
+TEST_BEGIN(test_decay_nonmonotonic) {
#define NPS (SMOOTHSTEP_NSTEPS + 1)
int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
void *ps[NPS];
@@ -343,8 +340,9 @@ TEST_BEGIN(test_decay_nonmonotonic)
assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
- if (config_stats)
+ if (config_stats) {
assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
+ }
nstime_monotonic = nstime_monotonic_orig;
nstime_update = nstime_update_orig;
@@ -353,8 +351,7 @@ TEST_BEGIN(test_decay_nonmonotonic)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_decay_ticks,
test_decay_ticker,
diff --git a/test/unit/extent_quantize.c b/test/unit/extent_quantize.c
index a5c1b7a..343d1d8 100644
--- a/test/unit/extent_quantize.c
+++ b/test/unit/extent_quantize.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_small_extent_size)
-{
+TEST_BEGIN(test_small_extent_size) {
unsigned nbins, i;
size_t sz, extent_size;
size_t mib[4];
@@ -35,8 +34,7 @@ TEST_BEGIN(test_small_extent_size)
}
TEST_END
-TEST_BEGIN(test_large_extent_size)
-{
+TEST_BEGIN(test_large_extent_size) {
bool cache_oblivious;
unsigned nlextents, i;
size_t sz, extent_size_prev, ceil_prev;
@@ -100,8 +98,7 @@ TEST_BEGIN(test_large_extent_size)
}
TEST_END
-TEST_BEGIN(test_monotonic)
-{
+TEST_BEGIN(test_monotonic) {
#define SZ_MAX ZU(4 * 1024 * 1024)
unsigned i;
size_t floor_prev, ceil_prev;
@@ -136,8 +133,7 @@ TEST_BEGIN(test_monotonic)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_small_extent_size,
test_large_extent_size,
diff --git a/test/unit/fork.c b/test/unit/fork.c
index 58091c6..4880328 100644
--- a/test/unit/fork.c
+++ b/test/unit/fork.c
@@ -4,8 +4,7 @@
#include <sys/wait.h>
#endif
-TEST_BEGIN(test_fork)
-{
+TEST_BEGIN(test_fork) {
#ifndef _WIN32
void *p;
pid_t pid;
@@ -32,8 +31,9 @@ TEST_BEGIN(test_fork)
/* Parent. */
while (true) {
- if (waitpid(pid, &status, 0) == -1)
+ if (waitpid(pid, &status, 0) == -1) {
test_fail("Unexpected waitpid() failure");
+ }
if (WIFSIGNALED(status)) {
test_fail("Unexpected child termination due to "
"signal %d", WTERMSIG(status));
@@ -56,8 +56,7 @@ TEST_BEGIN(test_fork)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_fork));
}
diff --git a/test/unit/hash.c b/test/unit/hash.c
index ff23777..977d058 100644
--- a/test/unit/hash.c
+++ b/test/unit/hash.c
@@ -36,8 +36,7 @@ typedef enum {
} hash_variant_t;
static int
-hash_variant_bits(hash_variant_t variant)
-{
+hash_variant_bits(hash_variant_t variant) {
switch (variant) {
case hash_variant_x86_32: return (32);
case hash_variant_x86_128: return (128);
@@ -47,8 +46,7 @@ hash_variant_bits(hash_variant_t variant)
}
static const char *
-hash_variant_string(hash_variant_t variant)
-{
+hash_variant_string(hash_variant_t variant) {
switch (variant) {
case hash_variant_x86_32: return ("hash_x86_32");
case hash_variant_x86_128: return ("hash_x86_128");
@@ -59,8 +57,7 @@ hash_variant_string(hash_variant_t variant)
#define KEY_SIZE 256
static void
-hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
-{
+hash_variant_verify_key(hash_variant_t variant, uint8_t *key) {
const int hashbytes = hash_variant_bits(variant) / 8;
const int hashes_size = hashbytes * 256;
VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
@@ -139,39 +136,35 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
}
static void
-hash_variant_verify(hash_variant_t variant)
-{
+hash_variant_verify(hash_variant_t variant) {
#define MAX_ALIGN 16
uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
unsigned i;
- for (i = 0; i < MAX_ALIGN; i++)
+ for (i = 0; i < MAX_ALIGN; i++) {
hash_variant_verify_key(variant, &key[i]);
+ }
#undef MAX_ALIGN
}
#undef KEY_SIZE
-TEST_BEGIN(test_hash_x86_32)
-{
+TEST_BEGIN(test_hash_x86_32) {
hash_variant_verify(hash_variant_x86_32);
}
TEST_END
-TEST_BEGIN(test_hash_x86_128)
-{
+TEST_BEGIN(test_hash_x86_128) {
hash_variant_verify(hash_variant_x86_128);
}
TEST_END
-TEST_BEGIN(test_hash_x64_128)
-{
+TEST_BEGIN(test_hash_x64_128) {
hash_variant_verify(hash_variant_x64_128);
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_hash_x86_32,
test_hash_x86_128,
diff --git a/test/unit/junk.c b/test/unit/junk.c
index 5f34d05..02f0726 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -15,15 +15,13 @@ static void *watch_for_junking;
static bool saw_junking;
static void
-watch_junking(void *p)
-{
+watch_junking(void *p) {
watch_for_junking = p;
saw_junking = false;
}
static void
-arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info)
-{
+arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info) {
size_t i;
arena_dalloc_junk_small_orig(ptr, bin_info);
@@ -32,13 +30,13 @@ arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info)
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
- if (ptr == watch_for_junking)
+ if (ptr == watch_for_junking) {
saw_junking = true;
+ }
}
static void
-large_dalloc_junk_intercept(void *ptr, size_t usize)
-{
+large_dalloc_junk_intercept(void *ptr, size_t usize) {
size_t i;
large_dalloc_junk_orig(ptr, usize);
@@ -47,21 +45,21 @@ large_dalloc_junk_intercept(void *ptr, size_t usize)
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
}
- if (ptr == watch_for_junking)
+ if (ptr == watch_for_junking) {
saw_junking = true;
+ }
}
static void
-large_dalloc_maybe_junk_intercept(void *ptr, size_t usize)
-{
+large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) {
large_dalloc_maybe_junk_orig(ptr, usize);
- if (ptr == watch_for_junking)
+ if (ptr == watch_for_junking) {
saw_junking = true;
+ }
}
static void
-test_junk(size_t sz_min, size_t sz_max)
-{
+test_junk(size_t sz_min, size_t sz_max) {
uint8_t *s;
size_t sz_prev, sz, i;
@@ -126,23 +124,20 @@ test_junk(size_t sz_min, size_t sz_max)
}
}
-TEST_BEGIN(test_junk_small)
-{
+TEST_BEGIN(test_junk_small) {
test_skip_if(!config_fill);
test_junk(1, SMALL_MAXCLASS-1);
}
TEST_END
-TEST_BEGIN(test_junk_large)
-{
+TEST_BEGIN(test_junk_large) {
test_skip_if(!config_fill);
test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_junk_small,
test_junk_large));
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 5b734e1..a116894 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_mallctl_errors)
-{
+TEST_BEGIN(test_mallctl_errors) {
uint64_t epoch;
size_t sz;
@@ -28,8 +27,7 @@ TEST_BEGIN(test_mallctl_errors)
}
TEST_END
-TEST_BEGIN(test_mallctlnametomib_errors)
-{
+TEST_BEGIN(test_mallctlnametomib_errors) {
size_t mib[1];
size_t miblen;
@@ -39,8 +37,7 @@ TEST_BEGIN(test_mallctlnametomib_errors)
}
TEST_END
-TEST_BEGIN(test_mallctlbymib_errors)
-{
+TEST_BEGIN(test_mallctlbymib_errors) {
uint64_t epoch;
size_t sz;
size_t mib[1];
@@ -76,8 +73,7 @@ TEST_BEGIN(test_mallctlbymib_errors)
}
TEST_END
-TEST_BEGIN(test_mallctl_read_write)
-{
+TEST_BEGIN(test_mallctl_read_write) {
uint64_t old_epoch, new_epoch;
size_t sz = sizeof(old_epoch);
@@ -104,8 +100,7 @@ TEST_BEGIN(test_mallctl_read_write)
}
TEST_END
-TEST_BEGIN(test_mallctlnametomib_short_mib)
-{
+TEST_BEGIN(test_mallctlnametomib_short_mib) {
size_t mib[4];
size_t miblen;
@@ -119,8 +114,7 @@ TEST_BEGIN(test_mallctlnametomib_short_mib)
}
TEST_END
-TEST_BEGIN(test_mallctl_config)
-{
+TEST_BEGIN(test_mallctl_config) {
#define TEST_MALLCTL_CONFIG(config, t) do { \
t oldval; \
size_t sz = sizeof(oldval); \
@@ -149,8 +143,7 @@ TEST_BEGIN(test_mallctl_config)
}
TEST_END
-TEST_BEGIN(test_mallctl_opt)
-{
+TEST_BEGIN(test_mallctl_opt) {
bool config_always = true;
#define TEST_MALLCTL_OPT(t, opt, config) do { \
@@ -189,8 +182,7 @@ TEST_BEGIN(test_mallctl_opt)
}
TEST_END
-TEST_BEGIN(test_manpage_example)
-{
+TEST_BEGIN(test_manpage_example) {
unsigned nbins, i;
size_t mib[4];
size_t len, miblen;
@@ -214,8 +206,7 @@ TEST_BEGIN(test_manpage_example)
}
TEST_END
-TEST_BEGIN(test_tcache_none)
-{
+TEST_BEGIN(test_tcache_none) {
void *p0, *q, *p1;
test_skip_if(!config_tcache);
@@ -240,8 +231,7 @@ TEST_BEGIN(test_tcache_none)
}
TEST_END
-TEST_BEGIN(test_tcache)
-{
+TEST_BEGIN(test_tcache) {
#define NTCACHES 10
unsigned tis[NTCACHES];
void *ps[NTCACHES];
@@ -312,11 +302,13 @@ TEST_BEGIN(test_tcache)
assert_ptr_eq(qs[i], q0,
"Expected rallocx() to allocate cached region, i=%u", i);
/* Avoid undefined behavior in case of test failure. */
- if (qs[i] == NULL)
+ if (qs[i] == NULL) {
qs[i] = ps[i];
+ }
}
- for (i = 0; i < NTCACHES; i++)
+ for (i = 0; i < NTCACHES; i++) {
dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+ }
/* Flush some non-empty tcaches. */
for (i = 0; i < NTCACHES/2; i++) {
@@ -334,8 +326,7 @@ TEST_BEGIN(test_tcache)
}
TEST_END
-TEST_BEGIN(test_thread_arena)
-{
+TEST_BEGIN(test_thread_arena) {
unsigned arena_old, arena_new, narenas;
size_t sz = sizeof(unsigned);
@@ -353,8 +344,7 @@ TEST_BEGIN(test_thread_arena)
}
TEST_END
-TEST_BEGIN(test_arena_i_initialized)
-{
+TEST_BEGIN(test_arena_i_initialized) {
unsigned narenas, i;
size_t sz;
size_t mib[3];
@@ -392,8 +382,7 @@ TEST_BEGIN(test_arena_i_initialized)
}
TEST_END
-TEST_BEGIN(test_arena_i_decay_time)
-{
+TEST_BEGIN(test_arena_i_decay_time) {
ssize_t decay_time, orig_decay_time, prev_decay_time;
size_t sz = sizeof(ssize_t);
@@ -423,8 +412,7 @@ TEST_BEGIN(test_arena_i_decay_time)
}
TEST_END
-TEST_BEGIN(test_arena_i_purge)
-{
+TEST_BEGIN(test_arena_i_purge) {
unsigned narenas;
size_t sz = sizeof(unsigned);
size_t mib[3];
@@ -447,8 +435,7 @@ TEST_BEGIN(test_arena_i_purge)
}
TEST_END
-TEST_BEGIN(test_arena_i_decay)
-{
+TEST_BEGIN(test_arena_i_decay) {
unsigned narenas;
size_t sz = sizeof(unsigned);
size_t mib[3];
@@ -471,8 +458,7 @@ TEST_BEGIN(test_arena_i_decay)
}
TEST_END
-TEST_BEGIN(test_arena_i_dss)
-{
+TEST_BEGIN(test_arena_i_dss) {
const char *dss_prec_old, *dss_prec_new;
size_t sz = sizeof(dss_prec_old);
size_t mib[3];
@@ -517,8 +503,7 @@ TEST_BEGIN(test_arena_i_dss)
}
TEST_END
-TEST_BEGIN(test_arenas_decay_time)
-{
+TEST_BEGIN(test_arenas_decay_time) {
ssize_t decay_time, orig_decay_time, prev_decay_time;
size_t sz = sizeof(ssize_t);
@@ -548,8 +533,7 @@ TEST_BEGIN(test_arenas_decay_time)
}
TEST_END
-TEST_BEGIN(test_arenas_constants)
-{
+TEST_BEGIN(test_arenas_constants) {
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
@@ -567,8 +551,7 @@ TEST_BEGIN(test_arenas_constants)
}
TEST_END
-TEST_BEGIN(test_arenas_bin_constants)
-{
+TEST_BEGIN(test_arenas_bin_constants) {
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
@@ -586,8 +569,7 @@ TEST_BEGIN(test_arenas_bin_constants)
}
TEST_END
-TEST_BEGIN(test_arenas_lextent_constants)
-{
+TEST_BEGIN(test_arenas_lextent_constants) {
#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
@@ -602,8 +584,7 @@ TEST_BEGIN(test_arenas_lextent_constants)
}
TEST_END
-TEST_BEGIN(test_arenas_create)
-{
+TEST_BEGIN(test_arenas_create) {
unsigned narenas_before, arena, narenas_after;
size_t sz = sizeof(unsigned);
@@ -620,8 +601,7 @@ TEST_BEGIN(test_arenas_create)
}
TEST_END
-TEST_BEGIN(test_stats_arenas)
-{
+TEST_BEGIN(test_stats_arenas) {
#define TEST_STATS_ARENAS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
@@ -640,8 +620,7 @@ TEST_BEGIN(test_stats_arenas)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_mallctl_errors,
test_mallctlnametomib_errors,
diff --git a/test/unit/math.c b/test/unit/math.c
index 8e5ec61..15fc7d5 100644
--- a/test/unit/math.c
+++ b/test/unit/math.c
@@ -14,30 +14,29 @@
#endif
static bool
-double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
-{
+double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) {
double rel_err;
- if (fabs(a - b) < max_abs_err)
+ if (fabs(a - b) < max_abs_err) {
return (true);
+ }
rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
return (rel_err < max_rel_err);
}
static uint64_t
-factorial(unsigned x)
-{
+factorial(unsigned x) {
uint64_t ret = 1;
unsigned i;
- for (i = 2; i <= x; i++)
+ for (i = 2; i <= x; i++) {
ret *= (uint64_t)i;
+ }
return (ret);
}
-TEST_BEGIN(test_ln_gamma_factorial)
-{
+TEST_BEGIN(test_ln_gamma_factorial) {
unsigned x;
/* exp(ln_gamma(x)) == (x-1)! for integer x. */
@@ -188,8 +187,7 @@ static const double ln_gamma_misc_expected[] = {
359.13420536957539753
};
-TEST_BEGIN(test_ln_gamma_misc)
-{
+TEST_BEGIN(test_ln_gamma_misc) {
unsigned i;
for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
@@ -239,8 +237,7 @@ static const double pt_norm_expected[] = {
1.88079360815125041, 2.05374891063182208, 2.32634787404084076
};
-TEST_BEGIN(test_pt_norm)
-{
+TEST_BEGIN(test_pt_norm) {
unsigned i;
for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
@@ -289,8 +286,7 @@ static const double pt_chi2_expected[] = {
1046.4872561869577, 1063.5717461999654, 1107.0741966053859
};
-TEST_BEGIN(test_pt_chi2)
-{
+TEST_BEGIN(test_pt_chi2) {
unsigned i, j;
unsigned e = 0;
@@ -351,8 +347,7 @@ static const double pt_gamma_expected[] = {
4.7230515633946677, 5.6417477865306020, 8.4059469148854635
};
-TEST_BEGIN(test_pt_gamma_shape)
-{
+TEST_BEGIN(test_pt_gamma_shape) {
unsigned i, j;
unsigned e = 0;
@@ -371,8 +366,7 @@ TEST_BEGIN(test_pt_gamma_shape)
}
TEST_END
-TEST_BEGIN(test_pt_gamma_scale)
-{
+TEST_BEGIN(test_pt_gamma_scale) {
double shape = 1.0;
double ln_gamma_shape = ln_gamma(shape);
@@ -385,8 +379,7 @@ TEST_BEGIN(test_pt_gamma_scale)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_ln_gamma_factorial,
test_ln_gamma_misc,
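
Annotation: in double_eq_rel() above, the absolute-error check runs first
because relative error is useless near zero. Comparing 1e-12 against 0.0, for
instance, yields a relative error of 1.0 no matter how small both values are,
so pairs that straddle zero must be accepted on absolute terms:

	/*
	 * Fails the relative test (rel_err == 1.0) but passes on absolute
	 * error, assuming max_abs_err >= 1e-12:
	 */
	assert_true(double_eq_rel(1e-12, 0.0, 1e-9, 1e-9),
	    "Near-zero values should compare equal");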
diff --git a/test/unit/mq.c b/test/unit/mq.c
index bd289c5..95c9c50 100644
--- a/test/unit/mq.c
+++ b/test/unit/mq.c
@@ -9,8 +9,7 @@ struct mq_msg_s {
};
mq_gen(static, mq_, mq_t, mq_msg_t, link)
-TEST_BEGIN(test_mq_basic)
-{
+TEST_BEGIN(test_mq_basic) {
mq_t mq;
mq_msg_t msg;
@@ -31,8 +30,7 @@ TEST_BEGIN(test_mq_basic)
TEST_END
static void *
-thd_receiver_start(void *arg)
-{
+thd_receiver_start(void *arg) {
mq_t *mq = (mq_t *)arg;
unsigned i;
@@ -45,8 +43,7 @@ thd_receiver_start(void *arg)
}
static void *
-thd_sender_start(void *arg)
-{
+thd_sender_start(void *arg) {
mq_t *mq = (mq_t *)arg;
unsigned i;
@@ -61,8 +58,7 @@ thd_sender_start(void *arg)
return (NULL);
}
-TEST_BEGIN(test_mq_threaded)
-{
+TEST_BEGIN(test_mq_threaded) {
mq_t mq;
thd_t receiver;
thd_t senders[NSENDERS];
@@ -71,20 +67,21 @@ TEST_BEGIN(test_mq_threaded)
assert_false(mq_init(&mq), "Unexpected mq_init() failure");
thd_create(&receiver, thd_receiver_start, (void *)&mq);
- for (i = 0; i < NSENDERS; i++)
+ for (i = 0; i < NSENDERS; i++) {
thd_create(&senders[i], thd_sender_start, (void *)&mq);
+ }
thd_join(receiver, NULL);
- for (i = 0; i < NSENDERS; i++)
+ for (i = 0; i < NSENDERS; i++) {
thd_join(senders[i], NULL);
+ }
mq_fini(&mq);
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_mq_basic,
test_mq_threaded));
diff --git a/test/unit/mtx.c b/test/unit/mtx.c
index 2eccc98..0813a69 100644
--- a/test/unit/mtx.c
+++ b/test/unit/mtx.c
@@ -3,8 +3,7 @@
#define NTHREADS 2
#define NINCRS 2000000
-TEST_BEGIN(test_mtx_basic)
-{
+TEST_BEGIN(test_mtx_basic) {
mtx_t mtx;
assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
@@ -20,8 +19,7 @@ typedef struct {
} thd_start_arg_t;
static void *
-thd_start(void *varg)
-{
+thd_start(void *varg) {
thd_start_arg_t *arg = (thd_start_arg_t *)varg;
unsigned i;
@@ -33,26 +31,26 @@ thd_start(void *varg)
return (NULL);
}
-TEST_BEGIN(test_mtx_race)
-{
+TEST_BEGIN(test_mtx_race) {
thd_start_arg_t arg;
thd_t thds[NTHREADS];
unsigned i;
assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
arg.x = 0;
- for (i = 0; i < NTHREADS; i++)
+ for (i = 0; i < NTHREADS; i++) {
thd_create(&thds[i], thd_start, (void *)&arg);
- for (i = 0; i < NTHREADS; i++)
+ }
+ for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
+ }
assert_u_eq(arg.x, NTHREADS * NINCRS,
"Race-related counter corruption");
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_mtx_basic,
test_mtx_race));
diff --git a/test/unit/nstime.c b/test/unit/nstime.c
index 6548ba2..f628a8f 100644
--- a/test/unit/nstime.c
+++ b/test/unit/nstime.c
@@ -2,8 +2,7 @@
#define BILLION UINT64_C(1000000000)
-TEST_BEGIN(test_nstime_init)
-{
+TEST_BEGIN(test_nstime_init) {
nstime_t nst;
nstime_init(&nst, 42000000043);
@@ -13,8 +12,7 @@ TEST_BEGIN(test_nstime_init)
}
TEST_END
-TEST_BEGIN(test_nstime_init2)
-{
+TEST_BEGIN(test_nstime_init2) {
nstime_t nst;
nstime_init2(&nst, 42, 43);
@@ -23,8 +21,7 @@ TEST_BEGIN(test_nstime_init2)
}
TEST_END
-TEST_BEGIN(test_nstime_copy)
-{
+TEST_BEGIN(test_nstime_copy) {
nstime_t nsta, nstb;
nstime_init2(&nsta, 42, 43);
@@ -35,8 +32,7 @@ TEST_BEGIN(test_nstime_copy)
}
TEST_END
-TEST_BEGIN(test_nstime_compare)
-{
+TEST_BEGIN(test_nstime_compare) {
nstime_t nsta, nstb;
nstime_init2(&nsta, 42, 43);
@@ -70,8 +66,7 @@ TEST_BEGIN(test_nstime_compare)
}
TEST_END
-TEST_BEGIN(test_nstime_add)
-{
+TEST_BEGIN(test_nstime_add) {
nstime_t nsta, nstb;
nstime_init2(&nsta, 42, 43);
@@ -90,8 +85,7 @@ TEST_BEGIN(test_nstime_add)
}
TEST_END
-TEST_BEGIN(test_nstime_subtract)
-{
+TEST_BEGIN(test_nstime_subtract) {
nstime_t nsta, nstb;
nstime_init2(&nsta, 42, 43);
@@ -110,8 +104,7 @@ TEST_BEGIN(test_nstime_subtract)
}
TEST_END
-TEST_BEGIN(test_nstime_imultiply)
-{
+TEST_BEGIN(test_nstime_imultiply) {
nstime_t nsta, nstb;
nstime_init2(&nsta, 42, 43);
@@ -128,8 +121,7 @@ TEST_BEGIN(test_nstime_imultiply)
}
TEST_END
-TEST_BEGIN(test_nstime_idivide)
-{
+TEST_BEGIN(test_nstime_idivide) {
nstime_t nsta, nstb;
nstime_init2(&nsta, 42, 43);
@@ -148,8 +140,7 @@ TEST_BEGIN(test_nstime_idivide)
}
TEST_END
-TEST_BEGIN(test_nstime_divide)
-{
+TEST_BEGIN(test_nstime_divide) {
nstime_t nsta, nstb, nstc;
nstime_init2(&nsta, 42, 43);
@@ -176,14 +167,12 @@ TEST_BEGIN(test_nstime_divide)
}
TEST_END
-TEST_BEGIN(test_nstime_monotonic)
-{
+TEST_BEGIN(test_nstime_monotonic) {
nstime_monotonic();
}
TEST_END
-TEST_BEGIN(test_nstime_update)
-{
+TEST_BEGIN(test_nstime_update) {
nstime_t nst;
nstime_init(&nst, 0);
@@ -208,8 +197,7 @@ TEST_BEGIN(test_nstime_update)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_nstime_init,
test_nstime_init2,
diff --git a/test/unit/pack.c b/test/unit/pack.c
index 316b6df..9237ba2 100644
--- a/test/unit/pack.c
+++ b/test/unit/pack.c
@@ -20,8 +20,7 @@ const char *malloc_conf = "decay_time:-1";
#define NSLABS 8
static unsigned
-binind_compute(void)
-{
+binind_compute(void) {
size_t sz;
unsigned nbins, i;
@@ -41,8 +40,9 @@ binind_compute(void)
sz = sizeof(size);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
- if (size == SZ)
+ if (size == SZ) {
return (i);
+ }
}
test_fail("Unable to compute nregs_per_run");
@@ -50,8 +50,7 @@ binind_compute(void)
}
static size_t
-nregs_per_run_compute(void)
-{
+nregs_per_run_compute(void) {
uint32_t nregs;
size_t sz;
unsigned binind = binind_compute();
@@ -68,8 +67,7 @@ nregs_per_run_compute(void)
}
static unsigned
-arenas_create_mallctl(void)
-{
+arenas_create_mallctl(void) {
unsigned arena_ind;
size_t sz;
@@ -81,8 +79,7 @@ arenas_create_mallctl(void)
}
static void
-arena_reset_mallctl(unsigned arena_ind)
-{
+arena_reset_mallctl(unsigned arena_ind) {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
@@ -93,8 +90,7 @@ arena_reset_mallctl(unsigned arena_ind)
"Unexpected mallctlbymib() failure");
}
-TEST_BEGIN(test_pack)
-{
+TEST_BEGIN(test_pack) {
unsigned arena_ind = arenas_create_mallctl();
size_t nregs_per_run = nregs_per_run_compute();
size_t nregs = nregs_per_run * NSLABS;
@@ -125,8 +121,9 @@ TEST_BEGIN(test_pack)
i++, offset = (offset + 1) % nregs_per_run) {
for (j = 0; j < nregs_per_run; j++) {
void *p = ptrs[(i * nregs_per_run) + j];
- if (offset == j)
+ if (offset == j) {
continue;
+ }
dallocx(p, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE);
}
@@ -143,8 +140,9 @@ TEST_BEGIN(test_pack)
for (j = 0; j < nregs_per_run; j++) {
void *p;
- if (offset == j)
+ if (offset == j) {
continue;
+ }
p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE);
assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
@@ -159,8 +157,7 @@ TEST_BEGIN(test_pack)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_pack));
}
diff --git a/test/unit/pages.c b/test/unit/pages.c
index 1e6add9..b6092de 100644
--- a/test/unit/pages.c
+++ b/test/unit/pages.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_pages_huge)
-{
+TEST_BEGIN(test_pages_huge) {
size_t alloc_size;
bool commit;
void *pages, *hugepage;
@@ -22,8 +21,7 @@ TEST_BEGIN(test_pages_huge)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_pages_huge));
}
diff --git a/test/unit/ph.c b/test/unit/ph.c
index 10bf99e..e49a0e7 100644
--- a/test/unit/ph.c
+++ b/test/unit/ph.c
@@ -10,8 +10,7 @@ struct node_s {
};
static int
-node_cmp(const node_t *a, const node_t *b)
-{
+node_cmp(const node_t *a, const node_t *b) {
int ret;
ret = (a->key > b->key) - (a->key < b->key);
@@ -39,18 +38,19 @@ typedef ph(node_t) heap_t;
ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic);
static void
-node_print(const node_t *node, unsigned depth)
-{
+node_print(const node_t *node, unsigned depth) {
unsigned i;
node_t *leftmost_child, *sibling;
- for (i = 0; i < depth; i++)
+ for (i = 0; i < depth; i++) {
malloc_printf("\t");
+ }
malloc_printf("%2"FMTu64"\n", node->key);
leftmost_child = phn_lchild_get(node_t, link, node);
- if (leftmost_child == NULL)
+ if (leftmost_child == NULL) {
return;
+ }
node_print(leftmost_child, depth + 1);
for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
@@ -60,13 +60,13 @@ node_print(const node_t *node, unsigned depth)
}
static void
-heap_print(const heap_t *heap)
-{
+heap_print(const heap_t *heap) {
node_t *auxelm;
malloc_printf("vvv heap %p vvv\n", heap);
- if (heap->ph_root == NULL)
+ if (heap->ph_root == NULL) {
goto label_return;
+ }
node_print(heap->ph_root, 0);
@@ -83,8 +83,7 @@ label_return:
}
static unsigned
-node_validate(const node_t *node, const node_t *parent)
-{
+node_validate(const node_t *node, const node_t *parent) {
unsigned nnodes = 1;
node_t *leftmost_child, *sibling;
@@ -94,8 +93,9 @@ node_validate(const node_t *node, const node_t *parent)
}
leftmost_child = phn_lchild_get(node_t, link, node);
- if (leftmost_child == NULL)
+ if (leftmost_child == NULL) {
return (nnodes);
+ }
assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
(void *)node, "Leftmost child does not link to node");
nnodes += node_validate(leftmost_child, node);
@@ -111,13 +111,13 @@ node_validate(const node_t *node, const node_t *parent)
}
static unsigned
-heap_validate(const heap_t *heap)
-{
+heap_validate(const heap_t *heap) {
unsigned nnodes = 0;
node_t *auxelm;
- if (heap->ph_root == NULL)
+ if (heap->ph_root == NULL) {
goto label_return;
+ }
nnodes += node_validate(heap->ph_root, NULL);
@@ -130,13 +130,13 @@ heap_validate(const heap_t *heap)
}
label_return:
- if (false)
+ if (false) {
heap_print(heap);
+ }
return (nnodes);
}
-TEST_BEGIN(test_ph_empty)
-{
+TEST_BEGIN(test_ph_empty) {
heap_t heap;
heap_new(&heap);
@@ -146,23 +146,20 @@ TEST_BEGIN(test_ph_empty)
TEST_END
static void
-node_remove(heap_t *heap, node_t *node)
-{
+node_remove(heap_t *heap, node_t *node) {
heap_remove(heap, node);
node->magic = 0;
}
static node_t *
-node_remove_first(heap_t *heap)
-{
+node_remove_first(heap_t *heap) {
node_t *node = heap_remove_first(heap);
node->magic = 0;
return (node);
}
-TEST_BEGIN(test_ph_random)
-{
+TEST_BEGIN(test_ph_random) {
#define NNODES 25
#define NBAGS 250
#define SEED 42
@@ -177,17 +174,20 @@ TEST_BEGIN(test_ph_random)
switch (i) {
case 0:
/* Insert in order. */
- for (j = 0; j < NNODES; j++)
+ for (j = 0; j < NNODES; j++) {
bag[j] = j;
+ }
break;
case 1:
/* Insert in reverse order. */
- for (j = 0; j < NNODES; j++)
+ for (j = 0; j < NNODES; j++) {
bag[j] = NNODES - j - 1;
+ }
break;
default:
- for (j = 0; j < NNODES; j++)
+ for (j = 0; j < NNODES; j++) {
bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
}
for (j = 1; j <= NNODES; j++) {
@@ -280,8 +280,7 @@ TEST_BEGIN(test_ph_random)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_ph_empty,
test_ph_random));
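
Annotation: for orientation, the print/validate walkers above encode the
pairing-heap layout these tests check. A node's children form a sibling chain
reached through its leftmost child, with phn_prev of the leftmost child
pointing back at the parent (asserted in node_validate()). Sketched from the
traversal above, with visit() standing in for the recursive print/validate
step:

	leftmost_child = phn_lchild_get(node_t, link, node);
	if (leftmost_child == NULL) {
		return;	/* Leaf node. */
	}
	visit(leftmost_child);
	for (sibling = phn_next_get(node_t, link, leftmost_child);
	    sibling != NULL; sibling = phn_next_get(node_t, link, sibling)) {
		visit(sibling);	/* Remaining children of node. */
	}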
diff --git a/test/unit/prng.c b/test/unit/prng.c
index f32d82a..b26da36 100644
--- a/test/unit/prng.c
+++ b/test/unit/prng.c
@@ -1,8 +1,7 @@
#include "test/jemalloc_test.h"
static void
-test_prng_lg_range_u32(bool atomic)
-{
+test_prng_lg_range_u32(bool atomic) {
uint32_t sa, sb, ra, rb;
unsigned lg_range;
@@ -38,8 +37,7 @@ test_prng_lg_range_u32(bool atomic)
}
static void
-test_prng_lg_range_u64(void)
-{
+test_prng_lg_range_u64(void) {
uint64_t sa, sb, ra, rb;
unsigned lg_range;
@@ -75,8 +73,7 @@ test_prng_lg_range_u64(void)
}
static void
-test_prng_lg_range_zu(bool atomic)
-{
+test_prng_lg_range_zu(bool atomic) {
size_t sa, sb, ra, rb;
unsigned lg_range;
@@ -112,39 +109,33 @@ test_prng_lg_range_zu(bool atomic)
}
}
-TEST_BEGIN(test_prng_lg_range_u32_nonatomic)
-{
+TEST_BEGIN(test_prng_lg_range_u32_nonatomic) {
test_prng_lg_range_u32(false);
}
TEST_END
-TEST_BEGIN(test_prng_lg_range_u32_atomic)
-{
+TEST_BEGIN(test_prng_lg_range_u32_atomic) {
test_prng_lg_range_u32(true);
}
TEST_END
-TEST_BEGIN(test_prng_lg_range_u64_nonatomic)
-{
+TEST_BEGIN(test_prng_lg_range_u64_nonatomic) {
test_prng_lg_range_u64();
}
TEST_END
-TEST_BEGIN(test_prng_lg_range_zu_nonatomic)
-{
+TEST_BEGIN(test_prng_lg_range_zu_nonatomic) {
test_prng_lg_range_zu(false);
}
TEST_END
-TEST_BEGIN(test_prng_lg_range_zu_atomic)
-{
+TEST_BEGIN(test_prng_lg_range_zu_atomic) {
test_prng_lg_range_zu(true);
}
TEST_END
static void
-test_prng_range_u32(bool atomic)
-{
+test_prng_range_u32(bool atomic) {
uint32_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
@@ -164,8 +155,7 @@ test_prng_range_u32(bool atomic)
}
static void
-test_prng_range_u64(void)
-{
+test_prng_range_u64(void) {
uint64_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
@@ -185,8 +175,7 @@ test_prng_range_u64(void)
}
static void
-test_prng_range_zu(bool atomic)
-{
+test_prng_range_zu(bool atomic) {
size_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
@@ -205,39 +194,33 @@ test_prng_range_zu(bool atomic)
}
}
-TEST_BEGIN(test_prng_range_u32_nonatomic)
-{
+TEST_BEGIN(test_prng_range_u32_nonatomic) {
test_prng_range_u32(false);
}
TEST_END
-TEST_BEGIN(test_prng_range_u32_atomic)
-{
+TEST_BEGIN(test_prng_range_u32_atomic) {
test_prng_range_u32(true);
}
TEST_END
-TEST_BEGIN(test_prng_range_u64_nonatomic)
-{
+TEST_BEGIN(test_prng_range_u64_nonatomic) {
test_prng_range_u64();
}
TEST_END
-TEST_BEGIN(test_prng_range_zu_nonatomic)
-{
+TEST_BEGIN(test_prng_range_zu_nonatomic) {
test_prng_range_zu(false);
}
TEST_END
-TEST_BEGIN(test_prng_range_zu_atomic)
-{
+TEST_BEGIN(test_prng_range_zu_atomic) {
test_prng_range_zu(true);
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_prng_lg_range_u32_nonatomic,
test_prng_lg_range_u32_atomic,
diff --git a/test/unit/prof_accum.c b/test/unit/prof_accum.c
index 41ebeea..bed0c9a 100644
--- a/test/unit/prof_accum.c
+++ b/test/unit/prof_accum.c
@@ -11,8 +11,7 @@ const char *malloc_conf =
#endif
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
int fd;
fd = open("/dev/null", O_WRONLY);
@@ -22,14 +21,12 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
}
static void *
-alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
-{
+alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) {
return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration));
}
static void *
-thd_start(void *varg)
-{
+thd_start(void *varg) {
unsigned thd_ind = *(unsigned *)varg;
size_t bt_count_prev, bt_count;
unsigned i_prev, i;
@@ -57,8 +54,7 @@ thd_start(void *varg)
return (NULL);
}
-TEST_BEGIN(test_idump)
-{
+TEST_BEGIN(test_idump) {
bool active;
thd_t thds[NTHREADS];
unsigned thd_args[NTHREADS];
@@ -77,14 +73,14 @@ TEST_BEGIN(test_idump)
thd_args[i] = i;
thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
}
- for (i = 0; i < NTHREADS; i++)
+ for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
+ }
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_idump));
}
diff --git a/test/unit/prof_active.c b/test/unit/prof_active.c
index d3b341d..422024f 100644
--- a/test/unit/prof_active.c
+++ b/test/unit/prof_active.c
@@ -6,8 +6,7 @@ const char *malloc_conf =
#endif
static void
-mallctl_bool_get(const char *name, bool expected, const char *func, int line)
-{
+mallctl_bool_get(const char *name, bool expected, const char *func, int line) {
bool old;
size_t sz;
@@ -20,8 +19,7 @@ mallctl_bool_get(const char *name, bool expected, const char *func, int line)
static void
mallctl_bool_set(const char *name, bool old_expected, bool val_new,
- const char *func, int line)
-{
+ const char *func, int line) {
bool old;
size_t sz;
@@ -36,8 +34,7 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new,
static void
mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
- int line)
-{
+ int line) {
mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
}
#define mallctl_prof_active_get(a) \
@@ -45,8 +42,7 @@ mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
static void
mallctl_prof_active_set_impl(bool prof_active_old_expected,
- bool prof_active_new, const char *func, int line)
-{
+ bool prof_active_new, const char *func, int line) {
mallctl_bool_set("prof.active", prof_active_old_expected,
prof_active_new, func, line);
}
@@ -55,8 +51,7 @@ mallctl_prof_active_set_impl(bool prof_active_old_expected,
static void
mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
- const char *func, int line)
-{
+ const char *func, int line) {
mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
func, line);
}
@@ -65,8 +60,7 @@ mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
static void
mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
- bool thread_prof_active_new, const char *func, int line)
-{
+ bool thread_prof_active_new, const char *func, int line) {
mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
thread_prof_active_new, func, line);
}
@@ -74,8 +68,7 @@ mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
static void
-prof_sampling_probe_impl(bool expect_sample, const char *func, int line)
-{
+prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
void *p;
size_t expected_backtraces = expect_sample ? 1 : 0;
@@ -90,8 +83,7 @@ prof_sampling_probe_impl(bool expect_sample, const char *func, int line)
#define prof_sampling_probe(a) \
prof_sampling_probe_impl(a, __func__, __LINE__)
-TEST_BEGIN(test_prof_active)
-{
+TEST_BEGIN(test_prof_active) {
test_skip_if(!config_prof);
mallctl_prof_active_get(true);
@@ -124,8 +116,7 @@ TEST_BEGIN(test_prof_active)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_prof_active));
}
diff --git a/test/unit/prof_gdump.c b/test/unit/prof_gdump.c
index 53f7cad..0d8ec71 100644
--- a/test/unit/prof_gdump.c
+++ b/test/unit/prof_gdump.c
@@ -7,8 +7,7 @@ const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true";
static bool did_prof_dump_open;
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
int fd;
did_prof_dump_open = true;
@@ -19,8 +18,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
return (fd);
}
-TEST_BEGIN(test_gdump)
-{
+TEST_BEGIN(test_gdump) {
bool active, gdump, gdump_old;
void *p, *q, *r, *s;
size_t sz;
@@ -74,8 +72,7 @@ TEST_BEGIN(test_gdump)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_gdump));
}
diff --git a/test/unit/prof_idump.c b/test/unit/prof_idump.c
index 43824c6..393211e 100644
--- a/test/unit/prof_idump.c
+++ b/test/unit/prof_idump.c
@@ -16,8 +16,7 @@ const char *malloc_conf = ""
static bool did_prof_dump_open;
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
int fd;
did_prof_dump_open = true;
@@ -28,8 +27,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
return (fd);
}
-TEST_BEGIN(test_idump)
-{
+TEST_BEGIN(test_idump) {
bool active;
void *p;
@@ -51,8 +49,7 @@ TEST_BEGIN(test_idump)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_idump));
}
diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c
index cc13e37..463f689 100644
--- a/test/unit/prof_reset.c
+++ b/test/unit/prof_reset.c
@@ -6,8 +6,7 @@ const char *malloc_conf =
#endif
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename)
-{
+prof_dump_open_intercept(bool propagate_err, const char *filename) {
int fd;
fd = open("/dev/null", O_WRONLY);
@@ -17,15 +16,13 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
}
static void
-set_prof_active(bool active)
-{
+set_prof_active(bool active) {
assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0, "Unexpected mallctl failure");
}
static size_t
-get_lg_prof_sample(void)
-{
+get_lg_prof_sample(void) {
size_t lg_prof_sample;
size_t sz = sizeof(size_t);
@@ -36,8 +33,7 @@ get_lg_prof_sample(void)
}
static void
-do_prof_reset(size_t lg_prof_sample)
-{
+do_prof_reset(size_t lg_prof_sample) {
assert_d_eq(mallctl("prof.reset", NULL, NULL,
(void *)&lg_prof_sample, sizeof(size_t)), 0,
"Unexpected mallctl failure while resetting profile data");
@@ -45,8 +41,7 @@ do_prof_reset(size_t lg_prof_sample)
"Expected profile sample rate change");
}
-TEST_BEGIN(test_prof_reset_basic)
-{
+TEST_BEGIN(test_prof_reset_basic) {
size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
size_t sz;
unsigned i;
@@ -95,16 +90,14 @@ bool prof_dump_header_intercepted = false;
prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
static bool
prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
- const prof_cnt_t *cnt_all)
-{
+ const prof_cnt_t *cnt_all) {
prof_dump_header_intercepted = true;
memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
return (false);
}
-TEST_BEGIN(test_prof_reset_cleanup)
-{
+TEST_BEGIN(test_prof_reset_cleanup) {
void *p;
prof_dump_header_t *prof_dump_header_orig;
@@ -148,8 +141,7 @@ TEST_END
#define RESET_INTERVAL (1U << 10)
#define DUMP_INTERVAL 3677
static void *
-thd_start(void *varg)
-{
+thd_start(void *varg) {
unsigned thd_ind = *(unsigned *)varg;
unsigned i;
void *objs[OBJ_RING_BUF_COUNT];
@@ -192,8 +184,7 @@ thd_start(void *varg)
return (NULL);
}
-TEST_BEGIN(test_prof_reset)
-{
+TEST_BEGIN(test_prof_reset) {
size_t lg_prof_sample_orig;
thd_t thds[NTHREADS];
unsigned thd_args[NTHREADS];
@@ -216,8 +207,9 @@ TEST_BEGIN(test_prof_reset)
thd_args[i] = i;
thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
}
- for (i = 0; i < NTHREADS; i++)
+ for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
+ }
assert_zu_eq(prof_bt_count(), bt_count,
"Unexpected backtrace count change");
@@ -237,8 +229,7 @@ TEST_END
/* Test sampling at the same allocation site across resets. */
#define NITER 10
-TEST_BEGIN(test_xallocx)
-{
+TEST_BEGIN(test_xallocx) {
size_t lg_prof_sample_orig;
unsigned i;
void *ptrs[NITER];
@@ -288,8 +279,7 @@ TEST_END
#undef NITER
int
-main(void)
-{
+main(void) {
/* Intercept dumping prior to running any tests. */
prof_dump_open = prof_dump_open_intercept;
diff --git a/test/unit/prof_tctx.c b/test/unit/prof_tctx.c
index 8f928eb..2e35b7e 100644
--- a/test/unit/prof_tctx.c
+++ b/test/unit/prof_tctx.c
@@ -4,8 +4,7 @@
const char *malloc_conf = "prof:true,lg_prof_sample:0";
#endif
-TEST_BEGIN(test_prof_realloc)
-{
+TEST_BEGIN(test_prof_realloc) {
tsdn_t *tsdn;
int flags;
void *p, *q;
@@ -50,8 +49,7 @@ TEST_BEGIN(test_prof_realloc)
TEST_END
int
-main(void)
-{
+main(void) {
return test(
test_prof_realloc);
}
diff --git a/test/unit/prof_thread_name.c b/test/unit/prof_thread_name.c
index 8699936..ba86e10 100644
--- a/test/unit/prof_thread_name.c
+++ b/test/unit/prof_thread_name.c
@@ -6,8 +6,7 @@ const char *malloc_conf = "prof:true,prof_active:false";
static void
mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
- int line)
-{
+ int line) {
const char *thread_name_old;
size_t sz;
@@ -24,8 +23,7 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
static void
mallctl_thread_name_set_impl(const char *thread_name, const char *func,
- int line)
-{
+ int line) {
assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
(void *)&thread_name, sizeof(thread_name)), 0,
"%s():%d: Unexpected mallctl failure reading thread.prof.name",
@@ -35,8 +33,7 @@ mallctl_thread_name_set_impl(const char *thread_name, const char *func,
#define mallctl_thread_name_set(a) \
mallctl_thread_name_set_impl(a, __func__, __LINE__)
-TEST_BEGIN(test_prof_thread_name_validation)
-{
+TEST_BEGIN(test_prof_thread_name_validation) {
const char *thread_name;
test_skip_if(!config_prof);
@@ -78,8 +75,7 @@ TEST_END
#define NTHREADS 4
#define NRESET 25
static void *
-thd_start(void *varg)
-{
+thd_start(void *varg) {
unsigned thd_ind = *(unsigned *)varg;
char thread_name[16] = "";
unsigned i;
@@ -101,8 +97,7 @@ thd_start(void *varg)
return (NULL);
}
-TEST_BEGIN(test_prof_thread_name_threaded)
-{
+TEST_BEGIN(test_prof_thread_name_threaded) {
thd_t thds[NTHREADS];
unsigned thd_args[NTHREADS];
unsigned i;
@@ -113,16 +108,16 @@ TEST_BEGIN(test_prof_thread_name_threaded)
thd_args[i] = i;
thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
}
- for (i = 0; i < NTHREADS; i++)
+ for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
+ }
}
TEST_END
#undef NTHREADS
#undef NRESET
int
-main(void)
-{
+main(void) {
return (test(
test_prof_thread_name_validation,
test_prof_thread_name_threaded));
diff --git a/test/unit/ql.c b/test/unit/ql.c
index 2ebb450..0bb896c 100644
--- a/test/unit/ql.c
+++ b/test/unit/ql.c
@@ -12,8 +12,7 @@ struct list_s {
};
static void
-test_empty_list(list_head_t *head)
-{
+test_empty_list(list_head_t *head) {
list_t *t;
unsigned i;
@@ -34,8 +33,7 @@ test_empty_list(list_head_t *head)
assert_u_eq(i, 0, "Unexpected element for empty list");
}
-TEST_BEGIN(test_ql_empty)
-{
+TEST_BEGIN(test_ql_empty) {
list_head_t head;
ql_new(&head);
@@ -44,8 +42,7 @@ TEST_BEGIN(test_ql_empty)
TEST_END
static void
-init_entries(list_t *entries, unsigned nentries)
-{
+init_entries(list_t *entries, unsigned nentries) {
unsigned i;
for (i = 0; i < nentries; i++) {
@@ -55,8 +52,7 @@ init_entries(list_t *entries, unsigned nentries)
}
static void
-test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
-{
+test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) {
list_t *t;
unsigned i;
@@ -91,31 +87,31 @@ test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
}
}
-TEST_BEGIN(test_ql_tail_insert)
-{
+TEST_BEGIN(test_ql_tail_insert) {
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
- for (i = 0; i < NENTRIES; i++)
+ for (i = 0; i < NENTRIES; i++) {
ql_tail_insert(&head, &entries[i], link);
+ }
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
-TEST_BEGIN(test_ql_tail_remove)
-{
+TEST_BEGIN(test_ql_tail_remove) {
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
- for (i = 0; i < NENTRIES; i++)
+ for (i = 0; i < NENTRIES; i++) {
ql_tail_insert(&head, &entries[i], link);
+ }
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, entries, NENTRIES-i);
@@ -125,31 +121,31 @@ TEST_BEGIN(test_ql_tail_remove)
}
TEST_END
-TEST_BEGIN(test_ql_head_insert)
-{
+TEST_BEGIN(test_ql_head_insert) {
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
- for (i = 0; i < NENTRIES; i++)
+ for (i = 0; i < NENTRIES; i++) {
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+ }
test_entries_list(&head, entries, NENTRIES);
}
TEST_END
-TEST_BEGIN(test_ql_head_remove)
-{
+TEST_BEGIN(test_ql_head_remove) {
list_head_t head;
list_t entries[NENTRIES];
unsigned i;
ql_new(&head);
init_entries(entries, sizeof(entries)/sizeof(list_t));
- for (i = 0; i < NENTRIES; i++)
+ for (i = 0; i < NENTRIES; i++) {
ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+ }
for (i = 0; i < NENTRIES; i++) {
test_entries_list(&head, &entries[i], NENTRIES-i);
@@ -159,8 +155,7 @@ TEST_BEGIN(test_ql_head_remove)
}
TEST_END
-TEST_BEGIN(test_ql_insert)
-{
+TEST_BEGIN(test_ql_insert) {
list_head_t head;
list_t entries[8];
list_t *a, *b, *c, *d, *e, *f, *g, *h;
@@ -196,8 +191,7 @@ TEST_BEGIN(test_ql_insert)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_ql_empty,
test_ql_tail_insert,
diff --git a/test/unit/qr.c b/test/unit/qr.c
index 7c9c102..8061a34 100644
--- a/test/unit/qr.c
+++ b/test/unit/qr.c
@@ -13,8 +13,7 @@ struct ring_s {
};
static void
-init_entries(ring_t *entries)
-{
+init_entries(ring_t *entries) {
unsigned i;
for (i = 0; i < NENTRIES; i++) {
@@ -24,8 +23,7 @@ init_entries(ring_t *entries)
}
static void
-test_independent_entries(ring_t *entries)
-{
+test_independent_entries(ring_t *entries) {
ring_t *t;
unsigned i, j;
@@ -61,8 +59,7 @@ test_independent_entries(ring_t *entries)
}
}
-TEST_BEGIN(test_qr_one)
-{
+TEST_BEGIN(test_qr_one) {
ring_t entries[NENTRIES];
init_entries(entries);
@@ -71,8 +68,7 @@ TEST_BEGIN(test_qr_one)
TEST_END
static void
-test_entries_ring(ring_t *entries)
-{
+test_entries_ring(ring_t *entries) {
ring_t *t;
unsigned i, j;
@@ -104,27 +100,27 @@ test_entries_ring(ring_t *entries)
}
}
-TEST_BEGIN(test_qr_after_insert)
-{
+TEST_BEGIN(test_qr_after_insert) {
ring_t entries[NENTRIES];
unsigned i;
init_entries(entries);
- for (i = 1; i < NENTRIES; i++)
+ for (i = 1; i < NENTRIES; i++) {
qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
test_entries_ring(entries);
}
TEST_END
-TEST_BEGIN(test_qr_remove)
-{
+TEST_BEGIN(test_qr_remove) {
ring_t entries[NENTRIES];
ring_t *t;
unsigned i, j;
init_entries(entries);
- for (i = 1; i < NENTRIES; i++)
+ for (i = 1; i < NENTRIES; i++) {
qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
for (i = 0; i < NENTRIES; i++) {
j = 0;
@@ -145,15 +141,15 @@ TEST_BEGIN(test_qr_remove)
}
TEST_END
-TEST_BEGIN(test_qr_before_insert)
-{
+TEST_BEGIN(test_qr_before_insert) {
ring_t entries[NENTRIES];
ring_t *t;
unsigned i, j;
init_entries(entries);
- for (i = 1; i < NENTRIES; i++)
+ for (i = 1; i < NENTRIES; i++) {
qr_before_insert(&entries[i - 1], &entries[i], link);
+ }
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
@@ -184,8 +180,7 @@ TEST_BEGIN(test_qr_before_insert)
TEST_END
static void
-test_split_entries(ring_t *entries)
-{
+test_split_entries(ring_t *entries) {
ring_t *t;
unsigned i, j;
@@ -206,14 +201,14 @@ test_split_entries(ring_t *entries)
}
}
-TEST_BEGIN(test_qr_meld_split)
-{
+TEST_BEGIN(test_qr_meld_split) {
ring_t entries[NENTRIES];
unsigned i;
init_entries(entries);
- for (i = 1; i < NENTRIES; i++)
+ for (i = 1; i < NENTRIES; i++) {
qr_after_insert(&entries[i - 1], &entries[i], link);
+ }
qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
test_split_entries(entries);
@@ -236,8 +231,7 @@ TEST_BEGIN(test_qr_meld_split)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_qr_one,
test_qr_after_insert,
diff --git a/test/unit/rb.c b/test/unit/rb.c
index 56e0021..dea86c6 100644
--- a/test/unit/rb.c
+++ b/test/unit/rb.c
@@ -1,14 +1,14 @@
#include "test/jemalloc_test.h"
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
- a_type *rbp_bh_t; \
- for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
- rbp_bh_t != NULL; \
- rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
- if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
- (r_height)++; \
+ a_type *rbp_bh_t; \
+ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \
+ NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \
+ rbp_bh_t)) { \
+ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
+ (r_height)++; \
+ } \
} \
- } \
} while (0)
typedef struct node_s node_t;
@@ -42,8 +42,7 @@ node_cmp(const node_t *a, const node_t *b) {
typedef rb_tree(node_t) tree_t;
rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
-TEST_BEGIN(test_rb_empty)
-{
+TEST_BEGIN(test_rb_empty) {
tree_t tree;
node_t key;
@@ -68,52 +67,56 @@ TEST_BEGIN(test_rb_empty)
TEST_END
static unsigned
-tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
-{
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
unsigned ret = 0;
node_t *left_node;
node_t *right_node;
- if (node == NULL)
+ if (node == NULL) {
return (ret);
+ }
left_node = rbtn_left_get(node_t, link, node);
right_node = rbtn_right_get(node_t, link, node);
- if (!rbtn_red_get(node_t, link, node))
+ if (!rbtn_red_get(node_t, link, node)) {
black_depth++;
+ }
/* Red nodes must be interleaved with black nodes. */
if (rbtn_red_get(node_t, link, node)) {
- if (left_node != NULL)
+ if (left_node != NULL) {
assert_false(rbtn_red_get(node_t, link, left_node),
"Node should be black");
- if (right_node != NULL)
+ }
+ if (right_node != NULL) {
assert_false(rbtn_red_get(node_t, link, right_node),
"Node should be black");
+ }
}
/* Self. */
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Left subtree. */
- if (left_node != NULL)
+ if (left_node != NULL) {
ret += tree_recurse(left_node, black_height, black_depth);
- else
+ } else {
ret += (black_depth != black_height);
+ }
/* Right subtree. */
- if (right_node != NULL)
+ if (right_node != NULL) {
ret += tree_recurse(right_node, black_height, black_depth);
- else
+ } else {
ret += (black_depth != black_height);
+ }
return (ret);
}
static node_t *
-tree_iterate_cb(tree_t *tree, node_t *node, void *data)
-{
+tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
unsigned *i = (unsigned *)data;
node_t *search_node;
@@ -140,8 +143,7 @@ tree_iterate_cb(tree_t *tree, node_t *node, void *data)
}
static unsigned
-tree_iterate(tree_t *tree)
-{
+tree_iterate(tree_t *tree) {
unsigned i;
i = 0;
@@ -151,8 +153,7 @@ tree_iterate(tree_t *tree)
}
static unsigned
-tree_iterate_reverse(tree_t *tree)
-{
+tree_iterate_reverse(tree_t *tree) {
unsigned i;
i = 0;
@@ -162,8 +163,7 @@ tree_iterate_reverse(tree_t *tree)
}
static void
-node_remove(tree_t *tree, node_t *node, unsigned nnodes)
-{
+node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
node_t *search_node;
unsigned black_height, imbalances;
@@ -195,8 +195,7 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes)
}
static node_t *
-remove_iterate_cb(tree_t *tree, node_t *node, void *data)
-{
+remove_iterate_cb(tree_t *tree, node_t *node, void *data) {
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_next(tree, node);
@@ -206,8 +205,7 @@ remove_iterate_cb(tree_t *tree, node_t *node, void *data)
}
static node_t *
-remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
-{
+remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) {
unsigned *nnodes = (unsigned *)data;
node_t *ret = tree_prev(tree, node);
@@ -217,16 +215,14 @@ remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
}
static void
-destroy_cb(node_t *node, void *data)
-{
+destroy_cb(node_t *node, void *data) {
unsigned *nnodes = (unsigned *)data;
assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
(*nnodes)--;
}
-TEST_BEGIN(test_rb_random)
-{
+TEST_BEGIN(test_rb_random) {
#define NNODES 25
#define NBAGS 250
#define SEED 42
@@ -241,17 +237,20 @@ TEST_BEGIN(test_rb_random)
switch (i) {
case 0:
/* Insert in order. */
- for (j = 0; j < NNODES; j++)
+ for (j = 0; j < NNODES; j++) {
bag[j] = j;
+ }
break;
case 1:
/* Insert in reverse order. */
- for (j = 0; j < NNODES; j++)
+ for (j = 0; j < NNODES; j++) {
bag[j] = NNODES - j - 1;
+ }
break;
default:
- for (j = 0; j < NNODES; j++)
+ for (j = 0; j < NNODES; j++) {
bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
}
for (j = 1; j <= NNODES; j++) {
@@ -292,12 +291,14 @@ TEST_BEGIN(test_rb_random)
/* Remove nodes. */
switch (i % 5) {
case 0:
- for (k = 0; k < j; k++)
+ for (k = 0; k < j; k++) {
node_remove(&tree, &nodes[k], j - k);
+ }
break;
case 1:
- for (k = j; k > 0; k--)
+ for (k = j; k > 0; k--) {
node_remove(&tree, &nodes[k-1], k);
+ }
break;
case 2: {
node_t *start;
@@ -345,8 +346,7 @@ TEST_BEGIN(test_rb_random)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_rb_empty,
test_rb_random));
diff --git a/test/unit/rtree.c b/test/unit/rtree.c
index d2f3705..ca99f8a 100644
--- a/test/unit/rtree.c
+++ b/test/unit/rtree.c
@@ -6,12 +6,12 @@ rtree_node_dalloc_t *rtree_node_dalloc_orig;
rtree_t *test_rtree;
static rtree_elm_t *
-rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
-{
+rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
rtree_elm_t *node;
- if (rtree != test_rtree)
+ if (rtree != test_rtree) {
return rtree_node_alloc_orig(tsdn, rtree, nelms);
+ }
malloc_mutex_unlock(tsdn, &rtree->init_lock);
node = (rtree_elm_t *)calloc(nelms, sizeof(rtree_elm_t));
@@ -22,8 +22,7 @@ rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
}
static void
-rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node)
-{
+rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) {
if (rtree != test_rtree) {
rtree_node_dalloc_orig(tsdn, rtree, node);
return;
@@ -32,8 +31,7 @@ rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node)
free(node);
}
-TEST_BEGIN(test_rtree_read_empty)
-{
+TEST_BEGIN(test_rtree_read_empty) {
tsdn_t *tsdn;
unsigned i;
@@ -65,8 +63,7 @@ typedef struct {
} thd_start_arg_t;
static void *
-thd_start(void *varg)
-{
+thd_start(void *varg) {
thd_start_arg_t *arg = (thd_start_arg_t *)varg;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
sfmt_t *sfmt;
@@ -98,8 +95,9 @@ thd_start(void *varg)
"Unexpected rtree_elm_acquire() failure");
rtree_elm_read_acquired(tsdn, &arg->rtree, elm);
rtree_elm_release(tsdn, &arg->rtree, elm);
- } else
+ } else {
rtree_read(tsdn, &arg->rtree, &rtree_ctx, key, false);
+ }
}
free(extent);
@@ -107,8 +105,7 @@ thd_start(void *varg)
return (NULL);
}
-TEST_BEGIN(test_rtree_concurrent)
-{
+TEST_BEGIN(test_rtree_concurrent) {
thd_start_arg_t arg;
thd_t thds[NTHREADS];
sfmt_t *sfmt;
@@ -123,10 +120,12 @@ TEST_BEGIN(test_rtree_concurrent)
assert_false(rtree_new(&arg.rtree, arg.nbits),
"Unexpected rtree_new() failure");
arg.seed = gen_rand32(sfmt);
- for (j = 0; j < NTHREADS; j++)
+ for (j = 0; j < NTHREADS; j++) {
thd_create(&thds[j], thd_start, (void *)&arg);
- for (j = 0; j < NTHREADS; j++)
+ }
+ for (j = 0; j < NTHREADS; j++) {
thd_join(thds[j], NULL);
+ }
rtree_delete(tsdn, &arg.rtree);
test_rtree = NULL;
}
@@ -139,8 +138,7 @@ TEST_END
#undef NITERS
#undef SEED
-TEST_BEGIN(test_rtree_extrema)
-{
+TEST_BEGIN(test_rtree_extrema) {
unsigned i;
extent_t extent_a, extent_b;
tsdn_t *tsdn;
@@ -173,8 +171,7 @@ TEST_BEGIN(test_rtree_extrema)
}
TEST_END
-TEST_BEGIN(test_rtree_bits)
-{
+TEST_BEGIN(test_rtree_bits) {
tsdn_t *tsdn;
unsigned i, j, k;
@@ -217,8 +214,7 @@ TEST_BEGIN(test_rtree_bits)
}
TEST_END
-TEST_BEGIN(test_rtree_random)
-{
+TEST_BEGIN(test_rtree_random) {
unsigned i;
sfmt_t *sfmt;
tsdn_t *tsdn;
@@ -280,8 +276,7 @@ TEST_BEGIN(test_rtree_random)
TEST_END
int
-main(void)
-{
+main(void) {
rtree_node_alloc_orig = rtree_node_alloc;
rtree_node_alloc = rtree_node_alloc_intercept;
rtree_node_dalloc_orig = rtree_node_dalloc;
diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c
index f7c14bc..38ea9be 100644
--- a/test/unit/size_classes.c
+++ b/test/unit/size_classes.c
@@ -1,8 +1,7 @@
#include "test/jemalloc_test.h"
static size_t
-get_max_size_class(void)
-{
+get_max_size_class(void) {
unsigned nlextents;
size_t mib[4];
size_t sz, miblen, max_size_class;
@@ -23,8 +22,7 @@ get_max_size_class(void)
return (max_size_class);
}
-TEST_BEGIN(test_size_classes)
-{
+TEST_BEGIN(test_size_classes) {
size_t size_class, max_size_class;
szind_t index, max_index;
@@ -80,8 +78,7 @@ TEST_BEGIN(test_size_classes)
}
TEST_END
-TEST_BEGIN(test_psize_classes)
-{
+TEST_BEGIN(test_psize_classes) {
size_t size_class, max_psz;
pszind_t pind, max_pind;
@@ -136,8 +133,7 @@ TEST_BEGIN(test_psize_classes)
}
TEST_END
-TEST_BEGIN(test_overflow)
-{
+TEST_BEGIN(test_overflow) {
size_t max_size_class, max_psz;
max_size_class = get_max_size_class();
@@ -176,8 +172,7 @@ TEST_BEGIN(test_overflow)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_size_classes,
test_psize_classes,
diff --git a/test/unit/slab.c b/test/unit/slab.c
index 7e6a62f..a5036f5 100644
--- a/test/unit/slab.c
+++ b/test/unit/slab.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_arena_slab_regind)
-{
+TEST_BEGIN(test_arena_slab_regind) {
szind_t binind;
for (binind = 0; binind < NBINS; binind++) {
@@ -27,8 +26,7 @@ TEST_BEGIN(test_arena_slab_regind)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_arena_slab_regind));
}
diff --git a/test/unit/smoothstep.c b/test/unit/smoothstep.c
index 071aede..ac27915 100644
--- a/test/unit/smoothstep.c
+++ b/test/unit/smoothstep.c
@@ -7,8 +7,7 @@ static const uint64_t smoothstep_tab[] = {
#undef STEP
};
-TEST_BEGIN(test_smoothstep_integral)
-{
+TEST_BEGIN(test_smoothstep_integral) {
uint64_t sum, min, max;
unsigned i;
@@ -20,8 +19,9 @@ TEST_BEGIN(test_smoothstep_integral)
* integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
*/
sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
sum += smoothstep_tab[i];
+ }
max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
min = max - SMOOTHSTEP_NSTEPS;
@@ -36,8 +36,7 @@ TEST_BEGIN(test_smoothstep_integral)
}
TEST_END
-TEST_BEGIN(test_smoothstep_monotonic)
-{
+TEST_BEGIN(test_smoothstep_monotonic) {
uint64_t prev_h;
unsigned i;
@@ -58,8 +57,7 @@ TEST_BEGIN(test_smoothstep_monotonic)
}
TEST_END
-TEST_BEGIN(test_smoothstep_slope)
-{
+TEST_BEGIN(test_smoothstep_slope) {
uint64_t prev_h, prev_delta;
unsigned i;
@@ -96,8 +94,7 @@ TEST_BEGIN(test_smoothstep_slope)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_smoothstep_integral,
test_smoothstep_monotonic,
diff --git a/test/unit/stats.c b/test/unit/stats.c
index 18856f1..98673a8 100644
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_stats_summary)
-{
+TEST_BEGIN(test_stats_summary) {
size_t sz, allocated, active, resident, mapped;
int expected = config_stats ? 0 : ENOENT;
@@ -26,8 +25,7 @@ TEST_BEGIN(test_stats_summary)
}
TEST_END
-TEST_BEGIN(test_stats_large)
-{
+TEST_BEGIN(test_stats_large) {
void *p;
uint64_t epoch;
size_t allocated;
@@ -67,8 +65,7 @@ TEST_BEGIN(test_stats_large)
}
TEST_END
-TEST_BEGIN(test_stats_arenas_summary)
-{
+TEST_BEGIN(test_stats_arenas_summary) {
unsigned arena;
void *little, *large;
uint64_t epoch;
@@ -118,22 +115,19 @@ TEST_BEGIN(test_stats_arenas_summary)
TEST_END
void *
-thd_start(void *arg)
-{
+thd_start(void *arg) {
return (NULL);
}
static void
-no_lazy_lock(void)
-{
+no_lazy_lock(void) {
thd_t thd;
thd_create(&thd, thd_start, NULL);
thd_join(thd, NULL);
}
-TEST_BEGIN(test_stats_arenas_small)
-{
+TEST_BEGIN(test_stats_arenas_small) {
unsigned arena;
void *p;
size_t sz, allocated;
@@ -183,8 +177,7 @@ TEST_BEGIN(test_stats_arenas_small)
}
TEST_END
-TEST_BEGIN(test_stats_arenas_large)
-{
+TEST_BEGIN(test_stats_arenas_large) {
unsigned arena;
void *p;
size_t sz, allocated;
@@ -224,8 +217,7 @@ TEST_BEGIN(test_stats_arenas_large)
}
TEST_END
-TEST_BEGIN(test_stats_arenas_bins)
-{
+TEST_BEGIN(test_stats_arenas_bins) {
unsigned arena;
void *p;
size_t sz, curslabs, curregs;
@@ -299,8 +291,7 @@ TEST_BEGIN(test_stats_arenas_bins)
}
TEST_END
-TEST_BEGIN(test_stats_arenas_lextents)
-{
+TEST_BEGIN(test_stats_arenas_lextents) {
unsigned arena;
void *p;
uint64_t epoch, nmalloc, ndalloc;
@@ -347,8 +338,7 @@ TEST_BEGIN(test_stats_arenas_lextents)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_stats_summary,
test_stats_large,
diff --git a/test/unit/stats_print.c b/test/unit/stats_print.c
index 5accd8e..1fb8fe6 100644
--- a/test/unit/stats_print.c
+++ b/test/unit/stats_print.c
@@ -39,8 +39,7 @@ struct parser_s {
static void
token_init(token_t *token, parser_t *parser, token_type_t token_type,
- size_t pos, size_t len, size_t line, size_t col)
-{
+ size_t pos, size_t len, size_t line, size_t col) {
token->parser = parser;
token->token_type = token_type;
token->pos = pos;
@@ -50,8 +49,7 @@ token_init(token_t *token, parser_t *parser, token_type_t token_type,
}
static void
-token_error(token_t *token)
-{
+token_error(token_t *token) {
if (!token->parser->verbose) {
return;
}
@@ -72,8 +70,7 @@ token_error(token_t *token)
}
static void
-parser_init(parser_t *parser, bool verbose)
-{
+parser_init(parser_t *parser, bool verbose) {
parser->verbose = verbose;
parser->buf = NULL;
parser->len = 0;
@@ -83,16 +80,14 @@ parser_init(parser_t *parser, bool verbose)
}
static void
-parser_fini(parser_t *parser)
-{
+parser_fini(parser_t *parser) {
if (parser->buf != NULL) {
dallocx(parser->buf, MALLOCX_TCACHE_NONE);
}
}
static bool
-parser_append(parser_t *parser, const char *str)
-{
+parser_append(parser_t *parser, const char *str) {
size_t len = strlen(str);
char *buf = (parser->buf == NULL) ? mallocx(len + 1,
MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1,
@@ -107,8 +102,7 @@ parser_append(parser_t *parser, const char *str)
}
static bool
-parser_tokenize(parser_t *parser)
-{
+parser_tokenize(parser_t *parser) {
enum {
STATE_START,
STATE_EOI,
@@ -667,8 +661,7 @@ static bool parser_parse_array(parser_t *parser);
static bool parser_parse_object(parser_t *parser);
static bool
-parser_parse_value(parser_t *parser)
-{
+parser_parse_value(parser_t *parser) {
switch (parser->token.token_type) {
case TOKEN_TYPE_NULL:
case TOKEN_TYPE_FALSE:
@@ -687,8 +680,7 @@ parser_parse_value(parser_t *parser)
}
static bool
-parser_parse_pair(parser_t *parser)
-{
+parser_parse_pair(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
"Pair should start with string");
if (parser_tokenize(parser)) {
@@ -706,8 +698,7 @@ parser_parse_pair(parser_t *parser)
}
static bool
-parser_parse_values(parser_t *parser)
-{
+parser_parse_values(parser_t *parser) {
if (parser_parse_value(parser)) {
return true;
}
@@ -734,8 +725,7 @@ parser_parse_values(parser_t *parser)
}
static bool
-parser_parse_array(parser_t *parser)
-{
+parser_parse_array(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
"Array should start with [");
if (parser_tokenize(parser)) {
@@ -751,8 +741,7 @@ parser_parse_array(parser_t *parser)
}
static bool
-parser_parse_pairs(parser_t *parser)
-{
+parser_parse_pairs(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
"Object should start with string");
if (parser_parse_pair(parser)) {
@@ -787,8 +776,7 @@ parser_parse_pairs(parser_t *parser)
}
static bool
-parser_parse_object(parser_t *parser)
-{
+parser_parse_object(parser_t *parser) {
assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
"Object should start with {");
if (parser_tokenize(parser)) {
@@ -806,8 +794,7 @@ parser_parse_object(parser_t *parser)
}
static bool
-parser_parse(parser_t *parser)
-{
+parser_parse(parser_t *parser) {
if (parser_tokenize(parser)) {
goto label_error;
}
@@ -831,8 +818,7 @@ label_error:
return true;
}
-TEST_BEGIN(test_json_parser)
-{
+TEST_BEGIN(test_json_parser) {
size_t i;
const char *invalid_inputs[] = {
/* Tokenizer error case tests. */
@@ -929,16 +915,14 @@ TEST_BEGIN(test_json_parser)
TEST_END
void
-write_cb(void *opaque, const char *str)
-{
+write_cb(void *opaque, const char *str) {
parser_t *parser = (parser_t *)opaque;
if (parser_append(parser, str)) {
test_fail("Unexpected input appending failure");
}
}
-TEST_BEGIN(test_stats_print_json)
-{
+TEST_BEGIN(test_stats_print_json) {
const char *opts[] = {
"J",
"Jg",
@@ -998,8 +982,7 @@ TEST_BEGIN(test_stats_print_json)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_json_parser,
test_stats_print_json));
diff --git a/test/unit/ticker.c b/test/unit/ticker.c
index b8af46c..be54356 100644
--- a/test/unit/ticker.c
+++ b/test/unit/ticker.c
@@ -1,7 +1,6 @@
#include "test/jemalloc_test.h"
-TEST_BEGIN(test_ticker_tick)
-{
+TEST_BEGIN(test_ticker_tick) {
#define NREPS 2
#define NTICKS 3
ticker_t ticker;
@@ -26,8 +25,7 @@ TEST_BEGIN(test_ticker_tick)
}
TEST_END
-TEST_BEGIN(test_ticker_ticks)
-{
+TEST_BEGIN(test_ticker_ticks) {
#define NTICKS 3
ticker_t ticker;
@@ -45,8 +43,7 @@ TEST_BEGIN(test_ticker_ticks)
}
TEST_END
-TEST_BEGIN(test_ticker_copy)
-{
+TEST_BEGIN(test_ticker_copy) {
#define NTICKS 3
ticker_t ta, tb;
@@ -66,8 +63,7 @@ TEST_BEGIN(test_ticker_copy)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_ticker_tick,
test_ticker_ticks,
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
index 5313ef8..484dc30 100644
--- a/test/unit/tsd.c
+++ b/test/unit/tsd.c
@@ -10,8 +10,7 @@ malloc_tsd_types(data_, data_t)
malloc_tsd_protos(, data_, data_t)
void
-data_cleanup(void *arg)
-{
+data_cleanup(void *arg) {
data_t *data = (data_t *)arg;
if (!data_cleanup_executed) {
@@ -53,8 +52,7 @@ malloc_tsd_data(, data_, data_t, DATA_INIT)
malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup)
static void *
-thd_start(void *arg)
-{
+thd_start(void *arg) {
data_t d = (data_t)(uintptr_t)arg;
void *p;
@@ -76,14 +74,12 @@ thd_start(void *arg)
return (NULL);
}
-TEST_BEGIN(test_tsd_main_thread)
-{
+TEST_BEGIN(test_tsd_main_thread) {
thd_start((void *)(uintptr_t)0xa5f3e329);
}
TEST_END
-TEST_BEGIN(test_tsd_sub_thread)
-{
+TEST_BEGIN(test_tsd_sub_thread) {
thd_t thd;
data_cleanup_executed = false;
@@ -95,8 +91,7 @@ TEST_BEGIN(test_tsd_sub_thread)
TEST_END
int
-main(void)
-{
+main(void) {
/* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
if (nallocx(1, 0) == 0) {
malloc_printf("Initialization error");
diff --git a/test/unit/util.c b/test/unit/util.c
index b891a19..3d1ecf4 100644
--- a/test/unit/util.c
+++ b/test/unit/util.c
@@ -31,26 +31,22 @@
} \
} while (0)
-TEST_BEGIN(test_pow2_ceil_u64)
-{
+TEST_BEGIN(test_pow2_ceil_u64) {
TEST_POW2_CEIL(uint64_t, u64, FMTu64);
}
TEST_END
-TEST_BEGIN(test_pow2_ceil_u32)
-{
+TEST_BEGIN(test_pow2_ceil_u32) {
TEST_POW2_CEIL(uint32_t, u32, FMTu32);
}
TEST_END
-TEST_BEGIN(test_pow2_ceil_zu)
-{
+TEST_BEGIN(test_pow2_ceil_zu) {
TEST_POW2_CEIL(size_t, zu, "zu");
}
TEST_END
-TEST_BEGIN(test_malloc_strtoumax_no_endptr)
-{
+TEST_BEGIN(test_malloc_strtoumax_no_endptr) {
int err;
set_errno(0);
@@ -60,8 +56,7 @@ TEST_BEGIN(test_malloc_strtoumax_no_endptr)
}
TEST_END
-TEST_BEGIN(test_malloc_strtoumax)
-{
+TEST_BEGIN(test_malloc_strtoumax) {
struct test_s {
const char *input;
const char *expected_remainder;
@@ -155,8 +150,7 @@ TEST_BEGIN(test_malloc_strtoumax)
}
TEST_END
-TEST_BEGIN(test_malloc_snprintf_truncated)
-{
+TEST_BEGIN(test_malloc_snprintf_truncated) {
#define BUFLEN 15
char buf[BUFLEN];
size_t result;
@@ -188,8 +182,7 @@ TEST_BEGIN(test_malloc_snprintf_truncated)
}
TEST_END
-TEST_BEGIN(test_malloc_snprintf)
-{
+TEST_BEGIN(test_malloc_snprintf) {
#define BUFLEN 128
char buf[BUFLEN];
size_t result;
@@ -302,8 +295,7 @@ TEST_BEGIN(test_malloc_snprintf)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_pow2_ceil_u64,
test_pow2_ceil_u32,
diff --git a/test/unit/witness.c b/test/unit/witness.c
index 1359398..d75ca48 100644
--- a/test/unit/witness.c
+++ b/test/unit/witness.c
@@ -12,32 +12,27 @@ static bool saw_lockless_error;
static void
witness_lock_error_intercept(const witness_list_t *witnesses,
- const witness_t *witness)
-{
+ const witness_t *witness) {
saw_lock_error = true;
}
static void
-witness_owner_error_intercept(const witness_t *witness)
-{
+witness_owner_error_intercept(const witness_t *witness) {
saw_owner_error = true;
}
static void
-witness_not_owner_error_intercept(const witness_t *witness)
-{
+witness_not_owner_error_intercept(const witness_t *witness) {
saw_not_owner_error = true;
}
static void
-witness_lockless_error_intercept(const witness_list_t *witnesses)
-{
+witness_lockless_error_intercept(const witness_list_t *witnesses) {
saw_lockless_error = true;
}
static int
-witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob)
-{
+witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
assert(oa == (void *)a);
@@ -47,8 +42,8 @@ witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob)
}
static int
-witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, void *ob)
-{
+witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
+ void *ob) {
assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
assert(oa == (void *)a);
@@ -57,8 +52,7 @@ witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, void *ob)
return (-strcmp(a->name, b->name));
}
-TEST_BEGIN(test_witness)
-{
+TEST_BEGIN(test_witness) {
witness_t a, b;
tsdn_t *tsdn;
@@ -85,8 +79,7 @@ TEST_BEGIN(test_witness)
}
TEST_END
-TEST_BEGIN(test_witness_comp)
-{
+TEST_BEGIN(test_witness_comp) {
witness_t a, b, c, d;
tsdn_t *tsdn;
@@ -135,8 +128,7 @@ TEST_BEGIN(test_witness_comp)
}
TEST_END
-TEST_BEGIN(test_witness_reversal)
-{
+TEST_BEGIN(test_witness_reversal) {
witness_t a, b;
tsdn_t *tsdn;
@@ -167,8 +159,7 @@ TEST_BEGIN(test_witness_reversal)
}
TEST_END
-TEST_BEGIN(test_witness_recursive)
-{
+TEST_BEGIN(test_witness_recursive) {
witness_t a;
tsdn_t *tsdn;
@@ -205,8 +196,7 @@ TEST_BEGIN(test_witness_recursive)
}
TEST_END
-TEST_BEGIN(test_witness_unlock_not_owned)
-{
+TEST_BEGIN(test_witness_unlock_not_owned) {
witness_t a;
tsdn_t *tsdn;
@@ -232,8 +222,7 @@ TEST_BEGIN(test_witness_unlock_not_owned)
}
TEST_END
-TEST_BEGIN(test_witness_lockful)
-{
+TEST_BEGIN(test_witness_lockful) {
witness_t a;
tsdn_t *tsdn;
@@ -265,8 +254,7 @@ TEST_BEGIN(test_witness_lockful)
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_witness,
test_witness_comp,
diff --git a/test/unit/zero.c b/test/unit/zero.c
index c752954..a802f05 100644
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -6,8 +6,7 @@ const char *malloc_conf =
#endif
static void
-test_zero(size_t sz_min, size_t sz_max)
-{
+test_zero(size_t sz_min, size_t sz_max) {
uint8_t *s;
size_t sz_prev, sz, i;
#define MAGIC ((uint8_t)0x61)
@@ -45,23 +44,20 @@ test_zero(size_t sz_min, size_t sz_max)
#undef MAGIC
}
-TEST_BEGIN(test_zero_small)
-{
+TEST_BEGIN(test_zero_small) {
test_skip_if(!config_fill);
test_zero(1, SMALL_MAXCLASS-1);
}
TEST_END
-TEST_BEGIN(test_zero_large)
-{
+TEST_BEGIN(test_zero_large) {
test_skip_if(!config_fill);
test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
}
TEST_END
int
-main(void)
-{
+main(void) {
return (test(
test_zero_small,
test_zero_large));