Diffstat (limited to 'src')
-rw-r--r-- | src/arena.c          | 124
-rw-r--r-- | src/base.c           |  28
-rw-r--r-- | src/bitmap.c         |   4
-rw-r--r-- | src/ckh.c            |  50
-rw-r--r-- | src/ctl.c            | 144
-rw-r--r-- | src/extent.c         | 161
-rw-r--r-- | src/extent_dss.c     |  26
-rw-r--r-- | src/extent_mmap.c    |  14
-rw-r--r-- | src/jemalloc.c       | 164
-rw-r--r-- | src/jemalloc_cpp.cpp |  12
-rw-r--r-- | src/large.c          |  46
-rw-r--r-- | src/mutex.c          |  16
-rw-r--r-- | src/nstime.c         |  16
-rw-r--r-- | src/pages.c          |  36
-rw-r--r-- | src/prof.c           | 210
-rw-r--r-- | src/rtree.c          |  22
-rw-r--r-- | src/tcache.c         |  28
-rw-r--r-- | src/tsd.c            |  12
-rw-r--r-- | src/util.c           |  20
-rw-r--r-- | src/zone.c           |  20
20 files changed, 574 insertions, 579 deletions
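
The diff below is almost entirely a mechanical style change: jemalloc's old convention of parenthesizing every return expression, return (x);, is replaced with a plain return x;. A condensed before/after of the pattern, taken from the b0get hunk in src/base.c below (shown here only as an illustration, not as an additional change):

	/* Old style: the return value is wrapped in redundant parentheses. */
	base_t *
	b0get(void) {
		return (b0);
	}

	/* New style: same behavior, parentheses dropped. */
	base_t *
	b0get(void) {
		return b0;
	}

The generated code is identical in both forms; only the source text changes.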
diff --git a/src/arena.c b/src/arena.c index 5cf9bd0..fe4b5de 100644 --- a/src/arena.c +++ b/src/arena.c @@ -50,8 +50,8 @@ arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena, malloc_mutex_assert_owner(tsdn, &arena->lock); - return (extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize, - pad, alignment, zero, &commit, slab)); + return extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize, + pad, alignment, zero, &commit, slab); } extent_t * @@ -65,7 +65,7 @@ arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena, new_addr, size, 0, alignment, zero, false); malloc_mutex_unlock(tsdn, &arena->lock); - return (extent); + return extent; } static void @@ -122,7 +122,7 @@ arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, ret = (void *)((uintptr_t)extent_addr_get(slab) + (uintptr_t)(bin_info->reg_size * regind)); slab_data->nfree--; - return (ret); + return ret; } #ifndef JEMALLOC_JET @@ -160,7 +160,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { assert(regind < arena_bin_info[binind].nregs); - return (regind); + return regind; } JEMALLOC_INLINE_C void @@ -282,7 +282,7 @@ arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena, malloc_mutex_unlock(tsdn, &arena->lock); } - return (extent); + return extent; } extent_t * @@ -308,7 +308,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, &extent_hooks, usize, alignment, zero); } - return (extent); + return extent; } void @@ -409,7 +409,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena) { } npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); - return (npages_limit_backlog); + return npages_limit_backlog; } static void @@ -499,12 +499,12 @@ arena_decay_init(arena_t *arena, ssize_t decay_time) { static bool arena_decay_time_valid(ssize_t decay_time) { if (decay_time < -1) { - return (false); + return false; } if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) { - return (true); + return true; } - return (false); + return false; } ssize_t @@ -515,13 +515,13 @@ arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) { decay_time = arena->decay.time; malloc_mutex_unlock(tsdn, &arena->lock); - return (decay_time); + return decay_time; } bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) { if (!arena_decay_time_valid(decay_time)) { - return (true); + return true; } malloc_mutex_lock(tsdn, &arena->lock); @@ -537,7 +537,7 @@ arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) { arena_maybe_purge(tsdn, arena); malloc_mutex_unlock(tsdn, &arena->lock); - return (false); + return false; } static void @@ -609,7 +609,7 @@ arena_dirty_count(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); - return (ndirty); + return ndirty; } static size_t @@ -648,7 +648,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, } malloc_mutex_unlock(tsdn, &arena->extents_mtx); - return (nstashed); + return nstashed; } static size_t @@ -680,7 +680,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, arena->stats.purged += npurged; } - return (npurged); + return npurged; } /* @@ -757,12 +757,12 @@ static extent_t * arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) { extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); if (slab == NULL) { - return (NULL); + return NULL; } if (config_stats) { bin->stats.reslabs++; } - return (slab); + return slab; } static void @@ -936,7 +936,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, bin_info->slab_size, 0, PAGE, &zero, 
&commit, true); malloc_mutex_lock(tsdn, &arena->lock); - return (slab); + return slab; } static extent_t * @@ -953,7 +953,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, bin_info); if (slab == NULL) { - return (NULL); + return NULL; } } assert(extent_slab_get(slab)); @@ -970,7 +970,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, arena->stats.mapped += extent_size_get(slab); } - return (slab); + return slab; } static extent_t * @@ -982,7 +982,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, /* Look for a usable slab. */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { - return (slab); + return slab; } /* No existing slabs have any space available. */ @@ -1001,7 +1001,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, bin->stats.nslabs++; bin->stats.curslabs++; } - return (slab); + return slab; } /* @@ -1011,10 +1011,10 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, */ slab = arena_bin_slabs_nonfull_tryget(bin); if (slab != NULL) { - return (slab); + return slab; } - return (NULL); + return NULL; } /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ @@ -1057,7 +1057,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, bin); } } - return (ret); + return ret; } arena_bin_slabs_full_insert(bin, bin->slabcur); @@ -1065,13 +1065,13 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, } if (slab == NULL) { - return (NULL); + return NULL; } bin->slabcur = slab; assert(extent_slab_data_get(bin->slabcur)->nfree > 0); - return (arena_slab_reg_alloc(tsdn, slab, bin_info)); + return arena_slab_reg_alloc(tsdn, slab, bin_info); } void @@ -1172,7 +1172,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { if (ret == NULL) { malloc_mutex_unlock(tsdn, &bin->lock); - return (NULL); + return NULL; } if (config_stats) { @@ -1203,7 +1203,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { } arena_decay_tick(tsdn, arena); - return (ret); + return ret; } void * @@ -1215,13 +1215,13 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, arena = arena_choose(tsdn_tsd(tsdn), arena); } if (unlikely(arena == NULL)) { - return (NULL); + return NULL; } if (likely(size <= SMALL_MAXCLASS)) { - return (arena_malloc_small(tsdn, arena, ind, zero)); + return arena_malloc_small(tsdn, arena, ind, zero); } - return (large_malloc(tsdn, arena, index2size(ind), zero)); + return large_malloc(tsdn, arena, index2size(ind), zero); } void * @@ -1241,7 +1241,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, ret = large_palloc(tsdn, arena, usize, alignment, zero); } } - return (ret); + return ret; } void @@ -1282,7 +1282,7 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS); - return (LARGE_MINCLASS); + return LARGE_MINCLASS; } void @@ -1425,7 +1425,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, assert(extra == 0 || size + extra <= LARGE_MAXCLASS); if (unlikely(size > LARGE_MAXCLASS)) { - return (true); + return true; } usize_min = s2u(size); @@ -1440,31 +1440,31 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) != size2index(oldsize)) && (size > oldsize || usize_max < oldsize)) { - 
return (true); + return true; } arena_decay_tick(tsdn, extent_arena_get(extent)); - return (false); + return false; } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { - return (large_ralloc_no_move(tsdn, extent, usize_min, usize_max, - zero)); + return large_ralloc_no_move(tsdn, extent, usize_min, usize_max, + zero); } - return (true); + return true; } static void * arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment == 0) { - return (arena_malloc(tsdn, arena, usize, size2index(usize), - zero, tcache, true)); + return arena_malloc(tsdn, arena, usize, size2index(usize), + zero, tcache, true); } usize = sa2u(usize, alignment); if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { - return (NULL); + return NULL; } - return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); + return ipalloct(tsdn, usize, alignment, zero, tcache, arena); } void * @@ -1476,20 +1476,20 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, usize = s2u(size); if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { - return (NULL); + return NULL; } if (likely(usize <= SMALL_MAXCLASS)) { /* Try to avoid moving the allocation. */ if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0, zero)) { - return (ptr); + return ptr; } } if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { - return (large_ralloc(tsdn, arena, extent, usize, alignment, - zero, tcache)); + return large_ralloc(tsdn, arena, extent, usize, alignment, + zero, tcache); } /* @@ -1499,7 +1499,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero, tcache); if (ret == NULL) { - return (NULL); + return NULL; } /* @@ -1510,7 +1510,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, copysize = (usize < oldsize) ? 
usize : oldsize; memcpy(ret, ptr, copysize); isdalloct(tsdn, extent, ptr, oldsize, tcache, true); - return (ret); + return ret; } dss_prec_t @@ -1520,7 +1520,7 @@ arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_lock(tsdn, &arena->lock); ret = arena->dss_prec; malloc_mutex_unlock(tsdn, &arena->lock); - return (ret); + return ret; } bool @@ -1531,21 +1531,21 @@ arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) { malloc_mutex_lock(tsdn, &arena->lock); arena->dss_prec = dss_prec; malloc_mutex_unlock(tsdn, &arena->lock); - return (false); + return false; } ssize_t arena_decay_time_default_get(void) { - return ((ssize_t)atomic_read_zu((size_t *)&decay_time_default)); + return (ssize_t)atomic_read_zu((size_t *)&decay_time_default); } bool arena_decay_time_default_set(ssize_t decay_time) { if (!arena_decay_time_valid(decay_time)) { - return (true); + return true; } atomic_write_zu((size_t *)&decay_time_default, (size_t)decay_time); - return (false); + return false; } static void @@ -1642,7 +1642,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, unsigned arena_nthreads_get(arena_t *arena, bool internal) { - return (atomic_read_u(&arena->nthreads[internal])); + return atomic_read_u(&arena->nthreads[internal]); } void @@ -1657,7 +1657,7 @@ arena_nthreads_dec(arena_t *arena, bool internal) { size_t arena_extent_sn_next(arena_t *arena) { - return (atomic_add_zu(&arena->extent_sn_next, 1) - 1); + return atomic_add_zu(&arena->extent_sn_next, 1) - 1; } arena_t * @@ -1671,7 +1671,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { } else { base = base_new(tsdn, ind, extent_hooks); if (base == NULL) { - return (NULL); + return NULL; } } @@ -1762,12 +1762,12 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena->base = base; - return (arena); + return arena; label_error: if (ind != 0) { base_delete(base); } - return (NULL); + return NULL; } void @@ -23,7 +23,7 @@ base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) { &zero, &commit, ind); } - return (addr); + return addr; } static void @@ -105,7 +105,7 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) + *gap_size + size), extent_size_get(extent) - *gap_size - size, 0, extent_sn_get(extent), true, true, true, false); - return (ret); + return ret; } static void @@ -142,7 +142,7 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent, ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size); - return (ret); + return ret; } /* @@ -163,14 +163,14 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind, block_size = HUGEPAGE_CEILING(header_size + gap_size + usize); block = (base_block_t *)base_map(extent_hooks, ind, block_size); if (block == NULL) { - return (NULL); + return NULL; } block->size = block_size; block->next = NULL; assert(block_size >= header_size); base_extent_init(extent_sn_next, &block->extent, (void *)((uintptr_t)block + header_size), block_size - header_size); - return (block); + return block; } /* @@ -187,7 +187,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { block = base_block_alloc(extent_hooks, base_ind_get(base), &base->extent_sn_next, size, alignment); if (block == NULL) { - return (NULL); + return NULL; } block->next = base->blocks; base->blocks = block; @@ -198,12 +198,12 @@ 
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { assert(base->allocated <= base->resident); assert(base->resident <= base->mapped); } - return (&block->extent); + return &block->extent; } base_t * b0get(void) { - return (b0); + return b0; } base_t * @@ -217,7 +217,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { block = base_block_alloc(extent_hooks, ind, &extent_sn_next, sizeof(base_t), QUANTUM); if (block == NULL) { - return (NULL); + return NULL; } base_alignment = CACHELINE; @@ -228,7 +228,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { base->extent_hooks = extent_hooks; if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE)) { base_unmap(extent_hooks, ind, block, block->size); - return (NULL); + return NULL; } base->extent_sn_next = extent_sn_next; base->blocks = block; @@ -245,7 +245,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base, base_size); - return (base); + return base; } void @@ -262,7 +262,7 @@ base_delete(base_t *base) { extent_hooks_t * base_extent_hooks_get(base_t *base) { - return ((extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun)); + return (extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun); } extent_hooks_t * @@ -276,7 +276,7 @@ base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { u.h = &base->extent_hooks; atomic_write_p(u.v, extent_hooks); - return (old_extent_hooks); + return old_extent_hooks; } /* @@ -319,7 +319,7 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment); label_return: malloc_mutex_unlock(tsdn, &base->mtx); - return (ret); + return ret; } void diff --git a/src/bitmap.c b/src/bitmap.c index 7cbc7d4..a9d4868 100644 --- a/src/bitmap.c +++ b/src/bitmap.c @@ -35,7 +35,7 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { - return (binfo->levels[binfo->nlevels].group_offset); + return binfo->levels[binfo->nlevels].group_offset; } void @@ -80,7 +80,7 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { - return (binfo->ngroups); + return binfo->ngroups; } void @@ -57,11 +57,11 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; if (cell->key != NULL && ckh->keycomp(key, cell->key)) { - return ((bucket << LG_CKH_BUCKET_CELLS) + i); + return (bucket << LG_CKH_BUCKET_CELLS) + i; } } - return (SIZE_T_MAX); + return SIZE_T_MAX; } /* @@ -79,13 +79,13 @@ ckh_isearch(ckh_t *ckh, const void *key) { bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); if (cell != SIZE_T_MAX) { - return (cell); + return cell; } /* Search secondary bucket. 
*/ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - return (cell); + return cell; } JEMALLOC_INLINE_C bool @@ -107,11 +107,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, cell->key = key; cell->data = data; ckh->count++; - return (false); + return false; } } - return (true); + return true; } /* @@ -181,12 +181,12 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, if (tbucket == argbucket) { *argkey = key; *argdata = data; - return (true); + return true; } bucket = tbucket; if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { - return (false); + return false; } } } @@ -202,19 +202,19 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { - return (false); + return false; } /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { - return (false); + return false; } /* * Try to find a place for this item via iterative eviction/relocation. */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); + return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); } /* @@ -234,13 +234,13 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; - return (true); + return true; } nins++; } } - return (false); + return false; } static bool @@ -296,7 +296,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) { ret = false; label_return: - return (ret); + return ret; } static void @@ -403,7 +403,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ret = false; label_return: - return (ret); + return ret; } void @@ -433,7 +433,7 @@ size_t ckh_count(ckh_t *ckh) { assert(ckh != NULL); - return (ckh->count); + return ckh->count; } bool @@ -450,11 +450,11 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { *data = (void *)ckh->tab[i].data; } *tabind = i + 1; - return (false); + return false; } } - return (true); + return true; } bool @@ -477,7 +477,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { ret = false; label_return: - return (ret); + return ret; } bool @@ -507,10 +507,10 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, ckh_shrink(tsd, ckh); } - return (false); + return false; } - return (true); + return true; } bool @@ -527,10 +527,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { if (data != NULL) { *data = (void *)ckh->tab[cell].data; } - return (false); + return false; } - return (true); + return true; } void @@ -543,7 +543,7 @@ ckh_string_keycomp(const void *k1, const void *k2) { assert(k1 != NULL); assert(k2 != NULL); - return (strcmp((char *)k1, (char *)k2) ? false : true); + return !strcmp((char *)k1, (char *)k2); } void @@ -560,5 +560,5 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) { bool ckh_pointer_keycomp(const void *k1, const void *k2) { - return ((k1 == k2) ? 
true : false); + return (k1 == k2); } @@ -464,12 +464,12 @@ arenas_i2a_impl(size_t i, bool compat, bool validate) { break; } - return (a); + return a; } static unsigned arenas_i2a(size_t i) { - return (arenas_i2a_impl(i, true, false)); + return arenas_i2a_impl(i, true, false); } static ctl_arena_t * @@ -505,14 +505,14 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) { } assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); - return (ret); + return ret; } static ctl_arena_t * arenas_i(size_t i) { ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false); assert(ret != NULL); - return (ret); + return ret; } static void @@ -692,19 +692,19 @@ ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) { /* Trigger stats allocation. */ if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) { - return (UINT_MAX); + return UINT_MAX; } /* Initialize new arena. */ if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) { - return (UINT_MAX); + return UINT_MAX; } if (arena_ind == ctl_arenas->narenas) { ctl_arenas->narenas++; } - return (arena_ind); + return arena_ind; } static void @@ -819,7 +819,7 @@ ctl_init(tsdn_t *tsdn) { ret = false; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); - return (ret); + return ret; } static int @@ -917,7 +917,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, ret = 0; label_return: - return (ret); + return ret; } int @@ -1019,12 +1019,12 @@ label_return: bool ctl_boot(void) { if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) { - return (true); + return true; } ctl_initialized = false; - return (false); + return false; } void @@ -1110,7 +1110,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ t oldval; \ \ if (!(c)) { \ - return (ENOENT); \ + return ENOENT; \ } \ if (l) { \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ @@ -1124,7 +1124,7 @@ label_return: \ if (l) { \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ } \ - return (ret); \ + return ret; \ } #define CTL_RO_CGEN(c, n, v, t) \ @@ -1135,7 +1135,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ t oldval; \ \ if (!(c)) { \ - return (ENOENT); \ + return ENOENT; \ } \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ @@ -1145,7 +1145,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ ret = 0; \ label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ - return (ret); \ + return ret; \ } #define CTL_RO_GEN(n, v, t) \ @@ -1163,7 +1163,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ ret = 0; \ label_return: \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ - return (ret); \ + return ret; \ } /* @@ -1178,7 +1178,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ t oldval; \ \ if (!(c)) { \ - return (ENOENT); \ + return ENOENT; \ } \ READONLY(); \ oldval = (v); \ @@ -1186,7 +1186,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } #define CTL_RO_NL_GEN(n, v, t) \ @@ -1202,7 +1202,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ @@ -1213,7 +1213,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ t oldval; \ \ if (!(c)) { \ - return (ENOENT); \ + return ENOENT; \ } \ READONLY(); \ oldval = (m(tsd)); \ @@ -1221,7 +1221,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ \ 
ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } #define CTL_RO_CONFIG_GEN(n, t) \ @@ -1237,7 +1237,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } /******************************************************************************/ @@ -1260,7 +1260,7 @@ epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); + return ret; } /******************************************************************************/ @@ -1316,7 +1316,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, oldarena = arena_choose(tsd, NULL); if (oldarena == NULL) { - return (EAGAIN); + return EAGAIN; } newind = oldind = arena_ind_get(oldarena); @@ -1350,7 +1350,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, @@ -1369,7 +1369,7 @@ thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, bool oldval; if (!config_tcache) { - return (ENOENT); + return ENOENT; } oldval = tcache_enabled_get(); @@ -1384,7 +1384,7 @@ thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1393,7 +1393,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, int ret; if (!config_tcache) { - return (ENOENT); + return ENOENT; } READONLY(); @@ -1403,7 +1403,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1412,7 +1412,7 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, int ret; if (!config_prof) { - return (ENOENT); + return ENOENT; } READ_XOR_WRITE(); @@ -1434,7 +1434,7 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1444,7 +1444,7 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, bool oldval; if (!config_prof) { - return (ENOENT); + return ENOENT; } oldval = prof_thread_active_get(tsd); @@ -1462,7 +1462,7 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } /******************************************************************************/ @@ -1474,7 +1474,7 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, unsigned tcache_ind; if (!config_tcache) { - return (ENOENT); + return ENOENT; } malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); @@ -1488,7 +1488,7 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); + return ret; } static int @@ -1498,7 +1498,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, unsigned tcache_ind; if (!config_tcache) { - return (ENOENT); + return ENOENT; } WRITEONLY(); @@ -1512,7 +1512,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1522,7 +1522,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, unsigned tcache_ind; if (!config_tcache) { - return (ENOENT); + return ENOENT; } WRITEONLY(); @@ 
-1536,7 +1536,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } /******************************************************************************/ @@ -1560,7 +1560,7 @@ arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ret = 0; label_return: - return (ret); + return ret; } static void @@ -1622,7 +1622,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1638,7 +1638,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1664,7 +1664,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1677,12 +1677,12 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); if (ret != 0) { - return (ret); + return ret; } arena_reset(tsd, arena); - return (ret); + return ret; } static int @@ -1721,7 +1721,7 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, assert(ret == 0); label_return: - return (ret); + return ret; } static int @@ -1782,7 +1782,7 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); + return ret; } static int @@ -1817,7 +1817,7 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -1851,7 +1851,7 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); + return ret; } static const ctl_named_node_t * @@ -1874,7 +1874,7 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { ret = super_arena_i_node; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); - return (ret); + return ret; } /******************************************************************************/ @@ -1897,7 +1897,7 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); + return ret; } static int @@ -1922,7 +1922,7 @@ arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) @@ -1936,9 +1936,9 @@ CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t) static const ctl_named_node_t * arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > NBINS) { - return (NULL); + return NULL; } - return (super_arenas_bin_i_node); + return super_arenas_bin_i_node; } CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) @@ -1947,9 +1947,9 @@ static const ctl_named_node_t * arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > NSIZES - NBINS) { - return (NULL); + return NULL; } - return (super_arenas_lextent_i_node); + return super_arenas_lextent_i_node; } static int @@ -1973,7 +1973,7 @@ arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); - return (ret); + return 
ret; } /******************************************************************************/ @@ -1985,7 +1985,7 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, bool oldval; if (!config_prof) { - return (ENOENT); + return ENOENT; } if (newp != NULL) { @@ -2002,7 +2002,7 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, ret = 0; label_return: - return (ret); + return ret; } static int @@ -2012,7 +2012,7 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, bool oldval; if (!config_prof) { - return (ENOENT); + return ENOENT; } if (newp != NULL) { @@ -2028,7 +2028,7 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -2038,7 +2038,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, const char *filename = NULL; if (!config_prof) { - return (ENOENT); + return ENOENT; } WRITEONLY(); @@ -2051,7 +2051,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -2061,7 +2061,7 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, bool oldval; if (!config_prof) { - return (ENOENT); + return ENOENT; } if (newp != NULL) { @@ -2077,7 +2077,7 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int @@ -2087,7 +2087,7 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t lg_sample = lg_prof_sample; if (!config_prof) { - return (ENOENT); + return ENOENT; } WRITEONLY(); @@ -2100,7 +2100,7 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) @@ -2180,9 +2180,9 @@ static const ctl_named_node_t * stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > NBINS) { - return (NULL); + return NULL; } - return (super_stats_arenas_i_bins_j_node); + return super_stats_arenas_i_bins_j_node; } CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, @@ -2198,9 +2198,9 @@ static const ctl_named_node_t * stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t j) { if (j > NSIZES - NBINS) { - return (NULL); + return NULL; } - return (super_stats_arenas_i_lextents_j_node); + return super_stats_arenas_i_lextents_j_node; } static const ctl_named_node_t * @@ -2218,5 +2218,5 @@ stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { ret = super_stats_arenas_i_node; label_return: malloc_mutex_unlock(tsdn, &ctl_mtx); - return (ret); + return ret; } diff --git a/src/extent.c b/src/extent.c index 5cf2e25..bcdaccf 100644 --- a/src/extent.c +++ b/src/extent.c @@ -82,12 +82,12 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) { extent = ql_last(&arena->extent_cache, ql_link); if (extent == NULL) { malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx); - return (base_alloc(tsdn, arena->base, sizeof(extent_t), - QUANTUM)); + return base_alloc(tsdn, arena->base, sizeof(extent_t), + QUANTUM); } ql_tail_remove(&arena->extent_cache, extent_t, ql_link); malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx); - return (extent); + return extent; } void @@ -100,12 +100,12 @@ extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { extent_hooks_t * extent_hooks_get(arena_t *arena) { - return 
(base_extent_hooks_get(arena->base)); + return base_extent_hooks_get(arena->base); } extent_hooks_t * extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks) { - return (base_extent_hooks_set(arena->base, extent_hooks)); + return base_extent_hooks_set(arena->base, extent_hooks); } static void @@ -139,11 +139,11 @@ extent_size_quantize_floor(size_t size) { * PAGE-spaced size classes, but it's simplest to just handle * the one case that would cause erroneous results. */ - return (size); + return size; } ret = pind2sz(pind - 1) + large_pad; assert(ret <= size); - return (ret); + return ret; } #ifdef JEMALLOC_JET #undef extent_size_quantize_floor @@ -176,7 +176,7 @@ extent_size_quantize_ceil(size_t size) { */ ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad; } - return (ret); + return ret; } #ifdef JEMALLOC_JET #undef extent_size_quantize_ceil @@ -217,7 +217,7 @@ extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, *r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent), dependent, init_missing); if (!dependent && *r_elm_a == NULL) { - return (true); + return true; } assert(*r_elm_a != NULL); @@ -227,14 +227,14 @@ extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, init_missing); if (!dependent && *r_elm_b == NULL) { rtree_elm_release(tsdn, &extents_rtree, *r_elm_a); - return (true); + return true; } assert(*r_elm_b != NULL); } else { *r_elm_b = NULL; } - return (false); + return false; } static void @@ -308,7 +308,7 @@ extent_register(tsdn_t *tsdn, const extent_t *extent) { if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a, &elm_b)) { - return (true); + return true; } extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent); if (extent_slab_get(extent)) { @@ -320,7 +320,7 @@ extent_register(tsdn_t *tsdn, const extent_t *extent) { extent_gprof_add(tsdn, extent); } - return (false); + return false; } static void @@ -378,11 +378,11 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena, for (i = pind; i < NPSIZES+1; i++) { extent_t *extent = extent_heap_first(&extent_heaps[i]); if (extent != NULL) { - return (extent); + return extent; } } - return (NULL); + return NULL; } static void @@ -444,7 +444,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, alloc_size = size + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. 
*/ if (alloc_size < usize) { - return (NULL); + return NULL; } if (!locked) { malloc_mutex_lock(tsdn, &arena->extents_mtx); @@ -479,7 +479,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); } - return (NULL); + return NULL; } extent_heaps_remove(tsdn, extent_heaps, extent); arena_extent_cache_maybe_remove(tsdn, arena, extent, cache); @@ -508,7 +508,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); } - return (NULL); + return NULL; } extent_heaps_insert(tsdn, extent_heaps, lead); arena_extent_cache_maybe_insert(tsdn, arena, lead, cache); @@ -525,7 +525,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); } - return (NULL); + return NULL; } extent_heaps_insert(tsdn, extent_heaps, trail); arena_extent_cache_maybe_insert(tsdn, arena, trail, cache); @@ -545,7 +545,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, } extent_record(tsdn, arena, r_extent_hooks, extent_heaps, cache, extent); - return (NULL); + return NULL; } extent_zeroed_set(extent, true); } @@ -577,7 +577,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, } } } - return (extent); + return extent; } /* @@ -598,22 +598,22 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, if (have_dss && dss_prec == dss_prec_primary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, commit)) != NULL) { - return (ret); + return ret; } /* mmap. */ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) != NULL) { - return (ret); + return ret; } /* "secondary" dss. */ if (have_dss && dss_prec == dss_prec_secondary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, commit)) != NULL) { - return (ret); + return ret; } /* All strategies for allocation failed. 
*/ - return (NULL); + return NULL; } static extent_t * @@ -628,7 +628,7 @@ extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena, extent = extent_recycle(tsdn, arena, r_extent_hooks, arena->extents_cached, locked, true, new_addr, usize, pad, alignment, zero, commit, slab); - return (extent); + return extent; } extent_t * @@ -637,16 +637,16 @@ extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena, size_t alignment, bool *zero, bool *commit, bool slab) { malloc_mutex_assert_owner(tsdn, &arena->extents_mtx); - return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true, - new_addr, usize, pad, alignment, zero, commit, slab)); + return extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true, + new_addr, usize, pad, alignment, zero, commit, slab); } extent_t * extent_alloc_cache(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit, bool slab) { - return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false, - new_addr, usize, pad, alignment, zero, commit, slab)); + return extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false, + new_addr, usize, pad, alignment, zero, commit, slab); } static void * @@ -656,7 +656,7 @@ extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, commit, arena->dss_prec); - return (ret); + return ret; } static void * @@ -675,8 +675,8 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, */ assert(arena != NULL); - return (extent_alloc_default_impl(tsdn, arena, new_addr, size, - alignment, zero, commit)); + return extent_alloc_default_impl(tsdn, arena, new_addr, size, + alignment, zero, commit); } static void @@ -714,14 +714,14 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, alloc_size_min = size + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. 
*/ if (alloc_size_min < usize) { - return (NULL); + return NULL; } if (alloc_size < alloc_size_min) { - return (NULL); + return NULL; } extent = extent_alloc(tsdn, arena); if (extent == NULL) { - return (NULL); + return NULL; } zeroed = false; committed = false; @@ -731,7 +731,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, arena_extent_sn_next(arena), false, zeroed, committed, false); if (ptr == NULL || extent_register(tsdn, extent)) { extent_dalloc(tsdn, arena, extent); - return (NULL); + return NULL; } /* * Set the extent as active *after registration so that no gprof-related @@ -759,7 +759,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, if (extent == NULL) { extent_deregister(tsdn, lead); extent_leak(tsdn, arena, r_extent_hooks, false, lead); - return (NULL); + return NULL; } extent_retain(tsdn, arena, r_extent_hooks, lead); } @@ -771,7 +771,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, if (trail == NULL) { extent_deregister(tsdn, extent); extent_leak(tsdn, arena, r_extent_hooks, false, extent); - return (NULL); + return NULL; } extent_retain(tsdn, arena, r_extent_hooks, trail); } else if (leadsize == 0) { @@ -786,7 +786,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent))) { extent_retain(tsdn, arena, r_extent_hooks, extent); - return (NULL); + return NULL; } extent_zeroed_set(extent, true); } @@ -812,7 +812,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, if (arena->extent_grow_next + 1 < NPSIZES) { arena->extent_grow_next++; } - return (extent); + return extent; } static extent_t * @@ -841,7 +841,7 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, new_addr, usize, pad, alignment, zero, commit, slab); } - return (extent); + return extent; } static extent_t * @@ -855,7 +855,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, size = usize + pad; extent = extent_alloc(tsdn, arena); if (extent == NULL) { - return (NULL); + return NULL; } if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. 
*/ @@ -867,7 +867,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, } if (addr == NULL) { extent_dalloc(tsdn, arena, extent); - return (NULL); + return NULL; } extent_init(extent, arena, addr, size, usize, arena_extent_sn_next(arena), true, zero, commit, slab); @@ -876,10 +876,10 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, } if (extent_register(tsdn, extent)) { extent_leak(tsdn, arena, r_extent_hooks, false, extent); - return (NULL); + return NULL; } - return (extent); + return extent; } extent_t * @@ -897,25 +897,25 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, new_addr, usize, pad, alignment, zero, commit, slab); } - return (extent); + return extent; } static bool extent_can_coalesce(const extent_t *a, const extent_t *b) { if (extent_arena_get(a) != extent_arena_get(b)) { - return (false); + return false; } if (extent_active_get(a) != extent_active_get(b)) { - return (false); + return false; } if (extent_committed_get(a) != extent_committed_get(b)) { - return (false); + return false; } if (extent_retained_get(a) != extent_retained_get(b)) { - return (false); + return false; } - return (true); + return true; } static void @@ -1016,9 +1016,9 @@ extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena, static bool extent_dalloc_default_impl(void *addr, size_t size) { if (!have_dss || !extent_in_dss(addr)) { - return (extent_dalloc_mmap(addr, size)); + return extent_dalloc_mmap(addr, size); } - return (true); + return true; } static bool @@ -1026,7 +1026,7 @@ extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool committed, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); - return (extent_dalloc_default_impl(addr, size)); + return extent_dalloc_default_impl(addr, size); } bool @@ -1060,7 +1060,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, extent_dalloc(tsdn, arena, extent); } - return (err); + return err; } void @@ -1110,8 +1110,8 @@ extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); - return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), - length)); + return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); } bool @@ -1125,7 +1125,7 @@ extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); extent_committed_set(extent, extent_committed_get(extent) || !err); - return (err); + return err; } static bool @@ -1133,8 +1133,8 @@ extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); - return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), - length)); + return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); } bool @@ -1150,7 +1150,7 @@ extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_base_get(extent), extent_size_get(extent), offset, length, arena_ind_get(arena))); extent_committed_set(extent, extent_committed_get(extent) && err); - return (err); + return err; } #ifdef PAGES_CAN_PURGE_LAZY @@ -1163,8 +1163,8 @@ extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, assert(length != 0); assert((length & PAGE_MASK) == 0); - return (pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), - length)); + return 
pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), + length); } #endif @@ -1189,8 +1189,8 @@ extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, assert(length != 0); assert((length & PAGE_MASK) == 0); - return (pages_purge_forced((void *)((uintptr_t)addr + - (uintptr_t)offset), length)); + return pages_purge_forced((void *)((uintptr_t)addr + + (uintptr_t)offset), length); } #endif @@ -1211,10 +1211,7 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); - if (!maps_coalesce) { - return (true); - } - return (false); + return !maps_coalesce; } #endif @@ -1232,7 +1229,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->split == NULL) { - return (NULL); + return NULL; } trail = extent_alloc(tsdn, arena); @@ -1278,7 +1275,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_rtree_release(tsdn, lead_elm_a, lead_elm_b); extent_rtree_release(tsdn, trail_elm_a, trail_elm_b); - return (trail); + return trail; label_error_d: extent_rtree_release(tsdn, trail_elm_a, trail_elm_b); label_error_c: @@ -1286,19 +1283,19 @@ label_error_c: label_error_b: extent_dalloc(tsdn, arena, trail); label_error_a: - return (NULL); + return NULL; } static bool extent_merge_default_impl(void *addr_a, void *addr_b) { if (!maps_coalesce) { - return (true); + return true; } if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { - return (true); + return true; } - return (false); + return false; } #ifdef JEMALLOC_MAPS_COALESCE @@ -1307,7 +1304,7 @@ extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); - return (extent_merge_default_impl(addr_a, addr_b)); + return extent_merge_default_impl(addr_a, addr_b); } #endif @@ -1322,7 +1319,7 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_assure_initialized(arena, r_extent_hooks); if ((*r_extent_hooks)->merge == NULL) { - return (true); + return true; } if (*r_extent_hooks == &extent_hooks_default) { @@ -1337,7 +1334,7 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, } if (err) { - return (true); + return true; } /* @@ -1372,19 +1369,19 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_dalloc(tsdn, extent_arena_get(b), b); - return (false); + return false; } bool extent_boot(void) { if (rtree_new(&extents_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - LG_PAGE))) { - return (true); + return true; } if (have_dss) { extent_dss_boot(); } - return (false); + return false; } diff --git a/src/extent_dss.c b/src/extent_dss.c index d61d546..93bd6fb 100644 --- a/src/extent_dss.c +++ b/src/extent_dss.c @@ -32,10 +32,10 @@ static void *dss_max; static void * extent_dss_sbrk(intptr_t increment) { #ifdef JEMALLOC_DSS - return (sbrk(increment)); + return sbrk(increment); #else not_implemented(); - return (NULL); + return NULL; #endif } @@ -44,10 +44,10 @@ extent_dss_prec_get(void) { dss_prec_t ret; if (!have_dss) { - return (dss_prec_disabled); + return dss_prec_disabled; } ret = (dss_prec_t)atomic_read_u(&dss_prec_default); - return (ret); + return ret; } bool @@ -56,7 +56,7 @@ extent_dss_prec_set(dss_prec_t dss_prec) { return (dss_prec != dss_prec_disabled); } atomic_write_u(&dss_prec_default, (unsigned)dss_prec); - return (false); + return false; } static void * 
@@ -87,10 +87,10 @@ extent_dss_max_update(void *new_addr) { } /* Fixed new_addr can only be supported if it is at the edge of DSS. */ if (new_addr != NULL && max_cur != new_addr) { - return (NULL); + return NULL; } - return (max_cur); + return max_cur; } void * @@ -107,12 +107,12 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, * interpret a large allocation request as a negative increment. */ if ((intptr_t)size < 0) { - return (NULL); + return NULL; } gap = extent_alloc(tsdn, arena); if (gap == NULL) { - return (NULL); + return NULL; } if (!atomic_read_u(&dss_exhausted)) { @@ -187,7 +187,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, memset(ret, 0, size); } } - return (ret); + return ret; } /* * Failure, whether due to OOM or a race with a raw @@ -207,7 +207,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, } label_oom: extent_dalloc(tsdn, arena, gap); - return (NULL); + return NULL; } static bool @@ -220,7 +220,7 @@ bool extent_in_dss(void *addr) { cassert(have_dss); - return (extent_in_dss_helper(addr, atomic_read_p(&dss_max))); + return extent_in_dss_helper(addr, atomic_read_p(&dss_max)); } bool @@ -231,7 +231,7 @@ extent_dss_mergeable(void *addr_a, void *addr_b) { if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < (uintptr_t)dss_base) { - return (true); + return true; } max = atomic_read_p(&dss_max); diff --git a/src/extent_mmap.c b/src/extent_mmap.c index 2c00b58..495d9be 100644 --- a/src/extent_mmap.c +++ b/src/extent_mmap.c @@ -12,14 +12,14 @@ extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, alloc_size = size + alignment - PAGE; /* Beware size_t wrap-around. */ if (alloc_size < size) { - return (NULL); + return NULL; } do { void *pages; size_t leadsize; pages = pages_map(NULL, alloc_size, commit); if (pages == NULL) { - return (NULL); + return NULL; } leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages; @@ -28,7 +28,7 @@ extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, assert(ret != NULL); *zero = true; - return (ret); + return ret; } void * @@ -54,18 +54,18 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, ret = pages_map(new_addr, size, commit); if (ret == NULL || ret == new_addr) { - return (ret); + return ret; } assert(new_addr == NULL); offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); if (offset != 0) { pages_unmap(ret, size); - return (extent_alloc_mmap_slow(size, alignment, zero, commit)); + return extent_alloc_mmap_slow(size, alignment, zero, commit); } assert(ret != NULL); *zero = true; - return (ret); + return ret; } bool @@ -73,5 +73,5 @@ extent_dalloc_mmap(void *addr, size_t size) { if (config_munmap) { pages_unmap(addr, size); } - return (!config_munmap); + return !config_munmap; } diff --git a/src/jemalloc.c b/src/jemalloc.c index 2de42c3..67b430f 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -280,17 +280,17 @@ malloc_initialized(void) { JEMALLOC_ALWAYS_INLINE_C bool malloc_init_a0(void) { if (unlikely(malloc_init_state == malloc_init_uninitialized)) { - return (malloc_init_hard_a0()); + return malloc_init_hard_a0(); } - return (false); + return false; } JEMALLOC_ALWAYS_INLINE_C bool malloc_init(void) { if (unlikely(!malloc_initialized()) && malloc_init_hard()) { - return (true); + return true; } - return (false); + return false; } /* @@ -301,11 +301,11 @@ malloc_init(void) { static void * a0ialloc(size_t size, bool zero, bool is_internal) { if 
(unlikely(malloc_init_a0())) { - return (NULL); + return NULL; } - return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, - is_internal, arena_get(TSDN_NULL, 0, true), true)); + return iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, + is_internal, arena_get(TSDN_NULL, 0, true), true); } static void @@ -315,7 +315,7 @@ a0idalloc(extent_t *extent, void *ptr, bool is_internal) { void * a0malloc(size_t size) { - return (a0ialloc(size, false, true)); + return a0ialloc(size, false, true); } void @@ -335,7 +335,7 @@ bootstrap_malloc(size_t size) { size = 1; } - return (a0ialloc(size, false, false)); + return a0ialloc(size, false, false); } void * @@ -348,7 +348,7 @@ bootstrap_calloc(size_t num, size_t size) { num_size = 1; } - return (a0ialloc(num_size, true, false)); + return a0ialloc(num_size, true, false); } void @@ -377,7 +377,7 @@ narenas_total_inc(void) { unsigned narenas_total_get(void) { - return (atomic_read_u(&narenas_total)); + return atomic_read_u(&narenas_total); } /* Create a new arena and insert it into the arenas array at index ind. */ @@ -387,7 +387,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { assert(ind <= narenas_total_get()); if (ind > MALLOCX_ARENA_MAX) { - return (NULL); + return NULL; } if (ind == narenas_total_get()) { narenas_total_inc(); @@ -400,13 +400,13 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena = arena_get(tsdn, ind, false); if (arena != NULL) { assert(ind < narenas_auto); - return (arena); + return arena; } /* Actually initialize the arena. */ arena = arena_new(tsdn, ind, extent_hooks); arena_set(ind, arena); - return (arena); + return arena; } arena_t * @@ -416,7 +416,7 @@ arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { malloc_mutex_lock(tsdn, &arenas_lock); arena = arena_init_locked(tsdn, ind, extent_hooks); malloc_mutex_unlock(tsdn, &arenas_lock); - return (arena); + return arena; } static void @@ -534,7 +534,7 @@ label_return: if (arenas_tdata_old != NULL) { a0dalloc(arenas_tdata_old); } - return (tdata); + return tdata; } /* Slow path, called only by arena_choose(). */ @@ -612,7 +612,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) { if (arena == NULL) { malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); - return (NULL); + return NULL; } if (!!j == internal) { ret = arena; @@ -627,7 +627,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) { arena_bind(tsd, 0, true); } - return (ret); + return ret; } void @@ -714,10 +714,10 @@ static char * secure_getenv(const char *name) { # ifdef JEMALLOC_HAVE_ISSETUGID if (issetugid() != 0) { - return (NULL); + return NULL; } # endif - return (getenv(name)); + return getenv(name); } #endif @@ -785,10 +785,10 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, malloc_write("<jemalloc>: Conf string ends " "with key\n"); } - return (true); + return true; default: malloc_write("<jemalloc>: Malformed conf string\n"); - return (true); + return true; } } @@ -821,7 +821,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, } *opts_p = opts; - return (false); + return false; } static void @@ -1147,7 +1147,7 @@ malloc_init_hard_needed(void) { * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. 
*/ - return (false); + return false; } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { @@ -1160,10 +1160,10 @@ malloc_init_hard_needed(void) { spin_adaptive(&spinner); malloc_mutex_lock(TSDN_NULL, &init_lock); } while (!malloc_initialized()); - return (false); + return false; } #endif - return (true); + return true; } static bool @@ -1185,23 +1185,23 @@ malloc_init_hard_a0_locked() { } pages_boot(); if (base_boot(TSDN_NULL)) { - return (true); + return true; } if (extent_boot()) { - return (true); + return true; } if (ctl_boot()) { - return (true); + return true; } if (config_prof) { prof_boot1(); } arena_boot(); if (config_tcache && tcache_boot(TSDN_NULL)) { - return (true); + return true; } if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) { - return (true); + return true; } /* * Create enough scaffolding to allow recursive allocation in @@ -1217,12 +1217,12 @@ malloc_init_hard_a0_locked() { */ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) == NULL) { - return (true); + return true; } malloc_init_state = malloc_init_a0_initialized; - return (false); + return false; } static bool @@ -1232,7 +1232,7 @@ malloc_init_hard_a0(void) { malloc_mutex_lock(TSDN_NULL, &init_lock); ret = malloc_init_hard_a0_locked(); malloc_mutex_unlock(TSDN_NULL, &init_lock); - return (ret); + return ret; } /* Initialize data structures which may trigger recursive allocation. */ @@ -1252,17 +1252,17 @@ malloc_init_hard_recursible(void) { if (opt_abort) { abort(); } - return (true); + return true; } #endif - return (false); + return false; } static bool malloc_init_hard_finish(tsdn_t *tsdn) { if (malloc_mutex_boot()) { - return (true); + return true; } if (opt_narenas == 0) { @@ -1291,7 +1291,7 @@ malloc_init_hard_finish(tsdn_t *tsdn) { arenas = (arena_t **)base_alloc(tsdn, a0->base, sizeof(arena_t *) * (MALLOCX_ARENA_MAX+1), CACHELINE); if (arenas == NULL) { - return (true); + return true; } /* Copy the pointer to the one arena that was already initialized. */ arena_set(0, a0); @@ -1299,7 +1299,7 @@ malloc_init_hard_finish(tsdn_t *tsdn) { malloc_init_state = malloc_init_initialized; malloc_slow_flag_init(); - return (false); + return false; } static bool @@ -1312,39 +1312,39 @@ malloc_init_hard(void) { malloc_mutex_lock(TSDN_NULL, &init_lock); if (!malloc_init_hard_needed()) { malloc_mutex_unlock(TSDN_NULL, &init_lock); - return (false); + return false; } if (malloc_init_state != malloc_init_a0_initialized && malloc_init_hard_a0_locked()) { malloc_mutex_unlock(TSDN_NULL, &init_lock); - return (true); + return true; } malloc_mutex_unlock(TSDN_NULL, &init_lock); /* Recursive allocation relies on functional tsd. */ tsd = malloc_tsd_boot0(); if (tsd == NULL) { - return (true); + return true; } if (malloc_init_hard_recursible()) { - return (true); + return true; } malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); if (config_prof && prof_boot2(tsd)) { malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); - return (true); + return true; } if (malloc_init_hard_finish(tsd_tsdn(tsd))) { malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); - return (true); + return true; } malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); malloc_tsd_boot1(); - return (false); + return false; } /* @@ -1679,8 +1679,6 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) { witness_assert_lockless(tsd_tsdn(tsd)); - - /* Success! 
*/ *dopts->result = allocation; return 0; @@ -1829,7 +1827,7 @@ je_aligned_alloc(size_t alignment, size_t size) { dopts.alignment = alignment; imalloc(&sopts, &dopts); - return (ret); + return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -1864,13 +1862,13 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr, void *p; if (tctx == NULL) { - return (NULL); + return NULL; } if (usize <= SMALL_MAXCLASS) { p = iralloc(tsd, extent, old_ptr, old_usize, LARGE_MINCLASS, 0, false); if (p == NULL) { - return (NULL); + return NULL; } arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, usize); @@ -1878,7 +1876,7 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr, p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false); } - return (p); + return p; } JEMALLOC_ALWAYS_INLINE_C void * @@ -1901,13 +1899,13 @@ irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); - return (NULL); + return NULL; } extent = (p == old_ptr) ? old_extent : iealloc(tsd_tsdn(tsd), p); prof_realloc(tsd, extent, p, usize, tctx, prof_active, true, old_extent, old_ptr, old_usize, old_tctx); - return (p); + return p; } JEMALLOC_INLINE_C void @@ -1977,7 +1975,7 @@ je_realloc(void *ptr, size_t size) { UTRACE(ptr, 0, 0); tsd = tsd_fetch(); ifree(tsd, ptr, tcache_get(tsd, false), true); - return (NULL); + return NULL; } size = 1; } @@ -2029,7 +2027,7 @@ je_realloc(void *ptr, size_t size) { } UTRACE(ptr, size, ret); witness_assert_lockless(tsdn); - return (ret); + return ret; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW @@ -2113,7 +2111,7 @@ je_valloc(size_t size) { imalloc(&sopts, &dopts); - return (ret); + return ret; } #endif @@ -2226,13 +2224,13 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr, void *p; if (tctx == NULL) { - return (NULL); + return NULL; } if (usize <= SMALL_MAXCLASS) { p = iralloct(tsdn, extent, old_ptr, old_usize, LARGE_MINCLASS, alignment, zero, tcache, arena); if (p == NULL) { - return (NULL); + return NULL; } arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize); } else { @@ -2240,7 +2238,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr, zero, tcache, arena); } - return (p); + return p; } JEMALLOC_ALWAYS_INLINE_C void * @@ -2264,7 +2262,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, false); - return (NULL); + return NULL; } if (p == old_ptr && alignment != 0) { @@ -2284,7 +2282,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, prof_realloc(tsd, extent, p, *usize, tctx, prof_active, false, old_extent, old_ptr, old_usize, old_tctx); - return (p); + return p; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -2359,7 +2357,7 @@ je_rallocx(void *ptr, size_t size, int flags) { } UTRACE(ptr, size, p); witness_assert_lockless(tsd_tsdn(tsd)); - return (p); + return p; label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); @@ -2367,7 +2365,7 @@ label_oom: } UTRACE(ptr, size, 0); witness_assert_lockless(tsd_tsdn(tsd)); - return (NULL); + return NULL; } JEMALLOC_ALWAYS_INLINE_C size_t @@ -2377,11 +2375,11 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize, if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, zero)) { - return (old_usize); + return old_usize; } usize = 
isalloc(tsdn, extent, ptr); - return (usize); + return usize; } static size_t @@ -2391,12 +2389,12 @@ ixallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t usize; if (tctx == NULL) { - return (old_usize); + return old_usize; } usize = ixallocx_helper(tsdn, extent, ptr, old_usize, size, extra, alignment, zero); - return (usize); + return usize; } JEMALLOC_ALWAYS_INLINE_C size_t @@ -2440,12 +2438,12 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize, } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); - return (usize); + return usize; } prof_realloc(tsd, extent, ptr, usize, tctx, prof_active, false, extent, ptr, old_usize, old_tctx); - return (usize); + return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @@ -2501,7 +2499,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { label_not_resized: UTRACE(ptr, size, ptr); witness_assert_lockless(tsd_tsdn(tsd)); - return (usize); + return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @@ -2522,7 +2520,7 @@ je_sallocx(const void *ptr, int flags) { } witness_assert_lockless(tsdn); - return (usize); + return usize; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW @@ -2566,7 +2564,7 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) { usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); } witness_assert_lockless(tsdn); - return (usize); + return usize; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW @@ -2612,7 +2610,7 @@ je_nallocx(size_t size, int flags) { assert(size != 0); if (unlikely(malloc_init())) { - return (0); + return 0; } tsdn = tsdn_fetch(); @@ -2620,11 +2618,11 @@ je_nallocx(size_t size, int flags) { usize = inallocx(tsdn, size, flags); if (unlikely(usize > LARGE_MAXCLASS)) { - return (0); + return 0; } witness_assert_lockless(tsdn); - return (usize); + return usize; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW @@ -2634,14 +2632,14 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, tsd_t *tsd; if (unlikely(malloc_init())) { - return (EAGAIN); + return EAGAIN; } tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); witness_assert_lockless(tsd_tsdn(tsd)); - return (ret); + return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW @@ -2650,14 +2648,14 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { tsdn_t *tsdn; if (unlikely(malloc_init())) { - return (EAGAIN); + return EAGAIN; } tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); ret = ctl_nametomib(tsdn, name, mibp, miblenp); witness_assert_lockless(tsdn); - return (ret); + return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW @@ -2667,14 +2665,14 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, tsd_t *tsd; if (unlikely(malloc_init())) { - return (EAGAIN); + return EAGAIN; } tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); witness_assert_lockless(tsd_tsdn(tsd)); - return (ret); + return ret; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW @@ -2706,7 +2704,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { } witness_assert_lockless(tsdn); - return (ret); + return ret; } /* diff --git a/src/jemalloc_cpp.cpp b/src/jemalloc_cpp.cpp index 030ff99..394fbff 100644 --- a/src/jemalloc_cpp.cpp +++ b/src/jemalloc_cpp.cpp @@ -36,7 +36,7 @@ void * newImpl(std::size_t size) noexcept(IsNoExcept) { void *ptr = je_malloc(size); if (likely(ptr != nullptr)) - return (ptr); + return ptr; while (ptr == nullptr) { std::new_handler handler; @@ 
-62,27 +62,27 @@ newImpl(std::size_t size) noexcept(IsNoExcept) { if (ptr == nullptr && !IsNoExcept) std::__throw_bad_alloc(); - return (ptr); + return ptr; } void * operator new(std::size_t size) { - return (newImpl<false>(size)); + return newImpl<false>(size); } void * operator new[](std::size_t size) { - return (newImpl<false>(size)); + return newImpl<false>(size); } void * operator new(std::size_t size, const std::nothrow_t &) noexcept { - return (newImpl<true>(size)); + return newImpl<true>(size); } void * operator new[](std::size_t size, const std::nothrow_t &) noexcept { - return (newImpl<true>(size)); + return newImpl<true>(size); } void diff --git a/src/large.c b/src/large.c index 0f2f176..62d4441 100644 --- a/src/large.c +++ b/src/large.c @@ -7,7 +7,7 @@ void * large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { assert(usize == s2u(usize)); - return (large_palloc(tsdn, arena, usize, CACHELINE, zero)); + return large_palloc(tsdn, arena, usize, CACHELINE, zero); } void * @@ -22,7 +22,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, ausize = sa2u(usize, alignment); if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) { - return (NULL); + return NULL; } /* @@ -35,7 +35,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, } if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, arena, usize, alignment, &is_zeroed)) == NULL) { - return (NULL); + return NULL; } /* Insert extent into large. */ @@ -58,7 +58,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, } arena_decay_tick(tsdn, arena); - return (extent_addr_get(extent)); + return extent_addr_get(extent); } #ifdef JEMALLOC_JET @@ -108,7 +108,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { assert(oldusize > usize); if (extent_hooks->split == NULL) { - return (true); + return true; } /* Split excess pages. 
*/ @@ -117,7 +117,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { &extent_hooks, extent, usize + large_pad, usize, diff, diff); if (trail == NULL) { - return (true); + return true; } if (config_fill && unlikely(opt_junk_free)) { @@ -130,7 +130,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); - return (false); + return false; } static bool @@ -144,7 +144,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, extent_t *trail; if (extent_hooks->merge == NULL) { - return (true); + return true; } if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks, @@ -154,13 +154,13 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, extent_past_get(extent), trailsize, 0, CACHELINE, &is_zeroed_trail, &commit, false)) == NULL) { - return (true); + return true; } } if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); - return (true); + return true; } if (zero || (config_fill && unlikely(opt_zero))) { @@ -191,7 +191,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); - return (false); + return false; } bool @@ -209,7 +209,7 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); - return (false); + return false; } /* Try again, this time with usize_min. */ if (usize_min < usize_max && usize_min > @@ -217,7 +217,7 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, large_ralloc_no_move_expand(tsdn, extent, usize_min, zero)) { arena_decay_tick(tsdn, extent_arena_get(extent)); - return (false); + return false; } } @@ -228,26 +228,26 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, if (extent_usize_get(extent) >= usize_min && extent_usize_get(extent) <= usize_max) { arena_decay_tick(tsdn, extent_arena_get(extent)); - return (false); + return false; } /* Attempt to shrink the allocation in-place. */ if (extent_usize_get(extent) > usize_max) { if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { arena_decay_tick(tsdn, extent_arena_get(extent)); - return (false); + return false; } } - return (true); + return true; } static void * large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { if (alignment <= CACHELINE) { - return (large_malloc(tsdn, arena, usize, zero)); + return large_malloc(tsdn, arena, usize, zero); } - return (large_palloc(tsdn, arena, usize, alignment, zero)); + return large_palloc(tsdn, arena, usize, alignment, zero); } void * @@ -264,7 +264,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, /* Try to avoid moving the allocation. */ if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { - return (extent_addr_get(extent)); + return extent_addr_get(extent); } /* @@ -274,7 +274,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, */ ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero); if (ret == NULL) { - return (NULL); + return NULL; } copysize = (usize < extent_usize_get(extent)) ? 
usize : @@ -282,7 +282,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, memcpy(ret, extent_addr_get(extent), copysize); isdalloct(tsdn, extent, extent_addr_get(extent), extent_usize_get(extent), tcache, true); - return (ret); + return ret; } /* @@ -321,12 +321,12 @@ large_dalloc(tsdn_t *tsdn, extent_t *extent) { size_t large_salloc(tsdn_t *tsdn, const extent_t *extent) { - return (extent_usize_get(extent)); + return extent_usize_get(extent); } prof_tctx_t * large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { - return (extent_prof_tctx_get(extent)); + return extent_prof_tctx_get(extent); } void diff --git a/src/mutex.c b/src/mutex.c index bc0869f..f883b9d 100644 --- a/src/mutex.c +++ b/src/mutex.c @@ -54,7 +54,7 @@ pthread_create(pthread_t *__restrict thread, pthread_once(&once_control, pthread_create_once); - return (pthread_create_fptr(thread, attr, start_routine, arg)); + return pthread_create_fptr(thread, attr, start_routine, arg); } #endif @@ -74,7 +74,7 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, _CRT_SPINCOUNT)) { - return (true); + return true; } # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) @@ -88,26 +88,26 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, bootstrap_calloc) != 0) { - return (true); + return true; } } #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) { - return (true); + return true; } pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); - return (true); + return true; } pthread_mutexattr_destroy(&attr); #endif if (config_debug) { witness_init(&mutex->witness, name, rank, NULL, NULL); } - return (false); + return false; } void @@ -143,10 +143,10 @@ malloc_mutex_boot(void) { while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, bootstrap_calloc) != 0) { - return (true); + return true; } postponed_mutexes = postponed_mutexes->postponed_next; } #endif - return (false); + return false; } diff --git a/src/nstime.c b/src/nstime.c index 66989a0..09cd778 100644 --- a/src/nstime.c +++ b/src/nstime.c @@ -14,17 +14,17 @@ nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { uint64_t nstime_ns(const nstime_t *time) { - return (time->ns); + return time->ns; } uint64_t nstime_sec(const nstime_t *time) { - return (time->ns / BILLION); + return time->ns / BILLION; } uint64_t nstime_nsec(const nstime_t *time) { - return (time->ns % BILLION); + return time->ns % BILLION; } void @@ -34,7 +34,7 @@ nstime_copy(nstime_t *time, const nstime_t *source) { int nstime_compare(const nstime_t *a, const nstime_t *b) { - return ((a->ns > b->ns) - (a->ns < b->ns)); + return (a->ns > b->ns) - (a->ns < b->ns); } void @@ -70,7 +70,7 @@ uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor) { assert(divisor->ns != 0); - return (time->ns / divisor->ns); + return time->ns / divisor->ns; } #ifdef _WIN32 @@ -126,7 +126,7 @@ nstime_get(nstime_t *time) { #endif bool nstime_monotonic(void) { - return (NSTIME_MONOTONIC); + return NSTIME_MONOTONIC; #undef NSTIME_MONOTONIC } #ifdef JEMALLOC_JET @@ -149,10 +149,10 @@ nstime_update(nstime_t *time) { /* Handle non-monotonic clocks. 
*/ if (unlikely(nstime_compare(&old_time, time) > 0)) { nstime_copy(time, &old_time); - return (true); + return true; } - return (false); + return false; } #ifdef JEMALLOC_JET #undef nstime_update diff --git a/src/pages.c b/src/pages.c index c23dccd..0b678e7 100644 --- a/src/pages.c +++ b/src/pages.c @@ -58,7 +58,7 @@ pages_map(void *addr, size_t size, bool *commit) { #endif assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && ret == addr)); - return (ret); + return ret; } void @@ -98,12 +98,12 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, pages_unmap(addr, alloc_size); new_addr = pages_map(ret, size, commit); if (new_addr == ret) { - return (ret); + return ret; } if (new_addr) { pages_unmap(new_addr, size); } - return (NULL); + return NULL; } #else { @@ -115,7 +115,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, if (trailsize != 0) { pages_unmap((void *)((uintptr_t)ret + size), trailsize); } - return (ret); + return ret; } #endif } @@ -123,7 +123,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, static bool pages_commit_impl(void *addr, size_t size, bool commit) { if (os_overcommits) { - return (true); + return true; } #ifdef _WIN32 @@ -135,7 +135,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) { void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, -1, 0); if (result == MAP_FAILED) { - return (true); + return true; } if (result != addr) { /* @@ -143,27 +143,27 @@ pages_commit_impl(void *addr, size_t size, bool commit) { * place. */ pages_unmap(result, size); - return (true); + return true; } - return (false); + return false; } #endif } bool pages_commit(void *addr, size_t size) { - return (pages_commit_impl(addr, size, true)); + return pages_commit_impl(addr, size, true); } bool pages_decommit(void *addr, size_t size) { - return (pages_commit_impl(addr, size, false)); + return pages_commit_impl(addr, size, false); } bool pages_purge_lazy(void *addr, size_t size) { if (!pages_can_purge_lazy) { - return (true); + return true; } #ifdef _WIN32 @@ -173,13 +173,13 @@ pages_purge_lazy(void *addr, size_t size) { #else not_reached(); #endif - return (false); + return false; } bool pages_purge_forced(void *addr, size_t size) { if (!pages_can_purge_forced) { - return (true); + return true; } #if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) @@ -197,7 +197,7 @@ pages_huge(void *addr, size_t size) { #ifdef JEMALLOC_THP return (madvise(addr, size, MADV_HUGEPAGE) != 0); #else - return (true); + return true; #endif } @@ -209,7 +209,7 @@ pages_nohuge(void *addr, size_t size) { #ifdef JEMALLOC_THP return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); #else - return (false); + return false; #endif } @@ -221,7 +221,7 @@ os_overcommits_sysctl(void) { sz = sizeof(vm_overcommit); if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { - return (false); /* Error. */ + return false; /* Error. */ } return ((vm_overcommit & 0x3) == 0); @@ -246,7 +246,7 @@ os_overcommits_proc(void) { fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); #endif if (fd == -1) { - return (false); /* Error. */ + return false; /* Error. */ } #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) @@ -262,7 +262,7 @@ os_overcommits_proc(void) { #endif if (nread < 1) { - return (false); /* Error. */ + return false; /* Error. 
*/ } /* * /proc/sys/vm/overcommit_memory meanings: @@ -149,7 +149,7 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { b_tctx_uid); } } - return (ret); + return ret; } rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, @@ -164,7 +164,7 @@ prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { if (ret == 0) { ret = (a_len > b_len) - (a_len < b_len); } - return (ret); + return ret; } rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, @@ -183,7 +183,7 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); } - return (ret); + return ret; } rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, @@ -319,7 +319,7 @@ static _Unwind_Reason_Code prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); - return (_URC_NO_REASON); + return _URC_NO_REASON; } static _Unwind_Reason_Code @@ -331,15 +331,15 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg) { ip = (void *)_Unwind_GetIP(context); if (ip == NULL) { - return (_URC_END_OF_STACK); + return _URC_END_OF_STACK; } data->bt->vec[data->bt->len] = ip; data->bt->len++; if (data->bt->len == data->max) { - return (_URC_END_OF_STACK); + return _URC_END_OF_STACK; } - return (_URC_NO_REASON); + return _URC_NO_REASON; } void @@ -525,12 +525,12 @@ static malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); - return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { - return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); + return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } static prof_gctx_t * @@ -543,7 +543,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (gctx == NULL) { - return (NULL); + return NULL; } gctx->lock = prof_gctx_mutex_choose(); /* @@ -556,7 +556,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; - return (gctx); + return gctx; } static void @@ -600,29 +600,29 @@ prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); if (opt_prof_accum) { - return (false); + return false; } if (tctx->cnts.curobjs != 0) { - return (false); + return false; } if (tctx->prepared) { - return (false); + return false; } - return (true); + return true; } static bool prof_gctx_should_destroy(prof_gctx_t *gctx) { if (opt_prof_accum) { - return (false); + return false; } if (!tctx_tree_empty(&gctx->tctxs)) { - return (false); + return false; } if (gctx->nlimbo != 0) { - return (false); + return false; } - return (true); + return true; } static void @@ -721,7 +721,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); if (gctx.v == NULL) { prof_leave(tsd, tdata); - return (true); + return true; } btkey.p = &gctx.p->bt; if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { @@ -729,7 +729,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, prof_leave(tsd, tdata); idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx.v), gctx.v, NULL, true, true); - return (true); + return true; } new_gctx = true; } else { @@ -747,7 +747,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, 
*p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; - return (false); + return false; } prof_tctx_t * @@ -763,7 +763,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { - return (NULL); + return NULL; } malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); @@ -783,7 +783,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, &new_gctx)) { - return (NULL); + return NULL; } /* Link a prof_tctx_t into gctx for this thread. */ @@ -794,7 +794,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } - return (NULL); + return NULL; } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; @@ -813,7 +813,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { } idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v), ret.v, NULL, true, true); - return (NULL); + return NULL; } malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; @@ -822,7 +822,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) { malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } - return (ret.p); + return ret.p; } /* @@ -887,7 +887,7 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, (*tdata_count)++; - return (NULL); + return NULL; } size_t @@ -901,7 +901,7 @@ prof_tdata_count(void) { (void *)&tdata_count); malloc_mutex_unlock(tsdn, &tdatas_mtx); - return (tdata_count); + return tdata_count; } #endif @@ -915,14 +915,14 @@ prof_bt_count(void) { tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) { - return (0); + return 0; } malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - return (bt_count); + return bt_count; } #endif @@ -943,7 +943,7 @@ prof_dump_open(bool propagate_err, const char *filename) { } } - return (fd); + return fd; } #ifdef JEMALLOC_JET #undef prof_dump_open @@ -971,7 +971,7 @@ prof_dump_flush(bool propagate_err) { } prof_dump_buf_end = 0; - return (ret); + return ret; } static bool @@ -983,7 +983,7 @@ prof_dump_close(bool propagate_err) { close(prof_dump_fd); prof_dump_fd = -1; - return (ret); + return ret; } static bool @@ -998,7 +998,7 @@ prof_dump_write(bool propagate_err, const char *s) { /* Flush the buffer if it is full. */ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { if (prof_dump_flush(propagate_err) && propagate_err) { - return (true); + return true; } } @@ -1014,7 +1014,7 @@ prof_dump_write(bool propagate_err, const char *s) { i += n; } - return (false); + return false; } JEMALLOC_FORMAT_PRINTF(2, 3) @@ -1029,7 +1029,7 @@ prof_dump_printf(bool propagate_err, const char *format, ...) 
{ va_end(ap); ret = prof_dump_write(propagate_err, buf); - return (ret); + return ret; } static void @@ -1093,7 +1093,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { not_reached(); } - return (NULL); + return NULL; } struct prof_tctx_dump_iter_arg_s { @@ -1120,13 +1120,13 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes)) { - return (tctx); + return tctx; } break; default: not_reached(); } - return (NULL); + return NULL; } static prof_tctx_t * @@ -1152,7 +1152,7 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { ret = NULL; label_return: - return (ret); + return ret; } static void @@ -1192,7 +1192,7 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { } malloc_mutex_unlock(arg->tsdn, gctx->lock); - return (NULL); + return NULL; } static void @@ -1279,7 +1279,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, } malloc_mutex_unlock(arg->tsdn, tdata->lock); - return (NULL); + return NULL; } static prof_tdata_t * @@ -1288,7 +1288,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, bool propagate_err = *(bool *)arg; if (!tdata->dumping) { - return (NULL); + return NULL; } if (prof_dump_printf(propagate_err, @@ -1298,9 +1298,9 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, tdata->cnt_summed.accumbytes, (tdata->thread_name != NULL) ? " " : "", (tdata->thread_name != NULL) ? tdata->thread_name : "")) { - return (tdata); + return tdata; } - return (NULL); + return NULL; } #ifdef JEMALLOC_JET @@ -1316,14 +1316,14 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { - return (true); + return true; } malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); malloc_mutex_unlock(tsdn, &tdatas_mtx); - return (ret); + return ret; } #ifdef JEMALLOC_JET #undef prof_dump_header @@ -1383,7 +1383,7 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, ret = false; label_return: - return (ret); + return ret; } #ifndef _WIN32 @@ -1399,16 +1399,16 @@ prof_open_maps(const char *format, ...) { va_end(ap); mfd = open(filename, O_RDONLY); - return (mfd); + return mfd; } #endif static int prof_getpid(void) { #ifdef _WIN32 - return (GetCurrentProcessId()); + return GetCurrentProcessId(); #else - return (getpid()); + return getpid(); #endif } @@ -1464,7 +1464,7 @@ label_return: if (mfd != -1) { close(mfd); } - return (ret); + return ret; } /* @@ -1524,7 +1524,7 @@ prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { ret = NULL; label_return: malloc_mutex_unlock(arg->tsdn, gctx->lock); - return (ret); + return ret; } static void @@ -1773,13 +1773,13 @@ prof_mdump(tsd_t *tsd, const char *filename) { cassert(config_prof); if (!opt_prof || !prof_booted) { - return (true); + return true; } if (filename == NULL) { /* No filename specified, so automatically generate one. 
*/ if (opt_prof_prefix[0] == '\0') { - return (true); + return true; } malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); @@ -1787,7 +1787,7 @@ prof_mdump(tsd_t *tsd, const char *filename) { malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } - return (prof_dump(tsd, true, filename, false)); + return prof_dump(tsd, true, filename, false); } void @@ -1837,7 +1837,7 @@ prof_bt_keycomp(const void *k1, const void *k2) { cassert(config_prof); if (bt1->len != bt2->len) { - return (false); + return false; } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } @@ -1851,7 +1851,7 @@ prof_thr_uid_alloc(tsdn_t *tsdn) { next_thr_uid++; malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); - return (thr_uid); + return thr_uid; } static prof_tdata_t * @@ -1866,7 +1866,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, size2index(sizeof(prof_tdata_t)), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) { - return (NULL); + return NULL; } tdata->lock = prof_tdata_mutex_choose(thr_uid); @@ -1881,7 +1881,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, prof_bt_keycomp)) { idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tdata), tdata, NULL, true, true); - return (NULL); + return NULL; } tdata->prng_state = (uint64_t)(uintptr_t)tdata; @@ -1898,24 +1898,24 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata_tree_insert(&tdatas, tdata); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - return (tdata); + return tdata; } prof_tdata_t * prof_tdata_init(tsd_t *tsd) { - return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, - NULL, prof_thread_active_init_get(tsd_tsdn(tsd)))); + return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); } static bool prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) { - return (false); + return false; } if (ckh_count(&tdata->bt2tctx) != 0) { - return (false); + return false; } - return (true); + return true; } static bool @@ -1923,7 +1923,7 @@ prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_assert_owner(tsdn, tdata->lock); - return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } static void @@ -1985,8 +1985,8 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { bool active = tdata->active; prof_tdata_detach(tsd, tdata); - return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, - active)); + return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active); } static bool @@ -2003,7 +2003,7 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { } malloc_mutex_unlock(tsdn, tdata->lock); - return (destroy_tdata); + return destroy_tdata; } static prof_tdata_t * @@ -2062,7 +2062,7 @@ prof_active_get(tsdn_t *tsdn) { malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_current = prof_active; malloc_mutex_unlock(tsdn, &prof_active_mtx); - return (prof_active_current); + return prof_active_current; } bool @@ -2073,7 +2073,7 @@ prof_active_set(tsdn_t *tsdn, bool active) { prof_active_old = prof_active; prof_active = active; malloc_mutex_unlock(tsdn, &prof_active_mtx); - return (prof_active_old); + return prof_active_old; } const char * @@ -2082,7 +2082,7 @@ 
prof_thread_name_get(tsd_t *tsd) { tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { - return (""); + return ""; } return (tdata->thread_name != NULL ? tdata->thread_name : ""); } @@ -2093,21 +2093,21 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { size_t size; if (thread_name == NULL) { - return (NULL); + return NULL; } size = strlen(thread_name) + 1; if (size == 1) { - return (""); + return ""; } ret = iallocztm(tsdn, size, size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (ret == NULL) { - return (NULL); + return NULL; } memcpy(ret, thread_name, size); - return (ret); + return ret; } int @@ -2118,23 +2118,23 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) { tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { - return (EAGAIN); + return EAGAIN; } /* Validate input. */ if (thread_name == NULL) { - return (EFAULT); + return EFAULT; } for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; if (!isgraph(c) && !isblank(c)) { - return (EFAULT); + return EFAULT; } } s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); if (s == NULL) { - return (EAGAIN); + return EAGAIN; } if (tdata->thread_name != NULL) { @@ -2145,7 +2145,7 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) { if (strlen(s) > 0) { tdata->thread_name = s; } - return (0); + return 0; } bool @@ -2154,9 +2154,9 @@ prof_thread_active_get(tsd_t *tsd) { tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { - return (false); + return false; } - return (tdata->active); + return tdata->active; } bool @@ -2165,10 +2165,10 @@ prof_thread_active_set(tsd_t *tsd, bool active) { tdata = prof_tdata_get(tsd, true); if (tdata == NULL) { - return (true); + return true; } tdata->active = active; - return (false); + return false; } bool @@ -2178,7 +2178,7 @@ prof_thread_active_init_get(tsdn_t *tsdn) { malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init = prof_thread_active_init; malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); - return (active_init); + return active_init; } bool @@ -2189,7 +2189,7 @@ prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); - return (active_init_old); + return active_init_old; } bool @@ -2199,7 +2199,7 @@ prof_gdump_get(tsdn_t *tsdn) { malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_current = prof_gdump_val; malloc_mutex_unlock(tsdn, &prof_gdump_mtx); - return (prof_gdump_current); + return prof_gdump_current; } bool @@ -2210,7 +2210,7 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) { prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; malloc_mutex_unlock(tsdn, &prof_gdump_mtx); - return (prof_gdump_old); + return prof_gdump_old; } void @@ -2257,50 +2257,50 @@ prof_boot2(tsd_t *tsd) { prof_active = opt_prof_active; if (malloc_mutex_init(&prof_active_mtx, "prof_active", WITNESS_RANK_PROF_ACTIVE)) { - return (true); + return true; } prof_gdump_val = opt_prof_gdump; if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", WITNESS_RANK_PROF_GDUMP)) { - return (true); + return true; } prof_thread_active_init = opt_prof_thread_active_init; if (malloc_mutex_init(&prof_thread_active_init_mtx, "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) { - return (true); + return true; } if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) { - return (true); + return true; } if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", 
WITNESS_RANK_PROF_BT2GCTX)) { - return (true); + return true; } tdata_tree_new(&tdatas); if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", WITNESS_RANK_PROF_TDATAS)) { - return (true); + return true; } next_thr_uid = 0; if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", WITNESS_RANK_PROF_NEXT_THR_UID)) { - return (true); + return true; } if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", WITNESS_RANK_PROF_DUMP_SEQ)) { - return (true); + return true; } if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", WITNESS_RANK_PROF_DUMP)) { - return (true); + return true; } if (opt_prof_final && opt_prof_prefix[0] != '\0' && @@ -2315,12 +2315,12 @@ prof_boot2(tsd_t *tsd) { b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE); if (gctx_locks == NULL) { - return (true); + return true; } for (i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", WITNESS_RANK_PROF_GCTX)) { - return (true); + return true; } } @@ -2328,12 +2328,12 @@ prof_boot2(tsd_t *tsd) { b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE); if (tdata_locks == NULL) { - return (true); + return true; } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", WITNESS_RANK_PROF_TDATA)) { - return (true); + return true; } } } @@ -2348,7 +2348,7 @@ prof_boot2(tsd_t *tsd) { prof_booted = true; - return (false); + return false; } void diff --git a/src/rtree.c b/src/rtree.c index de3e596..d0c5fe6 100644 --- a/src/rtree.c +++ b/src/rtree.c @@ -60,7 +60,7 @@ rtree_new(rtree_t *rtree, unsigned bits) { malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE); - return (false); + return false; } #ifdef JEMALLOC_JET @@ -69,8 +69,8 @@ rtree_new(rtree_t *rtree, unsigned bits) { #endif static rtree_elm_t * rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { - return ((rtree_elm_t *)base_alloc(tsdn, b0get(), nelms * - sizeof(rtree_elm_t), CACHELINE)); + return (rtree_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_elm_t), CACHELINE); } #ifdef JEMALLOC_JET #undef rtree_node_alloc @@ -137,25 +137,25 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, rtree->levels[level].bits); if (node == NULL) { malloc_mutex_unlock(tsdn, &rtree->init_lock); - return (NULL); + return NULL; } atomic_write_p((void **)elmp, node); } malloc_mutex_unlock(tsdn, &rtree->init_lock); - return (node); + return node; } rtree_elm_t * rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level) { - return (rtree_node_init(tsdn, rtree, level, - &rtree->levels[level].subtree)); + return rtree_node_init(tsdn, rtree, level, + &rtree->levels[level].subtree); } rtree_elm_t * rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level) { - return (rtree_node_init(tsdn, rtree, level+1, &elm->child)); + return rtree_node_init(tsdn, rtree, level+1, &elm->child); } static int @@ -167,7 +167,7 @@ rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b, assert(ka != 0); assert(kb != 0); - return ((ka > kb) - (ka < kb)); + return (ka > kb) - (ka < kb); } static witness_t * @@ -192,7 +192,7 @@ rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm) { } } assert(witness != NULL); - return (witness); + return witness; } static witness_t * @@ -205,7 +205,7 @@ rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm) { rtree_elm_witness_t *rew = &witnesses->witnesses[i]; if (rew->elm == elm) { - return (&rew->witness); + return &rew->witness; } } not_reached(); diff --git a/src/tcache.c 
b/src/tcache.c index bb6a5a7..0501c3f 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -25,7 +25,7 @@ static tcaches_t *tcaches_avail; size_t tcache_salloc(tsdn_t *tsdn, const void *ptr) { - return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr)); + return arena_salloc(tsdn, iealloc(tsdn, ptr), ptr); } void @@ -82,7 +82,7 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, } ret = tcache_alloc_easy(tbin, tcache_success); - return (ret); + return ret; } void @@ -297,13 +297,13 @@ tcache_get_hard(tsd_t *tsd) { if (tsd_nominal(tsd)) { tcache_enabled_set(false); /* Memoize. */ } - return (NULL); + return NULL; } arena = arena_choose(tsd, NULL); if (unlikely(arena == NULL)) { - return (NULL); + return NULL; } - return (tcache_create(tsd_tsdn(tsd), arena)); + return tcache_create(tsd_tsdn(tsd), arena); } tcache_t * @@ -323,7 +323,7 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) { tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true, arena_get(TSDN_NULL, 0, true)); if (tcache == NULL) { - return (NULL); + return NULL; } tcache_arena_associate(tsdn, tcache, arena); @@ -343,7 +343,7 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) { (uintptr_t)stack_offset); } - return (tcache); + return tcache; } static void @@ -432,20 +432,20 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) { tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE); if (tcaches == NULL) { - return (true); + return true; } } if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { - return (true); + return true; } arena = arena_ichoose(tsd, NULL); if (unlikely(arena == NULL)) { - return (true); + return true; } tcache = tcache_create(tsd_tsdn(tsd), arena); if (tcache == NULL) { - return (true); + return true; } if (tcaches_avail != NULL) { @@ -460,7 +460,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) { tcaches_past++; } - return (false); + return false; } static void @@ -503,7 +503,7 @@ tcache_boot(tsdn_t *tsdn) { tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins * sizeof(tcache_bin_info_t), CACHELINE); if (tcache_bin_info == NULL) { - return (true); + return true; } stack_nelms = 0; for (i = 0; i < NBINS; i++) { @@ -525,5 +525,5 @@ tcache_boot(tsdn_t *tsdn) { stack_nelms += tcache_bin_info[i].ncached_max; } - return (false); + return false; } @@ -13,7 +13,7 @@ malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) void * malloc_tsd_malloc(size_t size) { - return (a0malloc(CACHELINE_CEILING(size))); + return a0malloc(CACHELINE_CEILING(size)); } void @@ -109,11 +109,11 @@ malloc_tsd_boot0(void) { ncleanups = 0; if (tsd_boot0()) { - return (NULL); + return NULL; } tsd = tsd_fetch(); *tsd_arenas_tdata_bypassp_get(tsd) = true; - return (tsd); + return tsd; } void @@ -137,7 +137,7 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { default: break; } - return (true); + return true; } #ifdef _MSC_VER @@ -167,7 +167,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { malloc_mutex_unlock(TSDN_NULL, &head->lock); - return (iter->data); + return iter->data; } } /* Insert block into list. 
*/ @@ -175,7 +175,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { block->thread = self; ql_tail_insert(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); - return (NULL); + return NULL; } void @@ -87,16 +87,16 @@ buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPSTR)buf, (DWORD)buflen, NULL); - return (0); + return 0; #elif defined(__GLIBC__) && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } - return (0); + return 0; #else - return (strerror_r(err, buf, buflen)); + return strerror_r(err, buf, buflen); #endif } @@ -218,7 +218,7 @@ label_return: *endptr = (char *)p; } } - return (ret); + return ret; } static char * @@ -260,7 +260,7 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { }} *slen_p = U2S_BUFSIZE - 1 - i; - return (&s[i]); + return &s[i]; } static char * @@ -288,7 +288,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) { break; default: not_reached(); } - return (s); + return s; } static char * @@ -299,7 +299,7 @@ o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { (*slen_p)++; *s = '0'; } - return (s); + return s; } static char * @@ -310,7 +310,7 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { (*slen_p) += 2; memcpy(s, uppercase ? "0X" : "0x", 2); } - return (s); + return s; } size_t @@ -593,7 +593,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC - return (i); + return i; } JEMALLOC_FORMAT_PRINTF(3, 4) @@ -606,7 +606,7 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) { ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); - return (ret); + return ret; } void @@ -135,17 +135,17 @@ zone_size(malloc_zone_t *zone, const void *ptr) { * not work in practice, we must check all pointers to assure that they * reside within a mapped extent before determining size. */ - return (ivsalloc(tsdn_fetch(), ptr)); + return ivsalloc(tsdn_fetch(), ptr); } static void * zone_malloc(malloc_zone_t *zone, size_t size) { - return (je_malloc(size)); + return je_malloc(size); } static void * zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { - return (je_calloc(num, size)); + return je_calloc(num, size); } static void * @@ -154,7 +154,7 @@ zone_valloc(malloc_zone_t *zone, size_t size) { je_posix_memalign(&ret, PAGE, size); - return (ret); + return ret; } static void @@ -170,10 +170,10 @@ zone_free(malloc_zone_t *zone, void *ptr) { static void * zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { - return (je_realloc(ptr, size)); + return je_realloc(ptr, size); } - return (realloc(ptr, size)); + return realloc(ptr, size); } static void * @@ -182,7 +182,7 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { je_posix_memalign(&ret, alignment, size); - return (ret); + return ret; } static void @@ -240,7 +240,7 @@ zone_good_size(malloc_zone_t *zone, size_t size) { if (size == 0) { size = 1; } - return (s2u(size)); + return s2u(size); } static kern_return_t @@ -368,10 +368,10 @@ zone_default_get(void) { } if (num_zones) { - return (zones[0]); + return zones[0]; } - return (malloc_default_zone()); + return malloc_default_zone(); } /* As written, this function can only promote jemalloc_zone. */ |