author     Jason Evans <jasone@canonware.com>       2014-10-03 17:16:09 (GMT)
committer  Jason Evans <jasone@canonware.com>       2014-10-03 17:16:09 (GMT)
commit     551ebc43647521bdd0bc78558b106762b3388928 (patch)
tree       1cb827cc62f7e664604fe0de673d4006747df2fd /include/jemalloc/internal
parent     ebbd0c91f0935421c04d05c8bdc6e38762a1e561 (diff)
Convert to uniform style: cond == false --> !cond
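To make the rule concrete, here is a minimal standalone sketch (hypothetical code, not taken from the jemalloc sources) showing the same test in the old and the new spelling; for a C99 bool the two forms behave identically, so the conversion is purely stylistic:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical predicate, for illustration only. */
    static bool
    is_ready(void)
    {
        return (false);
    }

    int
    main(void)
    {
        /* Old style: explicit comparison against false. */
        if (is_ready() == false)
            printf("not ready (old style)\n");

        /* New style, as applied by this commit: logical negation. */
        if (!is_ready())
            printf("not ready (new style)\n");

        return (0);
    }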
Diffstat (limited to 'include/jemalloc/internal')
-rw-r--r--  include/jemalloc/internal/arena.h                11
-rw-r--r--  include/jemalloc/internal/bitmap.h                8
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in   2
-rw-r--r--  include/jemalloc/internal/prof.h                   2
-rw-r--r--  include/jemalloc/internal/rb.h                     7
-rw-r--r--  include/jemalloc/internal/tcache.h                 8
6 files changed, 18 insertions, 20 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 48fd205..2e9920c 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -1111,13 +1111,12 @@ arena_salloc(const void *ptr, bool demote)
     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
     binind = arena_mapbits_binind_get(chunk, pageind);
-    if (unlikely(binind == BININD_INVALID || (config_prof && demote == false
-        && arena_mapbits_large_get(chunk, pageind) != 0))) {
+    if (unlikely(binind == BININD_INVALID || (config_prof && !demote &&
+        arena_mapbits_large_get(chunk, pageind) != 0))) {
         /*
-         * Large allocation. In the common case (demote == true), and
-         * as this is an inline function, most callers will only end up
-         * looking at binind to determine that ptr is a small
-         * allocation.
+         * Large allocation. In the common case (demote), and as this
+         * is an inline function, most callers will only end up looking
+         * at binind to determine that ptr is a small allocation.
          */
         assert(((uintptr_t)ptr & PAGE_MASK) == 0);
         ret = arena_mapbits_large_size_get(chunk, pageind);
diff --git a/include/jemalloc/internal/bitmap.h b/include/jemalloc/internal/bitmap.h
index 4ca40ff..fcc6005 100644
--- a/include/jemalloc/internal/bitmap.h
+++ b/include/jemalloc/internal/bitmap.h
@@ -139,7 +139,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
     bitmap_t g;

     assert(bit < binfo->nbits);
-    assert(bitmap_get(bitmap, binfo, bit) == false);
+    assert(!bitmap_get(bitmap, binfo, bit));
     goff = bit >> LG_BITMAP_GROUP_NBITS;
     gp = &bitmap[goff];
     g = *gp;
@@ -172,7 +172,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
     bitmap_t g;
     unsigned i;

-    assert(bitmap_full(bitmap, binfo) == false);
+    assert(!bitmap_full(bitmap, binfo));

     i = binfo->nlevels - 1;
     g = bitmap[binfo->levels[i].group_offset];
@@ -204,7 +204,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
     assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
     g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
     *gp = g;
-    assert(bitmap_get(bitmap, binfo, bit) == false);
+    assert(!bitmap_get(bitmap, binfo, bit));
     /* Propagate group state transitions up the tree. */
     if (propagate) {
         unsigned i;
@@ -218,7 +218,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
                 == 0);
             g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
             *gp = g;
-            if (propagate == false)
+            if (!propagate)
                 break;
         }
     }
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index bff2bd2..ed25172 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -714,7 +714,7 @@ isalloc(const void *ptr, bool demote)
     assert(ptr != NULL);
     /* Demotion only makes sense if config_prof is true. */
-    assert(config_prof || demote == false);
+    assert(config_prof || !demote);

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr)
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 91c871d..ea52a63 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -388,7 +388,7 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
         /* Compute new sample threshold. */
         if (update)
             prof_sample_threshold_update(tdata);
-        return (tdata->active == false);
+        return (!tdata->active);
     }
 }
diff --git a/include/jemalloc/internal/rb.h b/include/jemalloc/internal/rb.h
index ffe3bb0..64fab89 100644
--- a/include/jemalloc/internal/rb.h
+++ b/include/jemalloc/internal/rb.h
@@ -593,7 +593,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
         if (left != &rbtree->rbt_nil) { \
             /* node has no successor, but it has a left child. */\
             /* Splice node out, without losing the left child. */\
-            assert(rbtn_red_get(a_type, a_field, node) == false); \
+            assert(!rbtn_red_get(a_type, a_field, node)); \
             assert(rbtn_red_get(a_type, a_field, left)); \
             rbtn_black_set(a_type, a_field, left); \
             if (pathp == path) { \
@@ -629,8 +629,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
     if (pathp->cmp < 0) { \
         rbtn_left_set(a_type, a_field, pathp->node, \
             pathp[1].node); \
-        assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
-            == false); \
+        assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
         if (rbtn_red_get(a_type, a_field, pathp->node)) { \
             a_type *right = rbtn_right_get(a_type, a_field, \
                 pathp->node); \
@@ -862,7 +861,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
     } \
     /* Set root. */ \
     rbtree->rbt_root = path->node; \
-    assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
+    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
 } \
 a_attr a_type * \
 a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 6804668..bc0b41c 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -191,9 +191,9 @@ tcache_get(tsd_t *tsd, bool create)
 {
     tcache_t *tcache;

-    if (config_tcache == false)
+    if (!config_tcache)
         return (NULL);
-    if (config_lazy_lock && isthreaded == false)
+    if (config_lazy_lock && !isthreaded)
         return (NULL);
     /*
      * If create is true, the caller has already assured that tsd is
@@ -261,7 +261,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
     }
     assert(tcache_salloc(ret) == size);

-    if (likely(zero == false)) {
+    if (likely(!zero)) {
         if (config_fill) {
             if (unlikely(opt_junk)) {
                 arena_alloc_junk_small(ret,
@@ -315,7 +315,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
             arena_mapbits_large_binind_set(chunk, pageind,
                 BININD_INVALID);
         }
-        if (likely(zero == false)) {
+        if (likely(!zero)) {
             if (config_fill) {
                 if (unlikely(opt_junk))
                     memset(ret, 0xa5, size);
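A note on why this conversion is safe: with <stdbool.h>, a bool can only hold 0 or 1, and `x == false` is a comparison against 0, so `!x` and `x == false` always agree. A tiny self-checking sketch (standalone, not part of jemalloc) that verifies this exhaustively:

    #include <assert.h>
    #include <stdbool.h>

    int
    main(void)
    {
        /* A C99 bool holds only 0 or 1; check both values. */
        for (int i = 0; i <= 1; i++) {
            bool b = (bool)i;
            assert((!b) == (b == false));
        }
        return (0);
    }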