author     Daniel Micay <danielmicay@gmail.com>     2014-10-23 14:30:52 (GMT)
committer  Jason Evans <jasone@canonware.com>       2014-10-31 00:06:38 (GMT)
commit     809b0ac3919da60c20ad59517ef560d0df639f3b (patch)
tree       e012217d815bd24e5869d33c9b5886fc9db549c0
parent     c93ed81cd06ae46906ae7a386fd6312caca391fb (diff)
mark huge allocations as unlikely
This cleans up the fast path a bit more by moving more code off of it.
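The annotations rely on the compiler's branch-prediction intrinsics. Below is a minimal, self-contained sketch (not jemalloc's actual headers) of how likely()/unlikely() are commonly defined on top of GCC/Clang's __builtin_expect, and of the pattern this patch applies to the allocation paths. The names arena_maxclass_example, arena_malloc_example(), huge_malloc_example(), and imalloc_example() are hypothetical stand-ins for jemalloc's arena_maxclass, arena_malloc(), huge_malloc(), and imalloct().

#include <stddef.h>
#include <stdlib.h>

/*
 * Typical definitions of likely()/unlikely() on GCC/Clang; compilers without
 * __builtin_expect fall back to the bare expression.
 */
#if defined(__GNUC__) || defined(__clang__)
#  define likely(x)   __builtin_expect(!!(x), 1)
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define likely(x)   (!!(x))
#  define unlikely(x) (!!(x))
#endif

/* Hypothetical stand-ins for arena_maxclass and the two allocation backends. */
static const size_t arena_maxclass_example = (size_t)4 << 20;	/* 4 MiB */

static void *
arena_malloc_example(size_t size)
{
	return (malloc(size));	/* common case: arena/size-class allocation */
}

static void *
huge_malloc_example(size_t size)
{
	return (malloc(size));	/* rare case: chunk-aligned huge allocation */
}

/*
 * The pattern used throughout the patch: mark the arena path as likely so the
 * compiler keeps it on the straight-line fast path and pushes the rare
 * huge-allocation call out of line.
 */
static void *
imalloc_example(size_t size)
{
	if (likely(size <= arena_maxclass_example))
		return (arena_malloc_example(size));
	else
		return (huge_malloc_example(size));
}

int
main(void)
{
	void *p = imalloc_example(64);			/* predicted-taken branch */
	void *q = imalloc_example((size_t)8 << 20);	/* rare huge case */

	free(p);
	free(q);
	return (0);
}

Note that these hints only influence static branch prediction and code layout; the functional behavior of every annotated check is unchanged, which is why the diff below touches only the conditions themselves.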
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in  20
-rw-r--r--  include/jemalloc/internal/prof.h                    4
-rw-r--r--  src/arena.c                                         4
-rw-r--r--  src/jemalloc.c                                      4
4 files changed, 16 insertions, 16 deletions
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 294e2cc..3ce5aba 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -655,7 +655,7 @@ sa2u(size_t size, size_t alignment)
 	}
 
 	/* Try for a large size class. */
-	if (size <= arena_maxclass && alignment < chunksize) {
+	if (likely(size <= arena_maxclass) && likely(alignment < chunksize)) {
 		/*
 		 * We can't achieve subpage alignment, so round up alignment
 		 * to the minimum that can actually be supported.
@@ -805,7 +805,7 @@ imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
 
 	assert(size != 0);
 
-	if (size <= arena_maxclass)
+	if (likely(size <= arena_maxclass))
 		return (arena_malloc(tsd, arena, size, false, try_tcache));
 	else
 		return (huge_malloc(tsd, arena, size, false, try_tcache));
@@ -822,7 +822,7 @@ JEMALLOC_ALWAYS_INLINE void *
 icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
 {
 
-	if (size <= arena_maxclass)
+	if (likely(size <= arena_maxclass))
 		return (arena_malloc(tsd, arena, size, true, try_tcache));
 	else
 		return (huge_malloc(tsd, arena, size, true, try_tcache));
@@ -847,12 +847,12 @@ ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
 	if (usize <= SMALL_MAXCLASS && alignment < PAGE)
 		ret = arena_malloc(tsd, arena, usize, zero, try_tcache);
 	else {
-		if (usize <= arena_maxclass) {
+		if (likely(usize <= arena_maxclass)) {
 			arena = arena_choose(tsd, arena);
 			if (unlikely(arena == NULL))
 				return (NULL);
 			ret = arena_palloc(arena, usize, alignment, zero);
-		} else if (alignment <= chunksize)
+		} else if (likely(alignment <= chunksize))
 			ret = huge_malloc(tsd, arena, usize, zero, try_tcache);
 		else {
 			ret = huge_palloc(tsd, arena, usize, alignment, zero,
@@ -887,7 +887,7 @@ isalloc(const void *ptr, bool demote)
 	assert(config_prof || !demote);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr)
+	if (likely(chunk != ptr))
 		ret = arena_salloc(ptr, demote);
 	else
 		ret = huge_salloc(ptr);
@@ -936,7 +936,7 @@ idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr)
+	if (likely(chunk != ptr))
 		arena_dalloc(tsd, chunk, ptr, try_tcache);
 	else
 		huge_dalloc(tsd, ptr, try_tcache);
@@ -950,7 +950,7 @@ isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr)
+	if (likely(chunk != ptr))
 		arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
 	else
 		huge_dalloc(tsd, ptr, try_tcache);
@@ -1038,7 +1038,7 @@ iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero,
 		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
 	}
 
-	if (size <= arena_maxclass) {
+	if (likely(size <= arena_maxclass)) {
 		return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
 		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
 	} else {
@@ -1069,7 +1069,7 @@ ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
 		return (true);
 	}
 
-	if (size <= arena_maxclass)
+	if (likely(size <= arena_maxclass))
 		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
 	else
 		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 5103146..e0d5f10 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -361,7 +361,7 @@ prof_tctx_get(const void *ptr)
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr) {
+	if (likely(chunk != ptr)) {
 		/* Region. */
 		ret = arena_prof_tctx_get(ptr);
 	} else
@@ -379,7 +379,7 @@ prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr) {
+	if (likely(chunk != ptr)) {
 		/* Region. */
 		arena_prof_tctx_set(ptr, tctx);
 	} else
diff --git a/src/arena.c b/src/arena.c
index 795f530..347d58e 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2095,7 +2095,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
 	size_t usize;
 
 	/* Make sure extra can't cause size_t overflow. */
-	if (extra >= arena_maxclass)
+	if (unlikely(extra >= arena_maxclass))
 		return (true);
 
 	usize = s2u(size + extra);
@@ -2142,7 +2142,7 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 	/*
 	 * Avoid moving the allocation if the size class can be left the same.
 	 */
-	if (oldsize <= arena_maxclass) {
+	if (likely(oldsize <= arena_maxclass)) {
 		if (oldsize <= SMALL_MAXCLASS) {
 			assert(arena_bin_info[size2index(oldsize)].reg_size ==
 			    oldsize);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 4543959..f130e99 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -264,7 +264,7 @@ a0alloc(size_t size, bool zero)
 	if (size == 0)
 		size = 1;
 
-	if (size <= arena_maxclass)
+	if (likely(size <= arena_maxclass))
 		ret = arena_malloc(NULL, a0get(), size, zero, false);
 	else
 		ret = huge_malloc(NULL, a0get(), size, zero, false);
@@ -295,7 +295,7 @@ a0free(void *ptr)
 		return;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr)
+	if (likely(chunk != ptr))
 		arena_dalloc(NULL, chunk, ptr, false);
 	else
 		huge_dalloc(NULL, ptr, false);