diff options
author | Jason Evans <jasone@canonware.com> | 2013-12-13 06:35:52 (GMT) |
---|---|---|
committer | Jason Evans <jasone@canonware.com> | 2013-12-13 06:35:52 (GMT) |
commit | d82a5e6a34f20698ab9368bb2b4953b81d175552 (patch) | |
tree | 23cbe8892adf46196cc6b2cf977704405c7798b7 /src/jemalloc.c | |
parent | 0ac396a06a10f8a8c1d41c8771367625e7d49d07 (diff) | |
download | jemalloc-d82a5e6a34f20698ab9368bb2b4953b81d175552.zip jemalloc-d82a5e6a34f20698ab9368bb2b4953b81d175552.tar.gz jemalloc-d82a5e6a34f20698ab9368bb2b4953b81d175552.tar.bz2 |
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API.
The *allocx() functions are slightly simpler to use because they have
fewer parameters, they directly return the results of primary interest,
and mallocx()/rallocx() avoid the strict aliasing pitfall that
allocm()/rallocm() share with posix_memalign(). The following code
violates strict aliasing rules:
foo_t *foo;
allocm((void **)&foo, NULL, 42, 0);
whereas the following is safe:
foo_t *foo;
void *p;
allocm(&p, NULL, 42, 0);
foo = (foo_t *)p;
mallocx() does not have this problem:
foo_t *foo = (foo_t *)mallocx(42, 0);
Diffstat (limited to 'src/jemalloc.c')
-rw-r--r-- | src/jemalloc.c | 456 |
1 files changed, 294 insertions, 162 deletions
diff --git a/src/jemalloc.c b/src/jemalloc.c index f13a7d8..f8c8119 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1337,73 +1337,8 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = * Begin non-standard functions. */ -size_t -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ - size_t ret; - - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; - - return (ret); -} - -void -je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - - stats_print(write_cb, cbopaque, opts); -} - -int -je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); -} - -int -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_nametomib(name, mibp, miblenp)); -} - -int -je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * Begin experimental functions. 
- */ -#ifdef JEMALLOC_EXPERIMENTAL - JEMALLOC_ALWAYS_INLINE_C void * -iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, +imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, arena_t *arena) { @@ -1411,26 +1346,25 @@ iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, alignment))); if (alignment != 0) - return (ipallocx(usize, alignment, zero, try_tcache, arena)); + return (ipalloct(usize, alignment, zero, try_tcache, arena)); else if (zero) - return (icallocx(usize, try_tcache, arena)); + return (icalloct(usize, try_tcache, arena)); else - return (imallocx(usize, try_tcache, arena)); + return (imalloct(usize, try_tcache, arena)); } -int -je_allocm(void **ptr, size_t *rsize, size_t size, int flags) +void * +je_mallocx(size_t size, int flags) { void *p; size_t usize; - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) & (SIZE_T_MAX-1)); - bool zero = flags & ALLOCM_ZERO; + bool zero = flags & MALLOCX_ZERO; unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; arena_t *arena; bool try_tcache; - assert(ptr != NULL); assert(size != 0); if (malloc_init()) @@ -1460,61 +1394,149 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags) s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment); assert(usize_promoted != 0); - p = iallocm(usize_promoted, alignment, zero, + p = imallocx(usize_promoted, alignment, zero, try_tcache, arena); if (p == NULL) goto label_oom; arena_prof_promoted(p, usize); } else { - p = iallocm(usize, alignment, zero, try_tcache, arena); + p = imallocx(usize, alignment, zero, try_tcache, arena); if (p == NULL) goto label_oom; } prof_malloc(p, usize, cnt); } else { - p = iallocm(usize, alignment, zero, try_tcache, arena); + p = imallocx(usize, alignment, zero, try_tcache, arena); if (p == NULL) goto label_oom; } - if (rsize != NULL) - *rsize = usize; - *ptr = p; if (config_stats) { assert(usize == isalloc(p, config_prof)); 
thread_allocated_tsd_get()->allocated += usize; } UTRACE(0, size, p); JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); - return (ALLOCM_SUCCESS); + return (p); label_oom: if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in allocm(): " - "out of memory\n"); + malloc_write("<jemalloc>: Error in mallocx(): out of memory\n"); abort(); } - *ptr = NULL; UTRACE(0, size, 0); - return (ALLOCM_ERR_OOM); + return (NULL); } -int -je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) +void * +je_rallocx(void *ptr, size_t size, int flags) { - void *p, *q; + void *p; size_t usize; size_t old_size; UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) & (SIZE_T_MAX-1)); - bool zero = flags & ALLOCM_ZERO; - bool no_move = flags & ALLOCM_NO_MOVE; + bool zero = flags & MALLOCX_ZERO; + unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; + bool try_tcache_alloc, try_tcache_dalloc; + arena_t *arena; + + assert(ptr != NULL); + assert(size != 0); + assert(malloc_initialized || IS_INITIALIZER); + malloc_thread_init(); + + if (arena_ind != UINT_MAX) { + arena_chunk_t *chunk; + try_tcache_alloc = false; + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + try_tcache_dalloc = (chunk == ptr || chunk->arena != + arenas[arena_ind]); + arena = arenas[arena_ind]; + } else { + try_tcache_alloc = true; + try_tcache_dalloc = true; + arena = NULL; + } + + if (config_prof && opt_prof) { + prof_thr_cnt_t *cnt; + + usize = (alignment == 0) ? s2u(size) : sa2u(size, + alignment); + prof_ctx_t *old_ctx = prof_ctx_get(ptr); + old_size = isalloc(ptr, true); + if (config_valgrind && opt_valgrind) + old_rzsize = p2rz(ptr); + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) + goto label_oom; + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= + SMALL_MAXCLASS) { + p = iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= + size) ? 
0 : size - (SMALL_MAXCLASS+1), alignment, + zero, false, try_tcache_alloc, try_tcache_dalloc, + arena); + if (p == NULL) + goto label_oom; + if (usize < PAGE) + arena_prof_promoted(p, usize); + } else { + p = iralloct(ptr, size, 0, alignment, zero, false, + try_tcache_alloc, try_tcache_dalloc, arena); + if (p == NULL) + goto label_oom; + } + prof_realloc(p, usize, cnt, old_size, old_ctx); + } else { + if (config_stats) { + old_size = isalloc(ptr, false); + if (config_valgrind && opt_valgrind) + old_rzsize = u2rz(old_size); + } else if (config_valgrind && opt_valgrind) { + old_size = isalloc(ptr, false); + old_rzsize = u2rz(old_size); + } + p = iralloct(ptr, size, 0, alignment, zero, false, + try_tcache_alloc, try_tcache_dalloc, arena); + if (p == NULL) + goto label_oom; + if (config_stats || (config_valgrind && opt_valgrind)) + usize = isalloc(p, config_prof); + } + + if (config_stats) { + thread_allocated_t *ta; + ta = thread_allocated_tsd_get(); + ta->allocated += usize; + ta->deallocated += old_size; + } + UTRACE(ptr, size, p); + JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_size, old_rzsize, zero); + return (p); +label_oom: + if (config_xmalloc && opt_xmalloc) { + malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); + abort(); + } + UTRACE(ptr, size, 0); + return (NULL); +} + +size_t +je_xallocx(void *ptr, size_t size, size_t extra, int flags) +{ + size_t usize; + size_t old_size; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & MALLOCX_ZERO; unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; bool try_tcache_alloc, try_tcache_dalloc; arena_t *arena; assert(ptr != NULL); - assert(*ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); assert(malloc_initialized || IS_INITIALIZER); @@ -1523,8 +1545,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) if (arena_ind != UINT_MAX) { arena_chunk_t *chunk; 
try_tcache_alloc = false; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr); - try_tcache_dalloc = (chunk == *ptr || chunk->arena != + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + try_tcache_dalloc = (chunk == ptr || chunk->arena != arenas[arena_ind]); arena = arenas[arena_ind]; } else { @@ -1533,7 +1555,6 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) arena = NULL; } - p = *ptr; if (config_prof && opt_prof) { prof_thr_cnt_t *cnt; @@ -1546,109 +1567,87 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) */ size_t max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra, alignment); - prof_ctx_t *old_ctx = prof_ctx_get(p); - old_size = isalloc(p, true); + prof_ctx_t *old_ctx = prof_ctx_get(ptr); + old_size = isalloc(ptr, true); if (config_valgrind && opt_valgrind) - old_rzsize = p2rz(p); + old_rzsize = p2rz(ptr); PROF_ALLOC_PREP(1, max_usize, cnt); - if (cnt == NULL) - goto label_oom; + if (cnt == NULL) { + usize = isalloc(ptr, config_prof); + goto label_not_moved; + } /* * Use minimum usize to determine whether promotion may happen. */ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <= SMALL_MAXCLASS) { - q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= + if (iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), - alignment, zero, no_move, try_tcache_alloc, - try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; + alignment, zero, true, try_tcache_alloc, + try_tcache_dalloc, arena) == NULL) + goto label_not_moved; if (max_usize < PAGE) { usize = max_usize; - arena_prof_promoted(q, usize); + arena_prof_promoted(ptr, usize); } else - usize = isalloc(q, config_prof); + usize = isalloc(ptr, config_prof); } else { - q = irallocx(p, size, extra, alignment, zero, no_move, - try_tcache_alloc, try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - usize = isalloc(q, config_prof); + if (iralloct(ptr, size, extra, alignment, zero, true, + try_tcache_alloc, try_tcache_dalloc, arena) == NULL) + goto label_not_moved; + usize = isalloc(ptr, config_prof); } - prof_realloc(q, usize, cnt, old_size, old_ctx); - if (rsize != NULL) - *rsize = usize; + prof_realloc(ptr, usize, cnt, old_size, old_ctx); } else { if (config_stats) { - old_size = isalloc(p, false); + old_size = isalloc(ptr, false); if (config_valgrind && opt_valgrind) old_rzsize = u2rz(old_size); } else if (config_valgrind && opt_valgrind) { - old_size = isalloc(p, false); + old_size = isalloc(ptr, false); old_rzsize = u2rz(old_size); } - q = irallocx(p, size, extra, alignment, zero, no_move, - try_tcache_alloc, try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - if (config_stats) - usize = isalloc(q, config_prof); - if (rsize != NULL) { - if (config_stats == false) - usize = isalloc(q, config_prof); - *rsize = usize; + if (iralloct(ptr, size, extra, alignment, zero, true, + try_tcache_alloc, try_tcache_dalloc, arena) == NULL) { + usize = isalloc(ptr, config_prof); + goto label_not_moved; } + usize = isalloc(ptr, config_prof); } - *ptr = q; if (config_stats) { thread_allocated_t *ta; ta = thread_allocated_tsd_get(); ta->allocated += usize; ta->deallocated += old_size; } - UTRACE(p, size, q); - JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); - 
return (ALLOCM_SUCCESS); -label_err: - if (no_move) { - UTRACE(p, size, q); - return (ALLOCM_ERR_NOT_MOVED); - } -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in rallocm(): " - "out of memory\n"); - abort(); - } - UTRACE(p, size, 0); - return (ALLOCM_ERR_OOM); + JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_size, old_rzsize, zero); +label_not_moved: + UTRACE(ptr, size, ptr); + return (usize); } -int -je_sallocm(const void *ptr, size_t *rsize, int flags) +size_t +je_sallocx(const void *ptr, int flags) { - size_t sz; + size_t usize; assert(malloc_initialized || IS_INITIALIZER); malloc_thread_init(); if (config_ivsalloc) - sz = ivsalloc(ptr, config_prof); + usize = ivsalloc(ptr, config_prof); else { assert(ptr != NULL); - sz = isalloc(ptr, config_prof); + usize = isalloc(ptr, config_prof); } - assert(rsize != NULL); - *rsize = sz; - return (ALLOCM_SUCCESS); + return (usize); } -int -je_dallocm(void *ptr, int flags) +void +je_dallocx(void *ptr, int flags) { size_t usize; UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); @@ -1677,28 +1676,161 @@ je_dallocm(void *ptr, int flags) thread_allocated_tsd_get()->deallocated += usize; if (config_valgrind && opt_valgrind) rzsize = p2rz(ptr); - iqallocx(ptr, try_tcache); + iqalloct(ptr, try_tcache); JEMALLOC_VALGRIND_FREE(ptr, rzsize); - - return (ALLOCM_SUCCESS); } -int -je_nallocm(size_t *rsize, size_t size, int flags) +size_t +je_nallocx(size_t size, int flags) { size_t usize; - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) & (SIZE_T_MAX-1)); assert(size != 0); if (malloc_init()) - return (ALLOCM_ERR_OOM); + return (0); usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); - if (usize == 0) + return (usize); +} + +int +je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_byname(name, oldp, oldlenp, newp, newlen)); +} + +int +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_nametomib(name, mibp, miblenp)); +} + +int +je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); +} + +void +je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + + stats_print(write_cb, cbopaque, opts); +} + +size_t +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) +{ + size_t ret; + + assert(malloc_initialized || IS_INITIALIZER); + malloc_thread_init(); + + if (config_ivsalloc) + ret = ivsalloc(ptr, config_prof); + else + ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; + + return (ret); +} + +/* + * End non-standard functions. + */ +/******************************************************************************/ +/* + * Begin experimental functions. + */ +#ifdef JEMALLOC_EXPERIMENTAL + +int +je_allocm(void **ptr, size_t *rsize, size_t size, int flags) +{ + void *p; + + assert(ptr != NULL); + + p = je_mallocx(size, flags); + if (p == NULL) return (ALLOCM_ERR_OOM); + if (rsize != NULL) + *rsize = isalloc(p, config_prof); + *ptr = p; + return (ALLOCM_SUCCESS); +} +int +je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) +{ + int ret; + bool no_move = flags & ALLOCM_NO_MOVE; + + assert(ptr != NULL); + assert(*ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + + if (no_move) { + size_t usize = je_xallocx(*ptr, size, extra, flags); + ret = (usize >= size) ? 
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; + if (rsize != NULL) + *rsize = usize; + } else { + void *p = je_rallocx(*ptr, size+extra, flags); + if (p != NULL) { + *ptr = p; + ret = ALLOCM_SUCCESS; + } else + ret = ALLOCM_ERR_OOM; + if (rsize != NULL) + *rsize = isalloc(*ptr, config_prof); + } + return (ret); +} + +int +je_sallocm(const void *ptr, size_t *rsize, int flags) +{ + + assert(rsize != NULL); + *rsize = je_sallocx(ptr, flags); + return (ALLOCM_SUCCESS); +} + +int +je_dallocm(void *ptr, int flags) +{ + + je_dallocx(ptr, flags); + return (ALLOCM_SUCCESS); +} + +int +je_nallocm(size_t *rsize, size_t size, int flags) +{ + size_t usize; + + usize = je_nallocx(size, flags); + if (usize == 0) + return (ALLOCM_ERR_OOM); if (rsize != NULL) *rsize = usize; return (ALLOCM_SUCCESS); |