author     Jason Evans <je@fb.com>  2014-01-12 23:05:44 (GMT)
committer  Jason Evans <je@fb.com>  2014-01-12 23:41:05 (GMT)
commit     b2c31660be917ea6d59cd54e6f650b06b5e812ed (patch)
tree       2642e518d63ce4ebd4e50f074ab0a425fa1497ba
parent     6b694c4d47278cddfaaedeb7ee49fa5757e35ed5 (diff)
Extract profiling code from [re]allocation functions.
Extract profiling code from malloc(), imemalign(), calloc(), realloc(),
mallocx(), rallocx(), and xallocx().  This slightly reduces the amount of
code compiled into the fast paths, but the primary benefit is the
combinatorial complexity reduction.

Simplify iralloc[t]() by creating a separate ixalloc() that handles the
no-move cases.

Further simplify [mrxn]allocx() (and by implication [mrn]allocm()) to make
request size overflows due to size class and/or alignment constraints
trigger undefined behavior (detected by debug-only assertions).

Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
backtrace creation in imemalign().  This bug impacted posix_memalign() and
aligned_alloc().
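[Note: the net effect on the internal reallocation API, condensed from the
jemalloc_internal.h.in hunks below (a sketch of the declarations, not
standalone code): the possibly-moving path keeps the pointer-returning
iralloc[t]() signature, while the no-move path becomes its own
bool-returning primitive.]

/* Condensed from include/jemalloc/internal/jemalloc_internal.h.in below. */
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);		/* may move; NULL now means OOM only */
bool	ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);		/* in place; true means "could not resize" */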
-rw-r--r--  doc/jemalloc.xml.in                                26
-rw-r--r--  include/jemalloc/internal/arena.h                   2
-rw-r--r--  include/jemalloc/internal/huge.h                    2
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in  126
-rw-r--r--  include/jemalloc/internal/private_symbols.txt       2
-rw-r--r--  src/arena.c                                        13
-rw-r--r--  src/huge.c                                         11
-rw-r--r--  src/jemalloc.c                                    803
-rw-r--r--  test/integration/allocm.c                          33
-rw-r--r--  test/integration/mallocx.c                         30
10 files changed, 552 insertions, 496 deletions
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 5fc7653..c7e2e87 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -321,14 +321,16 @@
<para>The <function>mallocx<parameter/></function> function allocates at
least <parameter>size</parameter> bytes of memory, and returns a pointer
to the base address of the allocation. Behavior is undefined if
- <parameter>size</parameter> is <constant>0</constant>.</para>
+ <parameter>size</parameter> is <constant>0</constant>, or if request size
+ overflows due to size class and/or alignment constraints.</para>
<para>The <function>rallocx<parameter/></function> function resizes the
allocation at <parameter>ptr</parameter> to be at least
<parameter>size</parameter> bytes, and returns a pointer to the base
address of the resulting allocation, which may or may not have moved from
its original location. Behavior is undefined if
- <parameter>size</parameter> is <constant>0</constant>.</para>
+ <parameter>size</parameter> is <constant>0</constant>, or if request size
+ overflows due to size class and/or alignment constraints.</para>
<para>The <function>xallocx<parameter/></function> function resizes the
allocation at <parameter>ptr</parameter> in place to be at least
@@ -355,8 +357,9 @@
<function>mallocx<parameter/></function> function, and returns the real
size of the allocation that would result from the equivalent
<function>mallocx<parameter/></function> function call. Behavior is
- undefined if <parameter>size</parameter> is
- <constant>0</constant>.</para>
+ undefined if <parameter>size</parameter> is <constant>0</constant>, or if
+ request size overflows due to size class and/or alignment
+ constraints.</para>
<para>The <function>mallctl<parameter/></function> function provides a
general interface for introspecting the memory allocator, as well as
@@ -518,8 +521,9 @@ for (i = 0; i < nbins; i++) {
<parameter>*ptr</parameter> to the base address of the allocation, and
sets <parameter>*rsize</parameter> to the real size of the allocation if
<parameter>rsize</parameter> is not <constant>NULL</constant>. Behavior
- is undefined if <parameter>size</parameter> is
- <constant>0</constant>.</para>
+ is undefined if <parameter>size</parameter> is <constant>0</constant>, or
+ if request size overflows due to size class and/or alignment
+ constraints.</para>
<para>The <function>rallocm<parameter/></function> function resizes the
allocation at <parameter>*ptr</parameter> to be at least
@@ -532,8 +536,9 @@ for (i = 0; i < nbins; i++) {
language="C">(<parameter>size</parameter> +
<parameter>extra</parameter>)</code> bytes, though inability to allocate
the extra byte(s) will not by itself result in failure. Behavior is
- undefined if <parameter>size</parameter> is <constant>0</constant>, or if
- <code language="C">(<parameter>size</parameter> +
+ undefined if <parameter>size</parameter> is <constant>0</constant>, if
+ request size overflows due to size class and/or alignment constraints, or
+ if <code language="C">(<parameter>size</parameter> +
<parameter>extra</parameter> &gt;
<constant>SIZE_T_MAX</constant>)</code>.</para>
@@ -550,8 +555,9 @@ for (i = 0; i < nbins; i++) {
<parameter>rsize</parameter> is not <constant>NULL</constant> it sets
<parameter>*rsize</parameter> to the real size of the allocation that
would result from the equivalent <function>allocm<parameter/></function>
- function call. Behavior is undefined if
- <parameter>size</parameter> is <constant>0</constant>.</para>
+ function call. Behavior is undefined if <parameter>size</parameter> is
+ <constant>0</constant>, or if request size overflows due to size class
+ and/or alignment constraints.</para>
</refsect2>
</refsect1>
<refsect1 id="tuning">
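[Note: a hedged illustration of the newly-undefined case documented above,
using the 64-bit request that the integration tests further down stop
checking (values taken from the deleted test vectors; a fragment for
illustration, not a program to run):]

#include <jemalloc/jemalloc.h>

void
overflow_example(void)
{
	/* Rounding 0xfffffffffffffff0 up to satisfy 0x10 alignment wraps
	 * past SIZE_T_MAX, so the effective request size overflows
	 * (LG_SIZEOF_PTR == 3 case).  Before this commit the call failed
	 * cleanly; after it, behavior is undefined, and only a debug
	 * build trips an assertion. */
	void *p = mallocx((size_t)0xfffffffffffffff0ULL,
	    MALLOCX_ALIGN(0x10));	/* undefined -- do not do this */
	(void)p;
}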
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index f092155..20dfd8c 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -436,7 +436,7 @@ void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
-void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index dac23c6..ddf1313 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -19,7 +19,7 @@ extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t size, size_t alignment, bool zero);
-void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc);
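[Note: both no-move resizers now share one convention, sketched here (cf.
the src/huge.c hunk further down): false means the resize succeeded in
place, true means a move would be required, so OOM is no longer conflated
with would-move.]

/* Caller pattern after this change (as in huge_ralloc()/arena_ralloc()): */
if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
	return (ptr);		/* resized in place; pointer unchanged */
/* ...otherwise fall through to the allocate/copy/free path. */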
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index caadc1e..7c4397f 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -747,11 +747,15 @@ void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
-void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
+void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move);
+ bool zero);
+bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
@@ -920,10 +924,42 @@ iqalloc(void *ptr)
}
JEMALLOC_ALWAYS_INLINE void *
+iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena)
+{
+ void *p;
+ size_t usize, copysize;
+
+ usize = sa2u(size + extra, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, without extra this time. */
+ usize = sa2u(size, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL)
+ return (NULL);
+ }
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(p, ptr, copysize);
+ iqalloct(ptr, try_tcache_dalloc);
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+ bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
- void *ret;
size_t oldsize;
assert(ptr != NULL);
@@ -933,68 +969,50 @@ iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
- size_t usize, copysize;
-
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
- if (no_move)
- return (NULL);
- usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, without extra this time. */
- usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
- arena);
- if (ret == NULL)
- return (NULL);
- }
- /*
- * Copy at most size bytes (not size+extra), since the caller
- * has no expectation that the extra bytes will be reliably
- * preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
- return (ret);
+ return (iralloct_realign(ptr, oldsize, size, extra, alignment,
+ zero, try_tcache_alloc, try_tcache_dalloc, arena));
}
- if (no_move) {
- if (size <= arena_maxclass) {
- return (arena_ralloc_no_move(ptr, oldsize, size,
- extra, zero));
- } else {
- return (huge_ralloc_no_move(ptr, oldsize, size,
- extra));
- }
+ if (size + extra <= arena_maxclass) {
+ return (arena_ralloc(arena, ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc));
} else {
- if (size + extra <= arena_maxclass) {
- return (arena_ralloc(arena, ptr, oldsize, size, extra,
- alignment, zero, try_tcache_alloc,
- try_tcache_dalloc));
- } else {
- return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero, try_tcache_dalloc));
- }
+ return (huge_ralloc(ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_dalloc));
}
}
JEMALLOC_ALWAYS_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move)
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+{
+
+ return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
+ size_t oldsize;
- return (iralloct(ptr, size, extra, alignment, zero, no_move, true, true,
- NULL));
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ oldsize = isalloc(ptr, config_prof);
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return (true);
+ }
+
+ if (size <= arena_maxclass)
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
+ else
+ return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
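[Note: typical use of the new primitive, condensed from ixallocx_helper()
in the src/jemalloc.c hunks below: on failure the allocation is untouched,
so the caller keeps the old usable size; on success it queries the new
real size.]

if (ixalloc(ptr, size, extra, alignment, zero))
	usize = old_usize;			/* not resized */
else
	usize = isalloc(ptr, config_prof);	/* new real size */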
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 10ac549..6cc811d 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -223,9 +223,11 @@ iqalloc
iqalloct
iralloc
iralloct
+iralloct_realign
isalloc
isthreaded
ivsalloc
+ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
diff --git a/src/arena.c b/src/arena.c
index 536be29..ca5b4fe 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2061,7 +2061,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
}
}
-void *
+bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
@@ -2077,19 +2077,19 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
SMALL_SIZE2BIN(size + extra) ==
SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
size + extra >= oldsize))
- return (ptr);
+ return (false);
} else {
assert(size <= arena_maxclass);
if (size + extra > SMALL_MAXCLASS) {
if (arena_ralloc_large(ptr, oldsize, size,
extra, zero) == false)
- return (ptr);
+ return (false);
}
}
}
/* Reallocation would require a move. */
- return (NULL);
+ return (true);
}
void *
@@ -2101,9 +2101,8 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
- ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
- if (ret != NULL)
- return (ret);
+ if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
+ return (ptr);
/*
* size and oldsize are different enough that we need to move the
diff --git a/src/huge.c b/src/huge.c
index 766c80c..cecaf2d 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -78,7 +78,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
return (ret);
}
-void *
+bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{
@@ -89,11 +89,11 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
- return (ptr);
+ return (false);
}
/* Reallocation would require a move. */
- return (NULL);
+ return (true);
}
void *
@@ -104,9 +104,8 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t copysize;
/* Try to avoid moving the allocation. */
- ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
- if (ret != NULL)
- return (ret);
+ if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
+ return (ptr);
/*
* size and oldsize are different enough that we need to use a
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 5845fe9..9fc9b8d 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -100,18 +100,12 @@ typedef struct {
#endif
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void stats_print_atexit(void);
-static unsigned malloc_ncpus(void);
-static bool malloc_conf_next(char const **opts_p, char const **k_p,
- size_t *klen_p, char const **v_p, size_t *vlen_p);
-static void malloc_conf_error(const char *msg, const char *k, size_t klen,
- const char *v, size_t vlen);
-static void malloc_conf_init(void);
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
static bool malloc_init_hard(void);
-static int imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment);
/******************************************************************************/
/*
@@ -852,42 +846,88 @@ malloc_init_hard(void)
* Begin malloc(3)-compatible functions.
*/
+static void *
+imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = imalloc(SMALL_MAXCLASS+1);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = imalloc(usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = imalloc_prof_sample(usize, cnt);
+ else
+ p = imalloc(usize);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
+/*
+ * MALLOC_BODY() is a macro rather than a function because its contents are in
+ * the fast path, but inlining would cause reliability issues when determining
+ * how many frames to discard from heap profiling backtraces.
+ */
+#define MALLOC_BODY(ret, size, usize) do { \
+ if (malloc_init()) \
+ ret = NULL; \
+ else { \
+ if (config_prof && opt_prof) { \
+ prof_thr_cnt_t *cnt; \
+ \
+ usize = s2u(size); \
+ /* \
+ * Call PROF_ALLOC_PREP() here rather than in \
+ * imalloc_prof() so that imalloc_prof() can be \
+ * inlined without introducing uncertainty \
+ * about the number of backtrace frames to \
+ * ignore. imalloc_prof() is in the fast path \
+ * when heap profiling is enabled, so inlining \
+ * is critical to performance. (For \
+ * consistency all callers of PROF_ALLOC_PREP() \
+ * are structured similarly, even though e.g. \
+ * realloc() isn't called enough for inlining \
+ * to be critical.) \
+ */ \
+ PROF_ALLOC_PREP(1, usize, cnt); \
+ ret = imalloc_prof(usize, cnt); \
+ } else { \
+ if (config_stats || (config_valgrind && \
+ opt_valgrind)) \
+ usize = s2u(size); \
+ ret = imalloc(size); \
+ } \
+ } \
+} while (0)
+
void *
je_malloc(size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-
- if (malloc_init()) {
- ret = NULL;
- goto label_oom;
- }
if (size == 0)
size = 1;
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = imalloc(size);
- } else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
+ MALLOC_BODY(ret, size, usize);
-label_oom:
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
@@ -896,8 +936,6 @@ label_oom:
}
set_errno(ENOMEM);
}
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
@@ -907,6 +945,42 @@ label_oom:
return (ret);
}
+static void *
+imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
+ p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
+ false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = ipalloc(usize, alignment, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = imemalign_prof_sample(alignment, usize, cnt);
+ else
+ p = ipalloc(usize, alignment, false);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
@@ -916,19 +990,18 @@ JEMALLOC_ATTR(nonnull(1))
JEMALLOC_NOINLINE
#endif
static int
-imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment)
+imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
int ret;
size_t usize;
void *result;
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
assert(min_alignment != 0);
- if (malloc_init())
+ if (malloc_init()) {
result = NULL;
- else {
+ goto label_oom;
+ } else {
if (size == 0)
size = 1;
@@ -948,57 +1021,38 @@ imemalign(void **memptr, size_t alignment, size_t size,
usize = sa2u(size, alignment);
if (usize == 0) {
result = NULL;
- ret = ENOMEM;
- goto label_return;
+ goto label_oom;
}
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
PROF_ALLOC_PREP(2, usize, cnt);
- if (cnt == NULL) {
- result = NULL;
- ret = EINVAL;
- } else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
- assert(sa2u(SMALL_MAXCLASS+1,
- alignment) != 0);
- result = ipalloc(sa2u(SMALL_MAXCLASS+1,
- alignment), alignment, false);
- if (result != NULL) {
- arena_prof_promoted(result,
- usize);
- }
- } else {
- result = ipalloc(usize, alignment,
- false);
- }
- }
+ result = imemalign_prof(alignment, usize, cnt);
} else
result = ipalloc(usize, alignment, false);
- }
-
- if (result == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating aligned "
- "memory: out of memory\n");
- abort();
- }
- ret = ENOMEM;
- goto label_return;
+ if (result == NULL)
+ goto label_oom;
}
*memptr = result;
ret = 0;
-
label_return:
if (config_stats && result != NULL) {
assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
- if (config_prof && opt_prof && result != NULL)
- prof_malloc(result, usize, cnt);
UTRACE(0, size, result);
return (ret);
+label_oom:
+ assert(result == NULL);
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error allocating aligned memory: "
+ "out of memory\n");
+ abort();
+ }
+ ret = ENOMEM;
+ goto label_return;
}
int
@@ -1025,13 +1079,46 @@ je_aligned_alloc(size_t alignment, size_t size)
return (ret);
}
+static void *
+icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = icalloc(SMALL_MAXCLASS+1);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = icalloc(usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = icalloc_prof_sample(usize, cnt);
+ else
+ p = icalloc(usize);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
void *
je_calloc(size_t num, size_t size)
{
void *ret;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
if (malloc_init()) {
num_size = 0;
@@ -1060,19 +1147,11 @@ je_calloc(size_t num, size_t size)
}
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
usize = s2u(num_size);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_return;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
- <= SMALL_MAXCLASS) {
- ret = icalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = icalloc(num_size);
+ ret = icalloc_prof(usize, cnt);
} else {
if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(num_size);
@@ -1088,9 +1167,6 @@ label_return:
}
set_errno(ENOMEM);
}
-
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
@@ -1100,6 +1176,64 @@ label_return:
return (ret);
}
+static void *
+irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = iralloc(oldptr, usize, 0, 0, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+ prof_ctx_t *old_ctx;
+
+ old_ctx = prof_ctx_get(oldptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = irealloc_prof_sample(oldptr, usize, cnt);
+ else
+ p = iralloc(oldptr, usize, 0, 0, false);
+ if (p == NULL)
+ return (NULL);
+ prof_realloc(p, usize, cnt, old_usize, old_ctx);
+
+ return (p);
+}
+
+JEMALLOC_INLINE_C void
+ifree(void *ptr)
+{
+ size_t usize;
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(ptr != NULL);
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ if (config_prof && opt_prof) {
+ usize = isalloc(ptr, config_prof);
+ prof_free(ptr, usize);
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
+ if (config_stats)
+ thread_allocated_tsd_get()->deallocated += usize;
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
void *
je_realloc(void *ptr, size_t size)
{
@@ -1107,136 +1241,51 @@ je_realloc(void *ptr, size_t size)
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
- prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
if (size == 0) {
if (ptr != NULL) {
- /* realloc(ptr, 0) is equivalent to free(p). */
- assert(malloc_initialized || IS_INITIALIZER);
- if (config_prof) {
- old_usize = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_usize = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_usize);
- } else if (config_valgrind && opt_valgrind) {
- old_usize = isalloc(ptr, false);
- old_rzsize = u2rz(old_usize);
- }
- if (config_prof && opt_prof) {
- old_ctx = prof_ctx_get(ptr);
- cnt = NULL;
- }
- iqalloc(ptr);
- ret = NULL;
- goto label_return;
- } else
- size = 1;
+ /* realloc(ptr, 0) is equivalent to free(ptr). */
+ UTRACE(ptr, 0, 0);
+ ifree(ptr);
+ return (NULL);
+ }
+ size = 1;
}
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
- if (config_prof) {
- old_usize = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_usize = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_usize);
- } else if (config_valgrind && opt_valgrind) {
- old_usize = isalloc(ptr, false);
- old_rzsize = u2rz(old_usize);
- }
+ if ((config_prof && opt_prof) || config_stats ||
+ (config_valgrind && opt_valgrind))
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
+
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
usize = s2u(size);
- old_ctx = prof_ctx_get(ptr);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- old_ctx = NULL;
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
- usize <= SMALL_MAXCLASS) {
- ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
- false, false);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- else
- old_ctx = NULL;
- } else {
- ret = iralloc(ptr, size, 0, 0, false, false);
- if (ret == NULL)
- old_ctx = NULL;
- }
+ ret = irealloc_prof(ptr, old_usize, usize, cnt);
} else {
if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
- ret = iralloc(ptr, size, 0, 0, false, false);
- }
-
-label_oom:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
+ ret = iralloc(ptr, size, 0, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- if (config_prof && opt_prof)
- old_ctx = NULL;
- if (malloc_init()) {
- if (config_prof && opt_prof)
- cnt = NULL;
- ret = NULL;
- } else {
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
- ret = NULL;
- else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL) {
- arena_prof_promoted(ret,
- usize);
- }
- } else
- ret = imalloc(size);
- }
- } else {
- if (config_stats || (config_valgrind &&
- opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
- }
+ MALLOC_BODY(ret, size, usize);
+ }
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
+ if (ret == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in realloc(): "
+ "out of memory\n");
+ abort();
}
+ set_errno(ENOMEM);
}
-
-label_return:
- if (config_prof && opt_prof)
- prof_realloc(ret, usize, cnt, old_usize, old_ctx);
if (config_stats && ret != NULL) {
thread_allocated_t *ta;
assert(usize == isalloc(ret, config_prof));
@@ -1255,24 +1304,8 @@ je_free(void *ptr)
{
UTRACE(ptr, 0, 0);
- if (ptr != NULL) {
- size_t usize;
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-
- assert(malloc_initialized || IS_INITIALIZER);
-
- if (config_prof && opt_prof) {
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- } else if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloc(ptr);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
- }
+ if (ptr != NULL)
+ ifree(ptr);
}
/*
@@ -1354,6 +1387,47 @@ imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
return (imalloct(usize, try_tcache, arena));
}
+static void *
+imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ size_t usize_promoted = (alignment == 0) ?
+ s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
+ assert(usize_promoted != 0);
+ p = imallocx(usize_promoted, alignment, zero, try_tcache,
+ arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U) {
+ p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
+ arena, cnt);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
void *
je_mallocx(size_t size, int flags)
{
@@ -1380,37 +1454,18 @@ je_mallocx(size_t size, int flags)
}
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
- goto label_oom;
+ assert(usize != 0);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
- goto label_oom;
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- size_t usize_promoted = (alignment == 0) ?
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
- alignment);
- assert(usize_promoted != 0);
- p = imallocx(usize_promoted, alignment, zero,
- try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- arena_prof_promoted(p, usize);
- } else {
- p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- }
- prof_malloc(p, usize, cnt);
- } else {
+ p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
+ cnt);
+ } else
p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- }
+ if (p == NULL)
+ goto label_oom;
if (config_stats) {
assert(usize == isalloc(p, config_prof));
@@ -1428,6 +1483,65 @@ label_oom:
return (NULL);
}
+static void *
+irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
+ bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
+ prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else {
+ p = iralloct(oldptr, size, 0, alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ }
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
+ size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena, prof_thr_cnt_t *cnt)
+{
+ void *p;
+ prof_ctx_t *old_ctx;
+
+ old_ctx = prof_ctx_get(oldptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+ else {
+ p = iralloct(oldptr, size, 0, alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ }
+ if (p == NULL)
+ return (NULL);
+
+ if (p == oldptr && alignment != 0) {
+ /*
+ * The allocation did not move, so it is possible that the size
+ * class is smaller than would guarantee the requested
+ * alignment, and that the alignment constraint was
+ * serendipitously satisfied. Additionally, old_usize may not
+ * be the same as the current usize because of in-place large
+ * reallocation. Therefore, query the actual value of usize.
+ */
+ *usize = isalloc(p, config_prof);
+ }
+ prof_realloc(p, *usize, cnt, old_usize, old_ctx);
+
+ return (p);
+}
+
void *
je_rallocx(void *ptr, size_t size, int flags)
{
@@ -1459,59 +1573,25 @@ je_rallocx(void *ptr, size_t size, int flags)
arena = NULL;
}
+ if ((config_prof && opt_prof) || config_stats ||
+ (config_valgrind && opt_valgrind))
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_usize);
+
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
- usize = (alignment == 0) ? s2u(size) : sa2u(size,
- alignment);
- prof_ctx_t *old_ctx = prof_ctx_get(ptr);
- old_usize = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ assert(usize != 0);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
+ p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+ if (p == NULL)
goto label_oom;
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- p = iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size) ? 0 : size - (SMALL_MAXCLASS+1), alignment,
- zero, false, try_tcache_alloc, try_tcache_dalloc,
- arena);
- if (p == NULL)
- goto label_oom;
- if (usize < PAGE)
- arena_prof_promoted(p, usize);
- } else {
- p = iralloct(ptr, size, 0, alignment, zero, false,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (p == NULL)
- goto label_oom;
- }
- if (p == ptr && alignment != 0) {
- /*
- * The allocation did not move, so it is possible that
- * the size class is smaller than would guarantee the
- * requested alignment, and that the alignment
- * constraint was serendipitously satisfied.
- * Additionally, old_usize may not be the same as the
- * current usize because of in-place large
- * reallocation. Therefore, query the actual value of
- * usize.
- */
- usize = isalloc(p, true);
- }
- prof_realloc(p, usize, cnt, old_usize, old_ctx);
} else {
- if (config_stats) {
- old_usize = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_usize);
- } else if (config_valgrind && opt_valgrind) {
- old_usize = isalloc(ptr, false);
- old_rzsize = u2rz(old_usize);
- }
- p = iralloct(ptr, size, 0, alignment, zero, false,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc, arena);
if (p == NULL)
goto label_oom;
if (config_stats || (config_valgrind && opt_valgrind))
@@ -1536,6 +1616,69 @@ label_oom:
return (NULL);
}
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, bool zero, arena_t *arena)
+{
+ size_t usize;
+
+ if (ixalloc(ptr, size, extra, alignment, zero))
+ return (old_usize);
+ usize = isalloc(ptr, config_prof);
+
+ return (usize);
+}
+
+static size_t
+ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+ prof_thr_cnt_t *cnt)
+{
+ size_t usize;
+
+ if (cnt == NULL)
+ return (old_usize);
+ /* Use minimum usize to determine whether promotion may happen. */
+ if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
+ alignment)) <= SMALL_MAXCLASS) {
+ if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
+ alignment, zero))
+ return (old_usize);
+ usize = isalloc(ptr, config_prof);
+ if (max_usize < PAGE)
+ arena_prof_promoted(ptr, usize);
+ } else {
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
+ }
+
+ return (usize);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+ prof_thr_cnt_t *cnt)
+{
+ size_t usize;
+ prof_ctx_t *old_ctx;
+
+ old_ctx = prof_ctx_get(ptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U) {
+ usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
+ alignment, zero, max_usize, arena, cnt);
+ } else {
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
+ }
+ if (usize == old_usize)
+ return (usize);
+ prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+
+ return (usize);
+}
+
size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
@@ -1545,7 +1688,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
& (SIZE_T_MAX-1));
bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
assert(ptr != NULL);
@@ -1556,22 +1698,19 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
- try_tcache_alloc = false;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache_dalloc = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
arena = arenas[arena_ind];
- } else {
- try_tcache_alloc = true;
- try_tcache_dalloc = true;
+ } else
arena = NULL;
- }
+
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
-
/*
- * usize isn't knowable before iralloc() returns when extra is
+ * usize isn't knowable before ixalloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
* use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
@@ -1579,60 +1718,15 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment);
- prof_ctx_t *old_ctx = prof_ctx_get(ptr);
- old_usize = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
PROF_ALLOC_PREP(1, max_usize, cnt);
- if (cnt == NULL) {
- usize = old_usize;
- goto label_not_moved;
- }
- /*
- * Use minimum usize to determine whether promotion may happen.
- */
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
- && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
- <= SMALL_MAXCLASS) {
- if (iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero, true, try_tcache_alloc,
- try_tcache_dalloc, arena) == NULL) {
- usize = old_usize;
- goto label_not_moved;
- }
- usize = isalloc(ptr, true);
- if (max_usize < PAGE)
- arena_prof_promoted(ptr, usize);
- } else {
- if (iralloct(ptr, size, extra, alignment, zero, true,
- try_tcache_alloc, try_tcache_dalloc, arena) ==
- NULL) {
- usize = old_usize;
- goto label_not_moved;
- }
- usize = isalloc(ptr, true);
- }
- prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+ usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
+ max_usize, zero, arena, cnt);
} else {
- if (config_stats) {
- old_usize = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_usize);
- } else if (config_valgrind && opt_valgrind) {
- old_usize = isalloc(ptr, false);
- old_rzsize = u2rz(old_usize);
- }
- if (iralloct(ptr, size, extra, alignment, zero, true,
- try_tcache_alloc, try_tcache_dalloc, arena) == NULL) {
- if (config_stats == false && (config_valgrind == false
- || opt_valgrind == false))
- old_usize = isalloc(ptr, false);
- usize = old_usize;
- goto label_not_moved;
- }
- usize = isalloc(ptr, config_prof);
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
}
+ if (usize == old_usize)
+ goto label_not_resized;
if (config_stats) {
thread_allocated_t *ta;
@@ -1641,7 +1735,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
ta->deallocated += old_usize;
}
JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
-label_not_moved:
+label_not_resized:
UTRACE(ptr, size, ptr);
return (usize);
}
@@ -1711,6 +1805,7 @@ je_nallocx(size_t size, int flags)
return (0);
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ assert(usize != 0);
return (usize);
}
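[Note: from the application's side, the renamed label_not_resized reflects
xallocx()'s contract: the pointer never moves, and a return value equal to
the old real size signals that no resize happened.  A minimal caller
sketch, assuming the companion sallocx() to fetch the old size:]

#include <jemalloc/jemalloc.h>

void
grow_in_place(void *p, size_t want)
{
	size_t old = sallocx(p, 0);
	if (xallocx(p, want, 0, 0) == old) {
		/* Could not grow in place; a caller would fall back to
		 * rallocx(), which is allowed to move the allocation. */
	}
}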
diff --git a/test/integration/allocm.c b/test/integration/allocm.c
index 3886280..bd7a3ca 100644
--- a/test/integration/allocm.c
+++ b/test/integration/allocm.c
@@ -45,23 +45,6 @@ TEST_BEGIN(test_alignment_errors)
size_t nsz, rsz, sz, alignment;
#if LG_SIZEOF_PTR == 3
- alignment = UINT64_C(0x8000000000000000);
- sz = UINT64_C(0x8000000000000000);
-#else
- alignment = 0x80000000LU;
- sz = 0x80000000LU;
-#endif
- nsz = 0;
- assert_d_ne(nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)), ALLOCM_SUCCESS,
- "Expected error for nallocm(&nsz, %zu, %#x)",
- sz, ALLOCM_ALIGN(alignment));
- rsz = 0;
- assert_d_ne(allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)),
- ALLOCM_SUCCESS, "Expected error for allocm(&p, %zu, %#x)",
- sz, ALLOCM_ALIGN(alignment));
- assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
-
-#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x4000000000000000);
sz = UINT64_C(0x8400000000000001);
#else
@@ -75,22 +58,6 @@ TEST_BEGIN(test_alignment_errors)
assert_d_ne(allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)),
ALLOCM_SUCCESS, "Expected error for allocm(&p, %zu, %#x)",
sz, ALLOCM_ALIGN(alignment));
-
- alignment = 0x10LU;
-#if LG_SIZEOF_PTR == 3
- sz = UINT64_C(0xfffffffffffffff0);
-#else
- sz = 0xfffffff0LU;
-#endif
- nsz = 0;
- assert_d_ne(nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)), ALLOCM_SUCCESS,
- "Expected error for nallocm(&nsz, %zu, %#x)",
- sz, ALLOCM_ALIGN(alignment));
- rsz = 0;
- assert_d_ne(allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)),
- ALLOCM_SUCCESS, "Expected error for allocm(&p, %zu, %#x)",
- sz, ALLOCM_ALIGN(alignment));
- assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
}
TEST_END
diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c
index f12855e..c26f6c5 100644
--- a/test/integration/mallocx.c
+++ b/test/integration/mallocx.c
@@ -40,20 +40,6 @@ TEST_BEGIN(test_alignment_errors)
size_t nsz, sz, alignment;
#if LG_SIZEOF_PTR == 3
- alignment = UINT64_C(0x8000000000000000);
- sz = UINT64_C(0x8000000000000000);
-#else
- alignment = 0x80000000LU;
- sz = 0x80000000LU;
-#endif
- nsz = nallocx(sz, MALLOCX_ALIGN(alignment));
- assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz,
- MALLOCX_ALIGN(alignment));
- p = mallocx(sz, MALLOCX_ALIGN(alignment));
- assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz,
- MALLOCX_ALIGN(alignment));
-
-#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x4000000000000000);
sz = UINT64_C(0x8400000000000001);
#else
@@ -65,22 +51,6 @@ TEST_BEGIN(test_alignment_errors)
p = mallocx(sz, MALLOCX_ALIGN(alignment));
assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz,
MALLOCX_ALIGN(alignment));
-
- alignment = 0x10LU;
-#if LG_SIZEOF_PTR == 3
- sz = UINT64_C(0xfffffffffffffff0);
-#else
- sz = 0xfffffff0LU;
-#endif
- nsz = nallocx(sz, MALLOCX_ALIGN(alignment));
- assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz,
- MALLOCX_ALIGN(alignment));
- nsz = nallocx(sz, MALLOCX_ALIGN(alignment));
- assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz,
- MALLOCX_ALIGN(alignment));
- p = mallocx(sz, MALLOCX_ALIGN(alignment));
- assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz,
- MALLOCX_ALIGN(alignment));
}
TEST_END