summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2013-12-13 06:35:52 (GMT)
committerJason Evans <jasone@canonware.com>2013-12-13 06:35:52 (GMT)
commitd82a5e6a34f20698ab9368bb2b4953b81d175552 (patch)
tree23cbe8892adf46196cc6b2cf977704405c7798b7
parent0ac396a06a10f8a8c1d41c8771367625e7d49d07 (diff)
downloadjemalloc-d82a5e6a34f20698ab9368bb2b4953b81d175552.zip
jemalloc-d82a5e6a34f20698ab9368bb2b4953b81d175552.tar.gz
jemalloc-d82a5e6a34f20698ab9368bb2b4953b81d175552.tar.bz2
Implement the *allocx() API.
Implement the *allocx() API, which is a successor to the *allocm() API. The *allocx() functions are slightly simpler to use because they have fewer parameters, they directly return the results of primary interest, and mallocx()/rallocx() avoid the strict aliasing pitfall that allocm()/rallocm() share with posix_memalign(). The following code violates strict aliasing rules: foo_t *foo; allocm((void **)&foo, NULL, 42, 0); whereas the following is safe: foo_t *foo; void *p; allocm(&p, NULL, 42, 0); foo = (foo_t *)p; mallocx() does not have this problem: foo_t *foo = (foo_t *)mallocx(42, 0);
-rw-r--r--Makefile.in7
-rw-r--r--configure.ac2
-rw-r--r--doc/jemalloc.xml.in248
-rw-r--r--include/jemalloc/internal/jemalloc_internal.h.in45
-rw-r--r--include/jemalloc/internal/private_symbols.txt12
-rw-r--r--include/jemalloc/internal/public_symbols.txt10
-rw-r--r--include/jemalloc/jemalloc_macros.h.in21
-rw-r--r--include/jemalloc/jemalloc_mangle.h.in16
-rw-r--r--include/jemalloc/jemalloc_protos.h.in30
-rw-r--r--src/arena.c6
-rw-r--r--src/huge.c2
-rw-r--r--src/jemalloc.c456
-rw-r--r--src/tcache.c4
-rw-r--r--src/tsd.c2
-rw-r--r--test/integration/mallocx.c149
-rw-r--r--test/integration/rallocm.c2
-rw-r--r--test/integration/rallocx.c51
-rw-r--r--test/integration/xallocx.c59
-rw-r--r--test/unit/mq.c7
19 files changed, 859 insertions, 270 deletions
diff --git a/Makefile.in b/Makefile.in
index cd137fd..af60a21 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -112,13 +112,16 @@ TESTS_UNIT := $(srcroot)test/unit/bitmap.c $(srcroot)test/unit/math.c \
$(srcroot)test/unit/SFMT.c $(srcroot)test/unit/tsd.c
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/allocated.c \
- $(srcroot)test/integration/ALLOCM_ARENA.c \
+ $(srcroot)test/integration/mallocx.c \
$(srcroot)test/integration/mremap.c \
$(srcroot)test/integration/posix_memalign.c \
+ $(srcroot)test/integration/rallocx.c \
$(srcroot)test/integration/thread_arena.c \
- $(srcroot)test/integration/thread_tcache_enabled.c
+ $(srcroot)test/integration/thread_tcache_enabled.c \
+ $(srcroot)test/integration/xallocx.c
ifeq ($(enable_experimental), 1)
TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \
+ $(srcroot)test/integration/ALLOCM_ARENA.c \
$(srcroot)test/integration/rallocm.c
endif
TESTS_STRESS :=
diff --git a/configure.ac b/configure.ac
index 02842b6..724bc1a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -417,7 +417,7 @@ AC_PROG_RANLIB
AC_PATH_PROG([LD], [ld], [false], [$PATH])
AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
-public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free malloc_usable_size malloc_stats_print mallctl mallctlnametomib mallctlbymib"
+public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
dnl Check for allocator-related functions that should be wrapped.
AC_CHECK_FUNC([memalign],
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 596f645..d6f7272 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -33,11 +33,17 @@
<refname>aligned_alloc</refname>
<refname>realloc</refname>
<refname>free</refname>
- <refname>malloc_usable_size</refname>
- <refname>malloc_stats_print</refname>
+ <refname>mallocx</refname>
+ <refname>rallocx</refname>
+ <refname>xallocx</refname>
+ <refname>sallocx</refname>
+ <refname>dallocx</refname>
+ <refname>nallocx</refname>
<refname>mallctl</refname>
<refname>mallctlnametomib</refname>
<refname>mallctlbymib</refname>
+ <refname>malloc_stats_print</refname>
+ <refname>malloc_usable_size</refname>
<refname>allocm</refname>
<refname>rallocm</refname>
<refname>sallocm</refname>
@@ -92,16 +98,37 @@
<refsect2>
<title>Non-standard API</title>
<funcprototype>
- <funcdef>size_t <function>malloc_usable_size</function></funcdef>
- <paramdef>const void *<parameter>ptr</parameter></paramdef>
+ <funcdef>void *<function>mallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
</funcprototype>
<funcprototype>
- <funcdef>void <function>malloc_stats_print</function></funcdef>
- <paramdef>void <parameter>(*write_cb)</parameter>
- <funcparams>void *, const char *</funcparams>
- </paramdef>
- <paramdef>void *<parameter>cbopaque</parameter></paramdef>
- <paramdef>const char *<parameter>opts</parameter></paramdef>
+ <funcdef>void *<function>rallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>xallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>extra</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>sallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>dallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>nallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
</funcprototype>
<funcprototype>
<funcdef>int <function>mallctl</function></funcdef>
@@ -127,6 +154,18 @@
<paramdef>size_t <parameter>newlen</parameter></paramdef>
</funcprototype>
<funcprototype>
+ <funcdef>void <function>malloc_stats_print</function></funcdef>
+ <paramdef>void <parameter>(*write_cb)</parameter>
+ <funcparams>void *, const char *</funcparams>
+ </paramdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>opts</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>malloc_usable_size</function></funcdef>
+ <paramdef>const void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
<funcdef>void <function>(*malloc_message)</function></funcdef>
<paramdef>void *<parameter>cbopaque</parameter></paramdef>
<paramdef>const char *<parameter>s</parameter></paramdef>
@@ -225,42 +264,100 @@
</refsect2>
<refsect2>
<title>Non-standard API</title>
+ <para>The <function>mallocx<parameter/></function>,
+ <function>rallocx<parameter/></function>,
+ <function>xallocx<parameter/></function>,
+ <function>sallocx<parameter/></function>,
+ <function>dallocx<parameter/></function>, and
+ <function>nallocx<parameter/></function> functions all have a
+ <parameter>flags</parameter> argument that can be used to specify
+ options. The functions only check the options that are contextually
+ relevant. Use bitwise or (<code language="C">|</code>) operations to
+ specify one or more of the following:
+ <variablelist>
+ <varlistentry>
+ <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
+ </constant></term>
- <para>The <function>malloc_usable_size<parameter/></function> function
- returns the usable size of the allocation pointed to by
- <parameter>ptr</parameter>. The return value may be larger than the size
- that was requested during allocation. The
- <function>malloc_usable_size<parameter/></function> function is not a
- mechanism for in-place <function>realloc<parameter/></function>; rather
- it is provided solely as a tool for introspection purposes. Any
- discrepancy between the requested allocation size and the size reported
- by <function>malloc_usable_size<parameter/></function> should not be
- depended on, since such behavior is entirely implementation-dependent.
- </para>
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <code language="C">(1 &lt;&lt;
+ <parameter>la</parameter>)</code>. This macro does not validate
+ that <parameter>la</parameter> is within the valid
+ range.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
+ </constant></term>
- <para>The <function>malloc_stats_print<parameter/></function> function
- writes human-readable summary statistics via the
- <parameter>write_cb</parameter> callback function pointer and
- <parameter>cbopaque</parameter> data passed to
- <parameter>write_cb</parameter>, or
- <function>malloc_message<parameter/></function> if
- <parameter>write_cb</parameter> is <constant>NULL</constant>. This
- function can be called repeatedly. General information that never
- changes during execution can be omitted by specifying "g" as a character
- within the <parameter>opts</parameter> string. Note that
- <function>malloc_message<parameter/></function> uses the
- <function>mallctl*<parameter/></function> functions internally, so
- inconsistent statistics can be reported if multiple threads use these
- functions simultaneously. If <option>--enable-stats</option> is
- specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
- be specified to omit merged arena and per arena statistics, respectively;
- &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
- class statistics for bins and large objects, respectively. Unrecognized
- characters are silently ignored. Note that thread caching may prevent
- some statistics from being completely up to date, since extra locking
- would be required to merge counters that track thread cache operations.
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <parameter>a</parameter>, where
+ <parameter>a</parameter> is a power of two. This macro does not
+ validate that <parameter>a</parameter> is a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>MALLOCX_ZERO</constant></term>
+
+ <listitem><para>Initialize newly allocated memory to contain zero
+ bytes. In the growing reallocation case, the real size prior to
+ reallocation defines the boundary between untouched bytes and those
+ that are initialized to contain zero bytes. If this macro is
+ absent, newly allocated memory is uninitialized.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the arena specified by the index
+ <parameter>a</parameter> (and by necessity bypass the thread
+ cache). This macro has no effect for huge regions, nor for regions
+ that were allocated via an arena other than the one specified.
+ This macro does not validate that <parameter>a</parameter>
+ specifies an arena index in the valid range.</para></listitem>
+ </varlistentry>
+ </variablelist>
</para>
+ <para>The <function>mallocx<parameter/></function> function allocates at
+ least <parameter>size</parameter> bytes of memory, and returns a pointer
+ to the base address of the allocation. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>rallocx<parameter/></function> function resizes the
+ allocation at <parameter>ptr</parameter> to be at least
+ <parameter>size</parameter> bytes, and returns a pointer to the base
+ address of the resulting allocation, which may or may not have moved from
+ its original location. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>xallocx<parameter/></function> function resizes the
+ allocation at <parameter>ptr</parameter> in place to be at least
+ <parameter>size</parameter> bytes, and returns the real size of the
+ allocation. If <parameter>extra</parameter> is non-zero, an attempt is
+ made to resize the allocation to be at least <code
+ language="C">(<parameter>size</parameter> +
+ <parameter>extra</parameter>)</code> bytes, though inability to allocate
+ the extra byte(s) will not by itself result in failure to resize.
+ Behavior is undefined if <parameter>size</parameter> is
+ <constant>0</constant>, or if <code
+ language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
+ &gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
+
+ <para>The <function>sallocx<parameter/></function> function returns the
+ real size of the allocation at <parameter>ptr</parameter>.</para>
+
+ <para>The <function>dallocx<parameter/></function> function causes the
+ memory referenced by <parameter>ptr</parameter> to be made available for
+ future allocations.</para>
+
+ <para>The <function>nallocx<parameter/></function> function allocates no
+ memory, but it performs the same size computation as the
+ <function>mallocx<parameter/></function> function, and returns the real
+ size of the allocation that would result from the equivalent
+ <function>mallocx<parameter/></function> function call. Behavior is
+ undefined if <parameter>size</parameter> is
+ <constant>0</constant>.</para>
+
<para>The <function>mallctl<parameter/></function> function provides a
general interface for introspecting the memory allocator, as well as
setting modifiable parameters and triggering actions. The
@@ -314,6 +411,41 @@ for (i = 0; i < nbins; i++) {
mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
/* Do something with bin_size... */
}]]></programlisting></para>
+
+ <para>The <function>malloc_stats_print<parameter/></function> function
+ writes human-readable summary statistics via the
+ <parameter>write_cb</parameter> callback function pointer and
+ <parameter>cbopaque</parameter> data passed to
+ <parameter>write_cb</parameter>, or
+ <function>malloc_message<parameter/></function> if
+ <parameter>write_cb</parameter> is <constant>NULL</constant>. This
+ function can be called repeatedly. General information that never
+ changes during execution can be omitted by specifying "g" as a character
+ within the <parameter>opts</parameter> string. Note that
+ <function>malloc_message<parameter/></function> uses the
+ <function>mallctl*<parameter/></function> functions internally, so
+ inconsistent statistics can be reported if multiple threads use these
+ functions simultaneously. If <option>--enable-stats</option> is
+ specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
+ be specified to omit merged arena and per arena statistics, respectively;
+ &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
+ class statistics for bins and large objects, respectively. Unrecognized
+ characters are silently ignored. Note that thread caching may prevent
+ some statistics from being completely up to date, since extra locking
+ would be required to merge counters that track thread cache operations.
+ </para>
+
+ <para>The <function>malloc_usable_size<parameter/></function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. The return value may be larger than the size
+ that was requested during allocation. The
+ <function>malloc_usable_size<parameter/></function> function is not a
+ mechanism for in-place <function>realloc<parameter/></function>; rather
+ it is provided solely as a tool for introspection purposes. Any
+ discrepancy between the requested allocation size and the size reported
+ by <function>malloc_usable_size<parameter/></function> should not be
+ depended on, since such behavior is entirely implementation-dependent.
+ </para>
</refsect2>
<refsect2>
<title>Experimental API</title>
@@ -398,7 +530,7 @@ for (i = 0; i < nbins; i++) {
<parameter>rsize</parameter> is not <constant>NULL</constant>. If
<parameter>extra</parameter> is non-zero, an attempt is made to resize
the allocation to be at least <code
- language="C"><parameter>size</parameter> +
+ language="C">(<parameter>size</parameter> +
<parameter>extra</parameter>)</code> bytes, though inability to allocate
the extra byte(s) will not by itself result in failure. Behavior is
undefined if <parameter>size</parameter> is <constant>0</constant>, or if
@@ -936,7 +1068,8 @@ for (i = 0; i < nbins; i++) {
<listitem><para>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
- <function>realloc<parameter/></function> and
+ <function>realloc<parameter/></function>,
+ <function>rallocx<parameter/></function> and
<function>rallocm<parameter/></function> calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
@@ -2039,9 +2172,26 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</refsect2>
<refsect2>
<title>Non-standard API</title>
- <para>The <function>malloc_usable_size<parameter/></function> function
- returns the usable size of the allocation pointed to by
- <parameter>ptr</parameter>. </para>
+ <para>The <function>mallocx<parameter/></function> and
+ <function>rallocx<parameter/></function> functions return a pointer to
+ the allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned to indicate insufficient contiguous memory was
+ available to service the allocation request. </para>
+
+ <para>The <function>xallocx<parameter/></function> function returns the
+ real size of the resulting resized allocation pointed to by
+ <parameter>ptr</parameter>, which is a value less than
+ <parameter>size</parameter> if the allocation could not be adequately
+ grown in place. </para>
+
+ <para>The <function>sallocx<parameter/></function> function returns the
+ real size of the allocation pointed to by <parameter>ptr</parameter>.
+ </para>
+
+ <para>The <function>nallocx<parameter/></function> returns the real size
+ that would result from a successful equivalent
+ <function>mallocx<parameter/></function> function call, or zero if
+ insufficient memory is available to perform the size computation. </para>
<para>The <function>mallctl<parameter/></function>,
<function>mallctlnametomib<parameter/></function>, and
@@ -2092,6 +2242,10 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</varlistentry>
</variablelist>
</para>
+
+ <para>The <function>malloc_usable_size<parameter/></function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. </para>
</refsect2>
<refsect2>
<title>Experimental API</title>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 3dd9761..f380bbf 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -228,6 +228,7 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/jemalloc_internal_macros.h"
+#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
/* Smallest size class to support. */
@@ -731,22 +732,22 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
-void *imallocx(size_t size, bool try_tcache, arena_t *arena);
+void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
-void *icallocx(size_t size, bool try_tcache, arena_t *arena);
+void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
-void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
-void idallocx(void *ptr, bool try_tcache);
+void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
-void iqallocx(void *ptr, bool try_tcache);
+void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
-void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
+void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
@@ -756,7 +757,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
-imallocx(size_t size, bool try_tcache, arena_t *arena)
+imalloct(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
@@ -771,11 +772,11 @@ JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
- return (imallocx(size, true, NULL));
+ return (imalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
-icallocx(size_t size, bool try_tcache, arena_t *arena)
+icalloct(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
@@ -788,11 +789,11 @@ JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
- return (icallocx(size, true, NULL));
+ return (icalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
-ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
void *ret;
@@ -820,7 +821,7 @@ JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
- return (ipallocx(usize, alignment, zero, true, NULL));
+ return (ipalloct(usize, alignment, zero, true, NULL));
}
/*
@@ -881,7 +882,7 @@ p2rz(const void *ptr)
}
JEMALLOC_ALWAYS_INLINE void
-idallocx(void *ptr, bool try_tcache)
+idalloct(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -898,28 +899,28 @@ JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
- idallocx(ptr, true);
+ idalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void
-iqallocx(void *ptr, bool try_tcache)
+iqalloct(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
- idallocx(ptr, try_tcache);
+ idalloct(ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
- iqallocx(ptr, true);
+ iqalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void *
-irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
void *ret;
@@ -943,7 +944,7 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
if (ret == NULL) {
if (extra == 0)
return (NULL);
@@ -951,7 +952,7 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
arena);
if (ret == NULL)
return (NULL);
@@ -963,7 +964,7 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
+ iqalloct(ptr, try_tcache_dalloc);
return (ret);
}
@@ -992,7 +993,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
{
- return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
+ return (iralloct(ptr, size, extra, alignment, zero, no_move, true, true,
NULL));
}
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 9fbc625..541e1b2 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -207,17 +207,17 @@ huge_ralloc_no_move
huge_salloc
iallocm
icalloc
-icallocx
+icalloct
idalloc
-idallocx
+idalloct
imalloc
-imallocx
+imalloct
ipalloc
-ipallocx
+ipalloct
iqalloc
-iqallocx
+iqalloct
iralloc
-irallocx
+iralloct
isalloc
isthreaded
ivsalloc
diff --git a/include/jemalloc/internal/public_symbols.txt b/include/jemalloc/internal/public_symbols.txt
index 7d09742..e27c0e5 100644
--- a/include/jemalloc/internal/public_symbols.txt
+++ b/include/jemalloc/internal/public_symbols.txt
@@ -6,11 +6,17 @@ posix_memalign
aligned_alloc
realloc
free
-malloc_usable_size
-malloc_stats_print
+mallocx
+rallocx
+xallocx
+sallocx
+dallocx
+nallocx
mallctl
mallctlnametomib
mallctlbymib
+malloc_stats_print
+malloc_usable_size
memalign
valloc
allocm
diff --git a/include/jemalloc/jemalloc_macros.h.in b/include/jemalloc/jemalloc_macros.h.in
index d145531..9773bcb 100644
--- a/include/jemalloc/jemalloc_macros.h.in
+++ b/include/jemalloc/jemalloc_macros.h.in
@@ -8,6 +8,17 @@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
+# define MALLOCX_LG_ALIGN(la) (la)
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) (ffs(a)-1)
+# else
+# define MALLOCX_ALIGN(a) \
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+# endif
+# define MALLOCX_ZERO ((int)0x40)
+/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
+# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
+
#ifdef JEMALLOC_EXPERIMENTAL
# define ALLOCM_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
@@ -39,11 +50,17 @@
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
-# undef je_malloc_usable_size
-# undef je_malloc_stats_print
+# undef je_mallocx
+# undef je_rallocx
+# undef je_xallocx
+# undef je_sallocx
+# undef je_dallocx
+# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
+# undef je_malloc_stats_print
+# undef je_malloc_usable_size
# undef je_memalign
# undef je_valloc
# undef je_allocm
diff --git a/include/jemalloc/jemalloc_mangle.h.in b/include/jemalloc/jemalloc_mangle.h.in
index 215de9d..7018a75 100644
--- a/include/jemalloc/jemalloc_mangle.h.in
+++ b/include/jemalloc/jemalloc_mangle.h.in
@@ -17,11 +17,17 @@
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
-# define malloc_usable_size je_malloc_usable_size
-# define malloc_stats_print je_malloc_stats_print
+# define mallocx je_mallocx
+# define rallocx je_rallocx
+# define xallocx je_xallocx
+# define sallocx je_sallocx
+# define dallocx je_dallocx
+# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
+# define malloc_stats_print je_malloc_stats_print
+# define malloc_usable_size je_malloc_usable_size
# define memalign je_memalign
# define valloc je_valloc
# ifdef JEMALLOC_EXPERIMENTAL
@@ -56,6 +62,12 @@
# undef je_mallctlbymib
# undef je_memalign
# undef je_valloc
+# undef je_mallocx
+# undef je_rallocx
+# undef je_xallocx
+# undef je_sallocx
+# undef je_dallocx
+# undef je_nallocx
# ifdef JEMALLOC_EXPERIMENTAL
# undef je_allocm
# undef je_rallocm
diff --git a/include/jemalloc/jemalloc_protos.h.in b/include/jemalloc/jemalloc_protos.h.in
index 3dad859..25446de 100644
--- a/include/jemalloc/jemalloc_protos.h.in
+++ b/include/jemalloc/jemalloc_protos.h.in
@@ -17,6 +17,25 @@ JEMALLOC_EXPORT void *@je_@aligned_alloc(size_t alignment, size_t size)
JEMALLOC_EXPORT void *@je_@realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void @je_@free(void *ptr);
+JEMALLOC_EXPORT void *@je_@mallocx(size_t size, int flags);
+JEMALLOC_EXPORT void *@je_@rallocx(void *ptr, size_t size, int flags);
+JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
+ int flags);
+JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags);
+JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags);
+
+JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp,
+ size_t *miblenp);
+JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *,
+ const char *), void *@je_@cbopaque, const char *opts);
+JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr);
+
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
@@ -26,17 +45,6 @@ JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
-JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr);
-JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *,
- const char *), void *@je_@cbopaque, const char *opts);
-JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp,
- size_t *miblenp);
-JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int @je_@allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
diff --git a/src/arena.c b/src/arena.c
index 145de86..4a46013 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2031,7 +2031,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
} else
ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
@@ -2043,7 +2043,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
arena);
} else
ret = arena_malloc(arena, size, zero, try_tcache_alloc);
@@ -2061,7 +2061,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
+ iqalloct(ptr, try_tcache_dalloc);
return (ret);
}
diff --git a/src/huge.c b/src/huge.c
index 443b400..33fab68 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -181,7 +181,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
{
memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
+ iqalloct(ptr, try_tcache_dalloc);
}
return (ret);
}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index f13a7d8..f8c8119 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1337,73 +1337,8 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions.
*/
-size_t
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
-{
- size_t ret;
-
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (config_ivsalloc)
- ret = ivsalloc(ptr, config_prof);
- else
- ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
-
- return (ret);
-}
-
-void
-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
-
- stats_print(write_cb, cbopaque, opts);
-}
-
-int
-je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
-{
-
- if (malloc_init())
- return (EAGAIN);
-
- return (ctl_byname(name, oldp, oldlenp, newp, newlen));
-}
-
-int
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
-
- if (malloc_init())
- return (EAGAIN);
-
- return (ctl_nametomib(name, mibp, miblenp));
-}
-
-int
-je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
-
- if (malloc_init())
- return (EAGAIN);
-
- return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
-}
-
-/*
- * End non-standard functions.
- */
-/******************************************************************************/
-/*
- * Begin experimental functions.
- */
-#ifdef JEMALLOC_EXPERIMENTAL
-
JEMALLOC_ALWAYS_INLINE_C void *
-iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
+imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
@@ -1411,26 +1346,25 @@ iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
alignment)));
if (alignment != 0)
- return (ipallocx(usize, alignment, zero, try_tcache, arena));
+ return (ipalloct(usize, alignment, zero, try_tcache, arena));
else if (zero)
- return (icallocx(usize, try_tcache, arena));
+ return (icalloct(usize, try_tcache, arena));
else
- return (imallocx(usize, try_tcache, arena));
+ return (imalloct(usize, try_tcache, arena));
}
-int
-je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+void *
+je_mallocx(size_t size, int flags)
{
void *p;
size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
+ bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
arena_t *arena;
bool try_tcache;
- assert(ptr != NULL);
assert(size != 0);
if (malloc_init())
@@ -1460,61 +1394,149 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
alignment);
assert(usize_promoted != 0);
- p = iallocm(usize_promoted, alignment, zero,
+ p = imallocx(usize_promoted, alignment, zero,
try_tcache, arena);
if (p == NULL)
goto label_oom;
arena_prof_promoted(p, usize);
} else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
prof_malloc(p, usize, cnt);
} else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
- if (rsize != NULL)
- *rsize = usize;
- *ptr = p;
if (config_stats) {
assert(usize == isalloc(p, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
- return (ALLOCM_SUCCESS);
+ return (p);
label_oom:
if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in allocm(): "
- "out of memory\n");
+ malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
abort();
}
- *ptr = NULL;
UTRACE(0, size, 0);
- return (ALLOCM_ERR_OOM);
+ return (NULL);
}
-int
-je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+void *
+je_rallocx(void *ptr, size_t size, int flags)
{
- void *p, *q;
+ void *p;
size_t usize;
size_t old_size;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
- bool no_move = flags & ALLOCM_NO_MOVE;
+ bool zero = flags & MALLOCX_ZERO;
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache_alloc, try_tcache_dalloc;
+ arena_t *arena;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
+
+ if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk;
+ try_tcache_alloc = false;
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ try_tcache_dalloc = (chunk == ptr || chunk->arena !=
+ arenas[arena_ind]);
+ arena = arenas[arena_ind];
+ } else {
+ try_tcache_alloc = true;
+ try_tcache_dalloc = true;
+ arena = NULL;
+ }
+
+ if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
+ usize = (alignment == 0) ? s2u(size) : sa2u(size,
+ alignment);
+ prof_ctx_t *old_ctx = prof_ctx_get(ptr);
+ old_size = isalloc(ptr, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(ptr);
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL)
+ goto label_oom;
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
+ SMALL_MAXCLASS) {
+ p = iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size) ? 0 : size - (SMALL_MAXCLASS+1), alignment,
+ zero, false, try_tcache_alloc, try_tcache_dalloc,
+ arena);
+ if (p == NULL)
+ goto label_oom;
+ if (usize < PAGE)
+ arena_prof_promoted(p, usize);
+ } else {
+ p = iralloct(ptr, size, 0, alignment, zero, false,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ if (p == NULL)
+ goto label_oom;
+ }
+ prof_realloc(p, usize, cnt, old_size, old_ctx);
+ } else {
+ if (config_stats) {
+ old_size = isalloc(ptr, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(ptr, false);
+ old_rzsize = u2rz(old_size);
+ }
+ p = iralloct(ptr, size, 0, alignment, zero, false,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ if (p == NULL)
+ goto label_oom;
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = isalloc(p, config_prof);
+ }
+
+ if (config_stats) {
+ thread_allocated_t *ta;
+ ta = thread_allocated_tsd_get();
+ ta->allocated += usize;
+ ta->deallocated += old_size;
+ }
+ UTRACE(ptr, size, p);
+ JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_size, old_rzsize, zero);
+ return (p);
+label_oom:
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
+ abort();
+ }
+ UTRACE(ptr, size, 0);
+ return (NULL);
+}
+
+size_t
+je_xallocx(void *ptr, size_t size, size_t extra, int flags)
+{
+ size_t usize;
+ size_t old_size;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
+ & (SIZE_T_MAX-1));
+ bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
assert(ptr != NULL);
- assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
@@ -1523,8 +1545,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
try_tcache_alloc = false;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
- try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ try_tcache_dalloc = (chunk == ptr || chunk->arena !=
arenas[arena_ind]);
arena = arenas[arena_ind];
} else {
@@ -1533,7 +1555,6 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
arena = NULL;
}
- p = *ptr;
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
@@ -1546,109 +1567,87 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment);
- prof_ctx_t *old_ctx = prof_ctx_get(p);
- old_size = isalloc(p, true);
+ prof_ctx_t *old_ctx = prof_ctx_get(ptr);
+ old_size = isalloc(ptr, true);
if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(p);
+ old_rzsize = p2rz(ptr);
PROF_ALLOC_PREP(1, max_usize, cnt);
- if (cnt == NULL)
- goto label_oom;
+ if (cnt == NULL) {
+ usize = isalloc(ptr, config_prof);
+ goto label_not_moved;
+ }
/*
* Use minimum usize to determine whether promotion may happen.
*/
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
&& ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
<= SMALL_MAXCLASS) {
- q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ if (iralloct(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero, no_move, try_tcache_alloc,
- try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
+ alignment, zero, true, try_tcache_alloc,
+ try_tcache_dalloc, arena) == NULL)
+ goto label_not_moved;
if (max_usize < PAGE) {
usize = max_usize;
- arena_prof_promoted(q, usize);
+ arena_prof_promoted(ptr, usize);
} else
- usize = isalloc(q, config_prof);
+ usize = isalloc(ptr, config_prof);
} else {
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- usize = isalloc(q, config_prof);
+ if (iralloct(ptr, size, extra, alignment, zero, true,
+ try_tcache_alloc, try_tcache_dalloc, arena) == NULL)
+ goto label_not_moved;
+ usize = isalloc(ptr, config_prof);
}
- prof_realloc(q, usize, cnt, old_size, old_ctx);
- if (rsize != NULL)
- *rsize = usize;
+ prof_realloc(ptr, usize, cnt, old_size, old_ctx);
} else {
if (config_stats) {
- old_size = isalloc(p, false);
+ old_size = isalloc(ptr, false);
if (config_valgrind && opt_valgrind)
old_rzsize = u2rz(old_size);
} else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(p, false);
+ old_size = isalloc(ptr, false);
old_rzsize = u2rz(old_size);
}
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- if (config_stats)
- usize = isalloc(q, config_prof);
- if (rsize != NULL) {
- if (config_stats == false)
- usize = isalloc(q, config_prof);
- *rsize = usize;
+ if (iralloct(ptr, size, extra, alignment, zero, true,
+ try_tcache_alloc, try_tcache_dalloc, arena) == NULL) {
+ usize = isalloc(ptr, config_prof);
+ goto label_not_moved;
}
+ usize = isalloc(ptr, config_prof);
}
- *ptr = q;
if (config_stats) {
thread_allocated_t *ta;
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_size;
}
- UTRACE(p, size, q);
- JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
- return (ALLOCM_SUCCESS);
-label_err:
- if (no_move) {
- UTRACE(p, size, q);
- return (ALLOCM_ERR_NOT_MOVED);
- }
-label_oom:
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in rallocm(): "
- "out of memory\n");
- abort();
- }
- UTRACE(p, size, 0);
- return (ALLOCM_ERR_OOM);
+ JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_size, old_rzsize, zero);
+label_not_moved:
+ UTRACE(ptr, size, ptr);
+ return (usize);
}
-int
-je_sallocm(const void *ptr, size_t *rsize, int flags)
+size_t
+je_sallocx(const void *ptr, int flags)
{
- size_t sz;
+ size_t usize;
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
- sz = ivsalloc(ptr, config_prof);
+ usize = ivsalloc(ptr, config_prof);
else {
assert(ptr != NULL);
- sz = isalloc(ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
}
- assert(rsize != NULL);
- *rsize = sz;
- return (ALLOCM_SUCCESS);
+ return (usize);
}
-int
-je_dallocm(void *ptr, int flags)
+void
+je_dallocx(void *ptr, int flags)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
@@ -1677,28 +1676,161 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
- iqallocx(ptr, try_tcache);
+ iqalloct(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-
- return (ALLOCM_SUCCESS);
}
-int
-je_nallocm(size_t *rsize, size_t size, int flags)
+size_t
+je_nallocx(size_t size, int flags)
{
size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
assert(size != 0);
if (malloc_init())
- return (ALLOCM_ERR_OOM);
+ return (0);
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
+ return (usize);
+}
+
+int
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+}
+
+int
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_nametomib(name, mibp, miblenp));
+}
+
+int
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+}
+
+void
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+
+ stats_print(write_cb, cbopaque, opts);
+}
+
+size_t
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+{
+ size_t ret;
+
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
+
+ if (config_ivsalloc)
+ ret = ivsalloc(ptr, config_prof);
+ else
+ ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+
+ return (ret);
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin experimental functions.
+ */
+#ifdef JEMALLOC_EXPERIMENTAL
+
+int
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+{
+ void *p;
+
+ assert(ptr != NULL);
+
+ p = je_mallocx(size, flags);
+ if (p == NULL)
return (ALLOCM_ERR_OOM);
+ if (rsize != NULL)
+ *rsize = isalloc(p, config_prof);
+ *ptr = p;
+ return (ALLOCM_SUCCESS);
+}
+int
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+{
+ int ret;
+ bool no_move = flags & ALLOCM_NO_MOVE;
+
+ assert(ptr != NULL);
+ assert(*ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+
+ if (no_move) {
+ size_t usize = je_xallocx(*ptr, size, extra, flags);
+ ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
+ if (rsize != NULL)
+ *rsize = usize;
+ } else {
+ void *p = je_rallocx(*ptr, size+extra, flags);
+ if (p != NULL) {
+ *ptr = p;
+ ret = ALLOCM_SUCCESS;
+ } else
+ ret = ALLOCM_ERR_OOM;
+ if (rsize != NULL)
+ *rsize = isalloc(*ptr, config_prof);
+ }
+ return (ret);
+}
+
+int
+je_sallocm(const void *ptr, size_t *rsize, int flags)
+{
+
+ assert(rsize != NULL);
+ *rsize = je_sallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_dallocm(void *ptr, int flags)
+{
+
+ je_dallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+ size_t usize;
+
+ usize = je_nallocx(size, flags);
+ if (usize == 0)
+ return (ALLOCM_ERR_OOM);
if (rsize != NULL)
*rsize = usize;
return (ALLOCM_SUCCESS);
diff --git a/src/tcache.c b/src/tcache.c
index 88ec481..6de9296 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -292,7 +292,7 @@ tcache_create(arena_t *arena)
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
- tcache = (tcache_t *)icallocx(size, false, arena);
+ tcache = (tcache_t *)icalloct(size, false, arena);
if (tcache == NULL)
return (NULL);
@@ -366,7 +366,7 @@ tcache_destroy(tcache_t *tcache)
arena_dalloc_large(arena, chunk, tcache);
} else
- idallocx(tcache, false);
+ idalloct(tcache, false);
}
void
diff --git a/src/tsd.c b/src/tsd.c
index 8431751..700caab 100644
--- a/src/tsd.c
+++ b/src/tsd.c
@@ -21,7 +21,7 @@ void
malloc_tsd_dalloc(void *wrapper)
{
- idallocx(wrapper, false);
+ idalloct(wrapper, false);
}
void
diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c
new file mode 100644
index 0000000..f12855e
--- /dev/null
+++ b/test/integration/mallocx.c
@@ -0,0 +1,149 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
+#define MAXALIGN ((size_t)0x2000000LU)
+#define NITER 4
+
+TEST_BEGIN(test_basic)
+{
+ size_t nsz, rsz, sz;
+ void *p;
+
+ sz = 42;
+ nsz = nallocx(sz, 0);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ rsz = sallocx(p, 0);
+ assert_zu_ge(rsz, sz, "Real size smaller than expected");
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ dallocx(p, 0);
+
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ rsz = sallocx(p, 0);
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_errors)
+{
+ void *p;
+ size_t nsz, sz, alignment;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ sz = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ sz = 0x80000000LU;
+#endif
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment));
+ assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz,
+ MALLOCX_ALIGN(alignment));
+ p = mallocx(sz, MALLOCX_ALIGN(alignment));
+ assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz,
+ MALLOCX_ALIGN(alignment));
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ sz = UINT64_C(0x8400000000000001);
+#else
+ alignment = 0x40000000LU;
+ sz = 0x84000001LU;
+#endif
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment));
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ALIGN(alignment));
+ assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz,
+ MALLOCX_ALIGN(alignment));
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ sz = UINT64_C(0xfffffffffffffff0);
+#else
+ sz = 0xfffffff0LU;
+#endif
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment));
+ assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz,
+ MALLOCX_ALIGN(alignment));
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment));
+ assert_zu_eq(nsz, 0, "Expected error for nallocx(%zu, %#x)", sz,
+ MALLOCX_ALIGN(alignment));
+ p = mallocx(sz, MALLOCX_ALIGN(alignment));
+ assert_ptr_null(p, "Expected error for mallocx(%zu, %#x)", sz,
+ MALLOCX_ALIGN(alignment));
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ size_t nsz, rsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_ptr_not_null(ps[i],
+ "mallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ assert_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_basic,
+ test_alignment_errors,
+ test_alignment_and_size));
+}
diff --git a/test/integration/rallocm.c b/test/integration/rallocm.c
index c13cd69..33c11bb 100644
--- a/test/integration/rallocm.c
+++ b/test/integration/rallocm.c
@@ -1,5 +1,3 @@
-#include <unistd.h>
-
#include "test/jemalloc_test.h"
TEST_BEGIN(test_same_size)
diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c
new file mode 100644
index 0000000..cc9138b
--- /dev/null
+++ b/test/integration/rallocx.c
@@ -0,0 +1,51 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_grow_and_shrink)
+{
+ void *p, *q;
+ size_t tsz;
+#define NCYCLES 3
+ unsigned i, j;
+#define NSZS 2500
+ size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ szs[0] = sallocx(p, 0);
+
+ for (i = 0; i < NCYCLES; i++) {
+ for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+ q = rallocx(p, szs[j-1]+1, 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j-1], szs[j-1]+1);
+ szs[j] = sallocx(q, 0);
+ assert_zu_ne(szs[j], szs[j-1]+1,
+			    "Expected size to be at least: %zu", szs[j-1]+1);
+ p = q;
+ }
+
+ for (j--; j > 0; j--) {
+ q = rallocx(p, szs[j-1], 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j], szs[j-1]);
+ tsz = sallocx(q, 0);
+ assert_zu_eq(tsz, szs[j-1],
+ "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+ p = q;
+ }
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_grow_and_shrink));
+}
diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c
new file mode 100644
index 0000000..ab4cf94
--- /dev/null
+++ b/test/integration/xallocx.c
@@ -0,0 +1,59 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_same_size)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, sz-42, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz + 5, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail));
+}
diff --git a/test/unit/mq.c b/test/unit/mq.c
index 01e72fd..e6cba10 100644
--- a/test/unit/mq.c
+++ b/test/unit/mq.c
@@ -39,8 +39,7 @@ thd_receiver_start(void *arg)
for (i = 0; i < (NSENDERS * NMSGS); i++) {
mq_msg_t *msg = mq_get(mq);
assert_ptr_not_null(msg, "mq_get() should never return NULL");
- assert_d_eq(jet_dallocm(msg, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() failure");
+ jet_dallocx(msg, 0);
}
return (NULL);
}
@@ -54,8 +53,8 @@ thd_sender_start(void *arg)
for (i = 0; i < NMSGS; i++) {
mq_msg_t *msg;
void *p;
- assert_d_eq(jet_allocm(&p, NULL, sizeof(mq_msg_t), 0),
- ALLOCM_SUCCESS, "Unexpected allocm() failure");
+ p = jet_mallocx(sizeof(mq_msg_t), 0);
+		assert_ptr_not_null(p, "Unexpected mallocx() failure");
msg = (mq_msg_t *)p;
mq_put(mq, msg);
}