author    Jason Evans <jasone@canonware.com>  2016-05-28 07:17:28 (GMT)
committer Jason Evans <jasone@canonware.com>  2016-06-06 03:42:18 (GMT)
commit    ed2c2427a7684bc8f41da54319c5dff00e177f76 (patch)
tree      14a37cf17eddf0e8779bbbf0ddbde6a340d62a95
parent    b46261d58b449cc4c099ed2384451a2499688f0e (diff)
Use huge size class infrastructure for large size classes.
-rw-r--r--  Makefile.in                                         1
-rw-r--r--  doc/jemalloc.xml.in                                88
-rw-r--r--  include/jemalloc/internal/arena.h                 197
-rw-r--r--  include/jemalloc/internal/chunk.h                   3
-rw-r--r--  include/jemalloc/internal/ctl.h                     3
-rw-r--r--  include/jemalloc/internal/extent.h                 36
-rw-r--r--  include/jemalloc/internal/huge.h                    5
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in   50
-rw-r--r--  include/jemalloc/internal/private_symbols.txt      11
-rw-r--r--  include/jemalloc/internal/prof.h                    6
-rw-r--r--  include/jemalloc/internal/stats.h                  34
-rw-r--r--  include/jemalloc/internal/tcache.h                 33
-rw-r--r--  src/arena.c                                       870
-rw-r--r--  src/base.c                                          3
-rw-r--r--  src/chunk.c                                        27
-rw-r--r--  src/chunk_dss.c                                     2
-rw-r--r--  src/ctl.c                                         175
-rw-r--r--  src/extent.c                                        4
-rw-r--r--  src/huge.c                                         67
-rw-r--r--  src/jemalloc.c                                     46
-rw-r--r--  src/stats.c                                        86
-rw-r--r--  src/tcache.c                                       41
-rw-r--r--  src/zone.c                                          8
-rw-r--r--  test/integration/chunk.c                           28
-rw-r--r--  test/integration/xallocx.c                        103
-rw-r--r--  test/unit/arena_reset.c                            33
-rw-r--r--  test/unit/decay.c                                  41
-rw-r--r--  test/unit/extent_quantize.c                        41
-rw-r--r--  test/unit/junk.c                                   91
-rw-r--r--  test/unit/mallctl.c                                23
-rw-r--r--  test/unit/prof_idump.c                             13
-rw-r--r--  test/unit/run_quantize.c                          149
-rw-r--r--  test/unit/stats.c                                 105
-rw-r--r--  test/unit/zero.c                                   11
34 files changed, 459 insertions(+), 1975 deletions(-)
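The user-visible consequence of this change is that the per large size class
("lruns") mallctls and stats are removed, as the doc/jemalloc.xml.in hunks
below show; the huge-class mallctls now cover those sizes. A minimal
illustrative sketch (not part of the commit) of the surviving introspection
path, assuming a stats-enabled jemalloc build:

    #include <jemalloc/jemalloc.h>

    unsigned nhchunks;
    size_t sz = sizeof(nhchunks);
    /* arenas.nlruns and arenas.lrun.<i>.size no longer exist; the huge
     * size classes subsume the former large classes. */
    mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0);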
diff --git a/Makefile.in b/Makefile.in
index 7d73155..2e9bbbc 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -159,7 +159,6 @@ TESTS_UNIT := \
$(srcroot)test/unit/qr.c \
$(srcroot)test/unit/rb.c \
$(srcroot)test/unit/rtree.c \
- $(srcroot)test/unit/run_quantize.c \
$(srcroot)test/unit/SFMT.c \
$(srcroot)test/unit/size_classes.c \
$(srcroot)test/unit/smoothstep.c \
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index e3c97bd..efb4bfe 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -417,22 +417,21 @@ for (i = 0; i < nbins; i++) {
<parameter>write_cb</parameter>, or
<function>malloc_message<parameter/></function> if
<parameter>write_cb</parameter> is <constant>NULL</constant>. This
- function can be called repeatedly. General information that never
- changes during execution can be omitted by specifying "g" as a character
- within the <parameter>opts</parameter> string. Note that
+ function can be called repeatedly. General information that never changes
+ during execution can be omitted by specifying "g" as a character within
+ the <parameter>opts</parameter> string. Note that
<function>malloc_message<parameter/></function> uses the
<function>mallctl*<parameter/></function> functions internally, so
inconsistent statistics can be reported if multiple threads use these
- functions simultaneously. If <option>--enable-stats</option> is
- specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
- be specified to omit merged arena and per arena statistics, respectively;
- &ldquo;b&rdquo;, &ldquo;l&rdquo;, and &ldquo;h&rdquo; can be specified to
- omit per size class statistics for bins, large objects, and huge objects,
- respectively. Unrecognized characters are silently ignored. Note that
- thread caching may prevent some statistics from being completely up to
- date, since extra locking would be required to merge counters that track
- thread cache operations.
- </para>
+ functions simultaneously. If <option>--enable-stats</option> is specified
+ during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can be specified
+ to omit merged arena and per arena statistics, respectively;
+ &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
+ class statistics for bins and large objects, respectively. Unrecognized
+ characters are silently ignored. Note that thread caching may prevent
+ some statistics from being completely up to date, since extra locking
+ would be required to merge counters that track thread cache
+ operations.</para>
<para>The <function>malloc_usable_size<parameter/></function> function
returns the usable size of the allocation pointed to by
@@ -1888,25 +1887,6 @@ typedef struct {
<listitem><para>Number of bytes per page run.</para></listitem>
</varlistentry>
- <varlistentry id="arenas.nlruns">
- <term>
- <mallctl>arenas.nlruns</mallctl>
- (<type>unsigned</type>)
- <literal>r-</literal>
- </term>
- <listitem><para>Total number of large size classes.</para></listitem>
- </varlistentry>
-
- <varlistentry id="arenas.lrun.i.size">
- <term>
- <mallctl>arenas.lrun.&lt;i&gt;.size</mallctl>
- (<type>size_t</type>)
- <literal>r-</literal>
- </term>
- <listitem><para>Maximum size supported by this large size
- class.</para></listitem>
- </varlistentry>
-
<varlistentry id="arenas.nhchunks">
<term>
<mallctl>arenas.nhchunks</mallctl>
@@ -2534,50 +2514,6 @@ typedef struct {
<listitem><para>Current number of runs.</para></listitem>
</varlistentry>
- <varlistentry id="stats.arenas.i.lruns.j.nmalloc">
- <term>
- <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</mallctl>
- (<type>uint64_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Cumulative number of allocation requests for this size
- class served directly by the arena.</para></listitem>
- </varlistentry>
-
- <varlistentry id="stats.arenas.i.lruns.j.ndalloc">
- <term>
- <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</mallctl>
- (<type>uint64_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Cumulative number of deallocation requests for this
- size class served directly by the arena.</para></listitem>
- </varlistentry>
-
- <varlistentry id="stats.arenas.i.lruns.j.nrequests">
- <term>
- <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</mallctl>
- (<type>uint64_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Cumulative number of allocation requests for this size
- class.</para></listitem>
- </varlistentry>
-
- <varlistentry id="stats.arenas.i.lruns.j.curruns">
- <term>
- <mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</mallctl>
- (<type>size_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Current number of runs for this size class.
- </para></listitem>
- </varlistentry>
-
<varlistentry id="stats.arenas.i.hchunks.j.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</mallctl>
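To illustrate the updated malloc_stats_print() opts handling described in the
first hunk above (a sketch, not from the commit): the "h" character is no
longer listed, so a caller suppressing general information and per size class
detail would now pass only "g", "b", and "l":

    #include <jemalloc/jemalloc.h>

    /* Omit general info ("g"), per-bin stats ("b"), and per large size
     * class stats ("l"); unrecognized characters are silently ignored. */
    malloc_stats_print(NULL, NULL, "gbl");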
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 4d2b25a..bf16e8e 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -294,7 +294,6 @@ struct arena_s {
dss_prec_t dss_prec;
-
/* Extant arena chunks. */
ql_head(extent_t) achunks;
@@ -465,9 +464,6 @@ extern const arena_bin_info_t arena_bin_info[NBINS];
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
-extern size_t large_maxclass; /* Max large size class. */
-extern unsigned nlclasses; /* Number of large size classes. */
-extern unsigned nhclasses; /* Number of huge size classes. */
#ifdef JEMALLOC_JET
typedef size_t (run_quantize_t)(size_t);
@@ -485,7 +481,8 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
bool cache);
extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
-void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ bool locked);
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
@@ -508,33 +505,19 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info);
#endif
-void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
- bool zero);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(tsdn_t *tsdn, const extent_t *extent,
- const void *ptr, size_t size);
+void arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
+ size_t usize);
+void arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
+ tcache_t *tcache, bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, extent_t *extent, void *ptr,
arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
extent_t *extent, void *ptr, size_t pageind);
-#ifdef JEMALLOC_JET
-typedef void (arena_dalloc_junk_large_t)(void *, size_t);
-extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
-#else
-void arena_dalloc_junk_large(void *ptr, size_t usize);
-#endif
-void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, extent_t *extent, void *ptr);
-void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, void *ptr);
-#ifdef JEMALLOC_JET
-typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
-extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
-#endif
bool arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
size_t oldsize, size_t size, size_t extra, bool zero);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
@@ -551,8 +534,7 @@ void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty, arena_stats_t *astats,
- malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
- malloc_huge_stats_t *hstats);
+ malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
@@ -639,8 +621,7 @@ void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
-size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
- bool demote);
+size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
tcache_t *tcache, bool slow_path);
void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
@@ -1225,7 +1206,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache, size, ind, zero, slow_path));
}
if (likely(size <= tcache_maxclass)) {
- return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
+ return (tcache_alloc_huge(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path));
}
/* (size > tcache_maxclass) case falls through. */
@@ -1244,49 +1225,25 @@ arena_aalloc(tsdn_t *tsdn, const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
+arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
size_t ret;
- size_t pageind;
- szind_t binind;
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
const arena_chunk_t *chunk =
(const arena_chunk_t *)extent_base_get(extent);
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ szind_t binind;
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
binind = arena_mapbits_binind_get(chunk, pageind);
- if (unlikely(binind == BININD_INVALID || (config_prof && !demote
- && arena_mapbits_large_get(chunk, pageind) != 0))) {
- /*
- * Large allocation. In the common case (demote), and
- * as this is an inline function, most callers will only
- * end up looking at binind to determine that ptr is a
- * small allocation.
- */
- assert(config_cache_oblivious || ((uintptr_t)ptr &
- PAGE_MASK) == 0);
- ret = arena_mapbits_large_size_get(chunk, pageind) -
- large_pad;
- assert(ret != 0);
- assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
- chunk_npages);
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk,
- pageind+((ret+large_pad)>>LG_PAGE)-1));
- } else {
- /*
- * Small allocation (possibly promoted to a large
- * object).
- */
- assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
- arena_ptr_small_binind_get(tsdn, ptr,
- arena_mapbits_get(chunk, pageind)) == binind);
- ret = index2size(binind);
- }
+ /* Small allocation. */
+ assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+ arena_ptr_small_binind_get(tsdn, ptr,
+ arena_mapbits_get(chunk, pageind)) == binind);
+ ret = index2size(binind);
} else
ret = huge_salloc(tsdn, extent);
@@ -1297,49 +1254,40 @@ JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
bool slow_path)
{
- size_t pageind, mapbits;
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
+ /* Small allocation. */
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
- /* Small allocation. */
- if (likely(tcache != NULL)) {
- szind_t binind =
- arena_ptr_small_binind_get(tsdn, ptr,
- mapbits);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
- } else {
- arena_dalloc_small(tsdn,
- extent_arena_get(extent), chunk, extent,
- ptr, pageind);
- }
+ assert((mapbits & CHUNK_MAP_LARGE) == 0);
+ if (likely(tcache != NULL)) {
+ szind_t binind = arena_ptr_small_binind_get(tsdn, ptr,
+ mapbits);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
+ slow_path);
} else {
- size_t size = arena_mapbits_large_size_get(chunk,
- pageind);
-
- assert(config_cache_oblivious || ((uintptr_t)ptr &
- PAGE_MASK) == 0);
+ arena_dalloc_small(tsdn, extent_arena_get(extent),
+ chunk, extent, ptr, pageind);
+ }
+ } else {
+ size_t usize = extent_usize_get(extent);
- if (likely(tcache != NULL) && size - large_pad <=
- tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size - large_pad, slow_path);
+ if (likely(tcache != NULL) && usize <= tcache_maxclass) {
+ if (config_prof && unlikely(usize <= SMALL_MAXCLASS)) {
+ arena_dalloc_promoted(tsdn, extent, ptr,
+ tcache, slow_path);
} else {
- arena_dalloc_large(tsdn,
- extent_arena_get(extent), chunk, extent,
- ptr);
+ tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
+ usize, slow_path);
}
- }
- } else
- huge_dalloc(tsdn, extent);
+ } else
+ huge_dalloc(tsdn, extent);
+ }
}
JEMALLOC_ALWAYS_INLINE void
@@ -1348,55 +1296,34 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
{
assert(!tsdn_null(tsdn) || tcache == NULL);
+ assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-
- if (config_prof && opt_prof) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
- LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) !=
- 0);
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- /*
- * Make sure to use promoted size, not request
- * size.
- */
- size = arena_mapbits_large_size_get(chunk,
- pageind) - large_pad;
- }
+ /* Small allocation. */
+ if (likely(tcache != NULL)) {
+ szind_t binind = size2index(size);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
+ slow_path);
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)extent_base_get(extent);
+ size_t pageind = ((uintptr_t)ptr -
+ (uintptr_t)chunk) >> LG_PAGE;
+ arena_dalloc_small(tsdn, extent_arena_get(extent),
+ chunk, extent, ptr, pageind);
}
- assert(s2u(size) == s2u(arena_salloc(tsdn, extent, ptr,
- false)));
-
- if (likely(size <= SMALL_MAXCLASS)) {
- /* Small allocation. */
- if (likely(tcache != NULL)) {
- szind_t binind = size2index(size);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
+ } else {
+ if (likely(tcache != NULL) && size <= tcache_maxclass) {
+ if (config_prof && unlikely(size <= SMALL_MAXCLASS)) {
+ arena_dalloc_promoted(tsdn, extent, ptr,
+ tcache, slow_path);
} else {
- size_t pageind = ((uintptr_t)ptr -
- (uintptr_t)chunk) >> LG_PAGE;
- arena_dalloc_small(tsdn,
- extent_arena_get(extent), chunk, extent,
- ptr, pageind);
- }
- } else {
- assert(config_cache_oblivious || ((uintptr_t)ptr &
- PAGE_MASK) == 0);
-
- if (likely(tcache != NULL) && size <= tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
size, slow_path);
- } else {
- arena_dalloc_large(tsdn,
- extent_arena_get(extent), chunk, extent,
- ptr);
}
- }
- } else
- huge_dalloc(tsdn, extent);
+ } else
+ huge_dalloc(tsdn, extent);
+ }
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 624073d..6f50302 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -61,7 +61,8 @@ bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_t *extent, size_t offset, size_t length);
extent_t *chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t size_b);
+ chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t usize_a,
+ size_t size_b, size_t usize_b);
bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_t *a, extent_t *b);
bool chunk_boot(void);
diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index af0f6d7..00deeb8 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -51,8 +51,7 @@ struct ctl_arena_stats_s {
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
- malloc_large_stats_t *lstats; /* nlclasses elements. */
- malloc_huge_stats_t *hstats; /* nhclasses elements. */
+ malloc_huge_stats_t hstats[NSIZES - NBINS];
};
struct ctl_stats_s {
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 4023f82..4e1e97e 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -15,9 +15,15 @@ struct extent_s {
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
- /* Total region size. */
+ /* Extent size. */
size_t e_size;
+ /*
+ * Usable size, typically smaller than extent size due to large_pad or
+ * promotion of sampled small regions.
+ */
+ size_t e_usize;
+
/* True if extent is active (in use). */
bool e_active;
@@ -106,6 +112,7 @@ void extent_arena_set(extent_t *extent, arena_t *arena);
void extent_addr_set(extent_t *extent, void *addr);
void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void extent_size_set(extent_t *extent, size_t size);
+void extent_usize_set(extent_t *extent, size_t usize);
void extent_active_set(extent_t *extent, bool active);
void extent_dirty_set(extent_t *extent, bool dirty);
void extent_zeroed_set(extent_t *extent, bool zeroed);
@@ -113,8 +120,8 @@ void extent_committed_set(extent_t *extent, bool committed);
void extent_slab_set(extent_t *extent, bool slab);
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void extent_init(extent_t *extent, arena_t *arena, void *addr,
- size_t size, bool active, bool dirty, bool zeroed, bool committed,
- bool slab);
+ size_t size, size_t usize, bool active, bool dirty, bool zeroed,
+ bool committed, bool slab);
void extent_dirty_insert(extent_t *extent,
arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty);
void extent_dirty_remove(extent_t *extent);
@@ -158,7 +165,7 @@ extent_usize_get(const extent_t *extent)
{
assert(!extent->e_slab);
- return (extent->e_size - large_pad);
+ return (extent->e_usize);
}
JEMALLOC_INLINE void *
@@ -172,14 +179,15 @@ JEMALLOC_INLINE void *
extent_last_get(const extent_t *extent)
{
- return ((void *)(uintptr_t)extent->e_addr + extent->e_size - PAGE);
+ return ((void *)(uintptr_t)extent->e_addr + extent_size_get(extent) -
+ PAGE);
}
JEMALLOC_INLINE void *
extent_past_get(const extent_t *extent)
{
- return ((void *)(uintptr_t)extent->e_addr + extent->e_size);
+ return ((void *)(uintptr_t)extent->e_addr + extent_size_get(extent));
}
JEMALLOC_INLINE bool
@@ -258,9 +266,12 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
uint64_t r =
prng_lg_range(&extent_arena_get(extent)->offset_state,
lg_range, true);
- uintptr_t random_offset = ((uintptr_t)r) << lg_range;
+ uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+ lg_range);
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
random_offset);
+ assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
+ extent->e_addr);
}
}
@@ -272,6 +283,13 @@ extent_size_set(extent_t *extent, size_t size)
}
JEMALLOC_INLINE void
+extent_usize_set(extent_t *extent, size_t usize)
+{
+
+ extent->e_usize = usize;
+}
+
+JEMALLOC_INLINE void
extent_active_set(extent_t *extent, bool active)
{
@@ -315,7 +333,8 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
JEMALLOC_INLINE void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
- bool active, bool dirty, bool zeroed, bool committed, bool slab)
+ size_t usize, bool active, bool dirty, bool zeroed, bool committed,
+ bool slab)
{
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
@@ -323,6 +342,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
+ extent_usize_set(extent, usize);
extent_active_set(extent, active);
extent_dirty_set(extent, dirty);
extent_zeroed_set(extent, zeroed);
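A sketch of the extended extent_init() signature in use, mirroring the
purge_chunks_sentinel call in the src/arena.c hunk further below (the variable
names here are illustrative):

    extent_t sentinel;
    /* addr/size/usize are NULL/0/0 for a sentinel; the trailing flags are
     * active, dirty, zeroed, committed, and slab. */
    extent_init(&sentinel, arena, NULL, 0, 0, false, false, false, false,
        false);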
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index bdc8f84..836f1b5 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -17,9 +17,12 @@ bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
+typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
+#else
+void huge_dalloc_junk(void *ptr, size_t usize);
#endif
+void huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
void huge_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index ef4e052..f4d26be 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -797,33 +797,14 @@ sa2u(size_t size, size_t alignment)
return (usize);
}
- /*
- * We can't achieve subpage alignment, so round up alignment to the
- * minimum that can actually be supported.
- */
- alignment = PAGE_CEILING(alignment);
-
- /* Try for a large size class. */
- if (likely(size <= large_maxclass) && likely(alignment == PAGE)) {
- /* Make sure result is a large size class. */
- usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
-
- /*
- * Calculate the size of the over-size run that arena_palloc()
- * would need to allocate in order to guarantee the alignment.
- */
- if (usize + large_pad + alignment <= arena_maxrun)
- return (usize);
- }
-
/* Huge size class. Beware of overflow. */
if (unlikely(alignment > HUGE_MAXCLASS))
return (0);
- /* Make sure result is a huge size class. */
- if (size <= chunksize)
- usize = chunksize;
+ /* Make sure result is a large size class. */
+ if (size <= LARGE_MINCLASS)
+ usize = LARGE_MINCLASS;
else {
usize = s2u(size);
if (usize < size) {
@@ -836,7 +817,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the multi-page mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
- if (usize + alignment < usize) {
+ if (usize + large_pad + PAGE_CEILING(alignment) < usize) {
/* size_t overflow. */
return (0);
}
@@ -960,8 +941,7 @@ iealloc(tsdn_t *tsdn, const void *ptr)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(tsdn_t *tsdn, const void *ptr);
-size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
- bool demote);
+size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
@@ -971,7 +951,7 @@ void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+size_t ivsalloc(tsdn_t *tsdn, const void *ptr);
void idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
bool is_metadata, bool slow_path);
void idalloc(tsd_t *tsd, extent_t *extent, void *ptr);
@@ -1003,17 +983,15 @@ iaalloc(tsdn_t *tsdn, const void *ptr)
* tsdn_t *tsdn = [...]
* void *ptr = [...]
* extent_t *extent = iealloc(tsdn, ptr);
- * size_t sz = isalloc(tsdn, extent, ptr, config_prof);
+ * size_t sz = isalloc(tsdn, extent, ptr);
*/
JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
+isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
assert(ptr != NULL);
- /* Demotion only makes sense if config_prof is true. */
- assert(config_prof || !demote);
- return (arena_salloc(tsdn, extent, ptr, demote));
+ return (arena_salloc(tsdn, extent, ptr));
}
JEMALLOC_ALWAYS_INLINE void *
@@ -1029,7 +1007,7 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
- iealloc(tsdn, ret), ret, config_prof));
+ iealloc(tsdn, ret), ret));
}
return (ret);
}
@@ -1057,7 +1035,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
- iealloc(tsdn, ret), ret, config_prof));
+ iealloc(tsdn, ret), ret));
}
return (ret);
}
@@ -1079,7 +1057,7 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
}
JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+ivsalloc(tsdn_t *tsdn, const void *ptr)
{
extent_t *extent;
@@ -1091,7 +1069,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
/* Only arena chunks should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
- return (isalloc(tsdn, extent, ptr, demote));
+ return (isalloc(tsdn, extent, ptr));
}
JEMALLOC_ALWAYS_INLINE void
@@ -1104,7 +1082,7 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
assert(!is_metadata || iaalloc(tsdn, ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
arena_metadata_allocated_sub(iaalloc(tsdn, ptr), isalloc(tsdn,
- extent, ptr, config_prof));
+ extent, ptr));
}
arena_dalloc(tsdn, extent, ptr, tcache, slow_path);
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 75a1dac..5f94d2c 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -23,10 +23,8 @@ arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
-arena_dalloc_junk_large
arena_dalloc_junk_small
-arena_dalloc_large
-arena_dalloc_large_junked_locked
+arena_dalloc_promoted
arena_dalloc_small
arena_decay_tick
arena_decay_ticks
@@ -45,7 +43,6 @@ arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_hard
-arena_malloc_large
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
@@ -92,7 +89,7 @@ arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
-arena_prof_promoted
+arena_prof_promote
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
@@ -254,6 +251,7 @@ hash_x86_128
hash_x86_32
huge_dalloc
huge_dalloc_junk
+huge_dalloc_junked_locked
huge_malloc
huge_palloc
huge_prof_tctx_get
@@ -287,7 +285,6 @@ ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
-large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
@@ -320,8 +317,6 @@ narenas_tdata_cleanup
narenas_total_get
ncpus
nhbins
-nhclasses
-nlclasses
nstime_add
nstime_compare
nstime_copy
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 81f02d1..7da20ad 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -489,7 +489,7 @@ prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
cassert(config_prof);
assert(ptr != NULL);
- assert(usize == isalloc(tsdn, extent, ptr, true));
+ assert(usize == isalloc(tsdn, extent, ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(tsdn, extent, ptr, usize, tctx);
@@ -510,7 +510,7 @@ prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, true));
+ assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
@@ -544,7 +544,7 @@ prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize)
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);
cassert(config_prof);
- assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, true));
+ assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index b621817..c9a716d 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -3,7 +3,6 @@
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
@@ -62,12 +61,10 @@ struct malloc_bin_stats_s {
size_t curruns;
};
-struct malloc_large_stats_s {
+struct malloc_huge_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
- * the arena. Note that tcache may allocate an object, then recycle it
- * many times, resulting many increments to nrequests, but only one
- * each to nmalloc and ndalloc.
+ * the arena.
*/
uint64_t nmalloc;
uint64_t ndalloc;
@@ -79,21 +76,6 @@ struct malloc_large_stats_s {
*/
uint64_t nrequests;
- /*
- * Current number of runs of this size class, including runs currently
- * cached by tcache.
- */
- size_t curruns;
-};
-
-struct malloc_huge_stats_s {
- /*
- * Total number of allocation/deallocation requests served directly by
- * the arena.
- */
- uint64_t nmalloc;
- uint64_t ndalloc;
-
/* Current number of (multi-)chunk allocations of this size class. */
size_t curhchunks;
};
@@ -126,21 +108,13 @@ struct arena_stats_s {
size_t metadata_mapped;
size_t metadata_allocated; /* Protected via atomic_*_z(). */
- /* Per-size-category statistics. */
- size_t allocated_large;
- uint64_t nmalloc_large;
- uint64_t ndalloc_large;
- uint64_t nrequests_large;
-
size_t allocated_huge;
uint64_t nmalloc_huge;
uint64_t ndalloc_huge;
-
- /* One element for each large size class. */
- malloc_large_stats_t *lstats;
+ uint64_t nrequests_huge;
/* One element for each huge size class. */
- malloc_huge_stats_t *hstats;
+ malloc_huge_stats_t hstats[NSIZES - NBINS];
};
#endif /* JEMALLOC_H_STRUCTS */
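The hstats array is now statically sized at NSIZES - NBINS and indexed by huge
size class. A sketch of the index mapping that the stats updates in the
src/arena.c hunks below rely on, with illustrative variables:

    szind_t index = size2index(usize);
    szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
    malloc_huge_stats_t *hs = &arena->stats.hstats[hindex];
    hs->nmalloc++;
    hs->nrequests++;
    hs->curhchunks++;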
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index ee63a65..186adf2 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -30,8 +30,8 @@ typedef struct tcaches_s tcaches_t;
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
-/* Number of cache slots for large size classes. */
-#define TCACHE_NSLOTS_LARGE 20
+/* Number of cache slots for huge size classes. */
+#define TCACHE_NSLOTS_HUGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
@@ -113,7 +113,7 @@ extern tcache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
- * large-object bins.
+ * huge-object bins.
*/
extern unsigned nhbins;
@@ -136,7 +136,7 @@ void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+void tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *oldarena, arena_t *newarena);
@@ -163,11 +163,11 @@ void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
-void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+void *tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
szind_t binind, bool slow_path);
-void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
+void tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
@@ -336,7 +336,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path)
{
void *ret;
@@ -349,14 +349,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
/*
- * Only allocate one large object at a time, because it's quite
+ * Only allocate one huge object at a time, because it's quite
* expensive to create one and not use it.
*/
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
- ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
+ ret = huge_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
if (ret == NULL)
return (NULL);
} else {
@@ -369,14 +369,6 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
assert(usize <= tcache_maxclass);
}
- if (config_prof && usize == LARGE_MINCLASS) {
- arena_chunk_t *chunk =(arena_chunk_t *)extent_addr_get(
- iealloc(tsd_tsdn(tsd), ret));
- size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
- LG_PAGE);
- arena_mapbits_large_binind_set(chunk, pageind,
- BININD_INVALID);
- }
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
@@ -424,26 +416,25 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
+tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
bool slow_path)
{
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
- assert((size & PAGE_MASK) == 0);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
binind = size2index(size);
if (slow_path && config_fill && unlikely(opt_junk_free))
- arena_dalloc_junk_large(ptr, size);
+ huge_dalloc_junk(ptr, size);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
- tcache_bin_flush_large(tsd, tbin, binind,
+ tcache_bin_flush_huge(tsd, tbin, binind,
(tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
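A sketch of the renamed tcache entry points as a caller would use them for a
former large-class (now huge-class) object, assuming usize <= tcache_maxclass
(variables are illustrative):

    szind_t ind = size2index(usize);
    void *p = tcache_alloc_huge(tsd, arena, tcache, usize, ind,
        /* zero */ false, /* slow_path */ true);
    /* ... use p ... */
    tcache_dalloc_huge(tsd, tcache, p, usize, /* slow_path */ true);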
diff --git a/src/arena.c b/src/arena.c
index 4ce5557..d9882a4 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -33,9 +33,6 @@ const arena_bin_info_t arena_bin_info[NBINS] = {
size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
-size_t large_maxclass; /* Max large size class. */
-unsigned nlclasses; /* Number of large size classes. */
-unsigned nhclasses; /* Number of huge size classes. */
/******************************************************************************/
/*
@@ -447,6 +444,7 @@ static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{
+ assert(arena->nactive >= sub_pages);
if (config_stats) {
size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
@@ -574,15 +572,6 @@ arena_run_split_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
static bool
-arena_run_init_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- arena_run_t *run, size_t size, bool zero)
-{
-
- return (arena_run_split_large_helper(tsdn, arena, extent, run, size,
- false, zero));
-}
-
-static bool
arena_run_split_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_run_t *run, size_t size, szind_t binind)
{
@@ -835,58 +824,64 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
- szind_t index = size2index(usize) - nlclasses - NBINS;
+ szind_t index = size2index(usize);
+ szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
cassert(config_stats);
arena->stats.nmalloc_huge++;
arena->stats.allocated_huge += usize;
- arena->stats.hstats[index].nmalloc++;
- arena->stats.hstats[index].curhchunks++;
+ arena->stats.hstats[hindex].nmalloc++;
+ arena->stats.hstats[hindex].nrequests++;
+ arena->stats.hstats[hindex].curhchunks++;
}
static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
- szind_t index = size2index(usize) - nlclasses - NBINS;
+ szind_t index = size2index(usize);
+ szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
cassert(config_stats);
arena->stats.nmalloc_huge--;
arena->stats.allocated_huge -= usize;
- arena->stats.hstats[index].nmalloc--;
- arena->stats.hstats[index].curhchunks--;
+ arena->stats.hstats[hindex].nmalloc--;
+ arena->stats.hstats[hindex].nrequests--;
+ arena->stats.hstats[hindex].curhchunks--;
}
static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
- szind_t index = size2index(usize) - nlclasses - NBINS;
+ szind_t index = size2index(usize);
+ szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
cassert(config_stats);
arena->stats.ndalloc_huge++;
arena->stats.allocated_huge -= usize;
- arena->stats.hstats[index].ndalloc++;
- arena->stats.hstats[index].curhchunks--;
+ arena->stats.hstats[hindex].ndalloc++;
+ arena->stats.hstats[hindex].curhchunks--;
}
static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
- szind_t index = size2index(usize) - nlclasses - NBINS;
+ szind_t index = size2index(usize);
+ szind_t hindex = (index >= NBINS) ? index - NBINS : 0;
cassert(config_stats);
arena->stats.ndalloc_huge++;
- arena->stats.hstats[index].ndalloc--;
+ arena->stats.hstats[hindex].ndalloc--;
}
static void
-arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
+arena_huge_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize)
{
- arena_huge_dalloc_stats_update(arena, oldsize);
+ arena_huge_dalloc_stats_update(arena, oldusize);
arena_huge_malloc_stats_update(arena, usize);
}
@@ -906,7 +901,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize;
}
- arena_nactive_sub(arena, usize >> LG_PAGE);
+ arena_nactive_sub(arena, (usize + large_pad) >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock);
}
@@ -927,7 +922,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
- arena_nactive_add(arena, usize >> LG_PAGE);
+ arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);
extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
usize, large_pad, alignment, zero, false);
@@ -941,34 +936,35 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
}
void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ bool locked)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- malloc_mutex_lock(tsdn, &arena->lock);
+ if (!locked)
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
- arena_huge_dalloc_stats_update(arena, extent_size_get(extent));
+ arena_huge_dalloc_stats_update(arena, extent_usize_get(extent));
arena->stats.mapped -= extent_size_get(extent);
}
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ if (!locked)
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- size_t oldsize)
+ size_t oldusize)
{
- size_t usize = extent_size_get(extent);
- size_t udiff = oldsize - usize;
- size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = oldusize - usize;
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
- arena_huge_ralloc_stats_update(arena, oldsize, usize);
- if (cdiff != 0)
- arena->stats.mapped -= cdiff;
+ arena_huge_ralloc_stats_update(arena, oldusize, usize);
+ arena->stats.mapped -= udiff;
}
arena_nactive_sub(arena, udiff >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock);
@@ -976,16 +972,15 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
void
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- size_t oldsize)
+ size_t oldusize)
{
- size_t usize = extent_size_get(extent);
- size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
- size_t udiff = usize - oldsize;
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = usize - oldusize;
malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
- arena_huge_ralloc_stats_update(arena, oldsize, usize);
- arena->stats.mapped += cdiff;
+ arena_huge_ralloc_stats_update(arena, oldusize, usize);
+ arena->stats.mapped += udiff;
}
arena_nactive_add(arena, udiff >> LG_PAGE);
malloc_mutex_unlock(tsdn, &arena->lock);
@@ -1003,7 +998,7 @@ arena_run_first_best_fit(arena_t *arena, size_t size)
pind = psz2ind(run_quantize_ceil(size));
- for (i = pind; pind2sz(i) <= large_maxclass; i++) {
+ for (i = pind; pind2sz(i) <= arena_maxrun; i++) {
arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
&arena->runs_avail[i]);
if (miscelm != NULL)
@@ -1014,54 +1009,6 @@ arena_run_first_best_fit(arena_t *arena, size_t size)
}
static arena_run_t *
-arena_run_alloc_large_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
- bool zero)
-{
- arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
- if (run != NULL) {
- if (arena_run_split_large(tsdn, arena, iealloc(tsdn, run), run,
- size, zero))
- run = NULL;
- }
- return (run);
-}
-
-static arena_run_t *
-arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
-{
- arena_run_t *run;
- extent_t *extent;
-
- assert(size <= arena_maxrun);
- assert(size == PAGE_CEILING(size));
-
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_large_helper(tsdn, arena, size, zero);
- if (run != NULL)
- return (run);
-
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- extent = arena_chunk_alloc(tsdn, arena);
- if (extent != NULL) {
- run = &arena_miscelm_get_mutable((arena_chunk_t *)
- extent_base_get(extent), map_bias)->run;
- if (arena_run_split_large(tsdn, arena, iealloc(tsdn, run), run,
- size, zero))
- run = NULL;
- return (run);
- }
-
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_large_helper(tsdn, arena, size, zero));
-}
-
-static arena_run_t *
arena_run_alloc_small_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t binind)
{
@@ -1700,8 +1647,8 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
qr_new(&purge_runs_sentinel, rd_link);
- extent_init(&purge_chunks_sentinel, arena, NULL, 0, false, false, false,
- false, false);
+ extent_init(&purge_chunks_sentinel, arena, NULL, 0, 0, false, false,
+ false, false, false);
npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
&purge_runs_sentinel, &purge_chunks_sentinel);
@@ -1732,47 +1679,6 @@ arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
malloc_mutex_unlock(tsdn, &arena->lock);
}
-static void
-arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, extent_t *extent)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
- size_t pageind, npages;
-
- cassert(config_prof);
- assert(opt_prof);
-
- /*
- * Iterate over the allocated runs and remove profiled allocations from
- * the sample set.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- void *ptr = (void *)((uintptr_t)chunk + (pageind
- << LG_PAGE));
- size_t usize = isalloc(tsd_tsdn(tsd), extent,
- ptr, config_prof);
-
- prof_free(tsd, extent, ptr, usize);
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- /* Skip small run. */
- size_t binind = arena_mapbits_binind_get(chunk,
- pageind);
- const arena_bin_info_t *bin_info =
- &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
- }
- } else {
- /* Skip unallocated run. */
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
- }
- assert(pageind + npages <= chunk_npages);
- }
-}
-
void
arena_reset(tsd_t *tsd, arena_t *arena)
{
@@ -1793,19 +1699,6 @@ arena_reset(tsd_t *tsd, arena_t *arena)
* stats refreshes would impose an inconvenient burden.
*/
- /* Remove large allocations from prof sample set. */
- if (config_prof && opt_prof) {
- ql_foreach(extent, &arena->achunks, ql_link) {
- arena_achunk_prof_reset(tsd, arena, extent);
- }
- }
-
- /* Reset curruns for large size classes. */
- if (config_stats) {
- for (i = 0; i < nlclasses; i++)
- arena->stats.lstats[i].curruns = 0;
- }
-
/* Huge allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent =
@@ -1814,10 +1707,8 @@ arena_reset(tsd_t *tsd, arena_t *arena)
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
- if (config_stats || (config_prof && opt_prof)) {
- usize = isalloc(tsd_tsdn(tsd), extent, ptr,
- config_prof);
- }
+ if (config_stats || (config_prof && opt_prof))
+ usize = isalloc(tsd_tsdn(tsd), extent, ptr);
/* Remove huge allocation from prof sample set. */
if (config_prof && opt_prof)
prof_free(tsd, extent, ptr, usize);
@@ -2070,93 +1961,6 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
static void
-arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, arena_run_t *run, size_t oldsize, size_t newsize)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(extent, run);
- size_t pageind = arena_miscelm_to_pageind(extent, miscelm);
- size_t head_npages = (oldsize - newsize) >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
- size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
- size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
- CHUNK_MAP_UNZEROED : 0;
-
- assert(oldsize > newsize);
-
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * leading run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
- */
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages-1)));
- arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
-
- if (config_debug) {
- UNUSED size_t tail_npages = newsize >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
- }
- arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
- flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages)));
-
- arena_run_dalloc(tsdn, arena, extent, run, false, false,
- (flag_decommitted != 0));
-}
-
-static void
-arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, arena_run_t *run, size_t oldsize, size_t newsize,
- bool dirty)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(extent, run);
- size_t pageind = arena_miscelm_to_pageind(extent, miscelm);
- size_t head_npages = newsize >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
- size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
- size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
- CHUNK_MAP_UNZEROED : 0;
- arena_chunk_map_misc_t *tail_miscelm;
- arena_run_t *tail_run;
-
- assert(oldsize > newsize);
-
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * trailing run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
- */
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages-1)));
- arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
-
- if (config_debug) {
- UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
- }
- arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
- flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+head_npages)));
-
- tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
- tail_run = &tail_miscelm->run;
- arena_run_dalloc(tsdn, arena, extent, tail_run, dirty, false,
- (flag_decommitted != 0));
-}
-
-static void
arena_bin_runs_insert(arena_bin_t *bin, extent_t *extent, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(extent, run);
@@ -2390,7 +2194,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
bin->stats.curregs++;
}
malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
+ if (config_prof && arena_prof_accum(tsdn, arena, usize))
prof_idump(tsdn);
if (!zero) {
@@ -2414,71 +2218,6 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
}
void *
-arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
-{
- void *ret;
- size_t usize;
- uintptr_t random_offset;
- arena_run_t *run;
- extent_t *extent;
- arena_chunk_map_misc_t *miscelm;
- UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
-
- /* Large allocation. */
- usize = index2size(binind);
- if (config_cache_oblivious) {
- uint64_t r;
-
- /*
- * Compute a uniformly distributed offset within the first page
- * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
- * for 4 KiB pages and 64-byte cachelines.
- */
- r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE,
- true);
- random_offset = ((uintptr_t)r) << LG_CACHELINE;
- } else
- random_offset = 0;
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
- if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (NULL);
- }
- extent = iealloc(tsdn, run);
- miscelm = arena_run_to_miscelm(extent, run);
- ret = (void *)((uintptr_t)arena_miscelm_to_rpages(extent, miscelm) +
- random_offset);
- if (config_stats) {
- szind_t index = binind - NBINS;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += usize;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
- }
- if (config_prof)
- idump = arena_prof_accum_locked(arena, usize);
- malloc_mutex_unlock(tsdn, &arena->lock);
- if (config_prof && idump)
- prof_idump(tsdn);
-
- if (!zero) {
- if (config_fill) {
- if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
- else if (unlikely(opt_zero))
- memset(ret, 0, usize);
- }
- }
-
- arena_decay_tick(tsdn, arena);
- return (ret);
-}
-
-void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero)
{
@@ -2492,106 +2231,9 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
if (likely(size <= SMALL_MAXCLASS))
return (arena_malloc_small(tsdn, arena, ind, zero));
- if (likely(size <= large_maxclass))
- return (arena_malloc_large(tsdn, arena, ind, zero));
return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
-/* Only handles large allocations that require more than page alignment. */
-static void *
-arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero)
-{
- void *ret;
- size_t alloc_size, leadsize, trailsize;
- arena_run_t *run;
- extent_t *extent;
- arena_chunk_t *chunk;
- arena_chunk_map_misc_t *miscelm;
- void *rpages;
-
- assert(!tsdn_null(tsdn) || arena != NULL);
- assert(usize == PAGE_CEILING(usize));
-
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- alignment = PAGE_CEILING(alignment);
- alloc_size = usize + large_pad + alignment;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
- if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (NULL);
- }
- extent = iealloc(tsdn, run);
- chunk = (arena_chunk_t *)extent_base_get(extent);
- miscelm = arena_run_to_miscelm(extent, run);
- rpages = arena_miscelm_to_rpages(extent, miscelm);
-
- leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
- (uintptr_t)rpages;
- assert(alloc_size >= leadsize + usize);
- trailsize = alloc_size - leadsize - usize - large_pad;
- if (leadsize != 0) {
- arena_chunk_map_misc_t *head_miscelm = miscelm;
- arena_run_t *head_run = run;
- extent_t *head_extent = extent;
-
- miscelm = arena_miscelm_get_mutable(chunk,
- arena_miscelm_to_pageind(head_extent, head_miscelm) +
- (leadsize >> LG_PAGE));
- run = &miscelm->run;
- extent = iealloc(tsdn, run);
-
- arena_run_trim_head(tsdn, arena, chunk, head_extent, head_run,
- alloc_size, alloc_size - leadsize);
- }
- if (trailsize != 0) {
- arena_run_trim_tail(tsdn, arena, chunk, extent, run, usize +
- large_pad + trailsize, usize + large_pad, false);
- }
- if (arena_run_init_large(tsdn, arena, extent, run, usize + large_pad,
- zero)) {
- size_t run_ind = arena_miscelm_to_pageind(extent,
- arena_run_to_miscelm(extent, run));
- bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
- bool decommitted = (arena_mapbits_decommitted_get(chunk,
- run_ind) != 0);
-
- assert(decommitted); /* Cause of OOM. */
- arena_run_dalloc(tsdn, arena, extent, run, dirty, false,
- decommitted);
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (NULL);
- }
- ret = arena_miscelm_to_rpages(extent, miscelm);
-
- if (config_stats) {
- szind_t index = size2index(usize) - NBINS;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += usize;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- if (config_fill && !zero) {
- if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
- else if (unlikely(opt_zero))
- memset(ret, 0, usize);
- }
- arena_decay_tick(tsdn, arena);
- return (ret);
-}
-
void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
@@ -2603,22 +2245,8 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
/* Small; alignment doesn't require special run placement. */
ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
- } else if (usize <= large_maxclass && alignment <= PAGE) {
- /*
- * Large; alignment doesn't require special run placement.
- * However, the cached pointer may be at a random offset from
- * the base of the run, so do some bit manipulation to retrieve
- * the base.
- */
- ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
- tcache, true);
- if (config_cache_oblivious)
- ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else {
- if (likely(usize <= large_maxclass)) {
- ret = arena_palloc_large(tsdn, arena, usize, alignment,
- zero);
- } else if (likely(alignment <= CACHELINE))
+ if (likely(alignment <= CACHELINE))
ret = huge_malloc(tsdn, arena, usize, zero);
else
ret = huge_palloc(tsdn, arena, usize, alignment, zero);
@@ -2627,27 +2255,49 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
}
void
-arena_prof_promoted(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
- size_t size)
+arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
+ size_t usize)
{
- arena_chunk_t *chunk;
- size_t pageind;
- szind_t binind;
cassert(config_prof);
assert(ptr != NULL);
- assert(isalloc(tsdn, extent, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, extent, ptr, true) == LARGE_MINCLASS);
- assert(size <= SMALL_MAXCLASS);
+ assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
+ assert(usize <= SMALL_MAXCLASS);
- chunk = (arena_chunk_t *)extent_base_get(extent);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- binind = size2index(size);
- assert(binind < NBINS);
- arena_mapbits_large_binind_set(chunk, pageind, binind);
+ extent_usize_set(extent, usize);
- assert(isalloc(tsdn, extent, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, extent, ptr, true) == size);
+ assert(isalloc(tsdn, extent, ptr) == usize);
+}
+
+static size_t
+arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ extent_usize_set(extent, LARGE_MINCLASS);
+
+ assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
+
+ return (LARGE_MINCLASS);
+}
+
+void
+arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
+ tcache_t *tcache, bool slow_path)
+{
+ size_t usize;
+
+ cassert(config_prof);
+ assert(opt_prof);
+
+ usize = arena_prof_demote(tsdn, extent, ptr);
+ if (usize <= tcache_maxclass) {
+ tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr, usize,
+ slow_path);
+ } else
+ huge_dalloc(tsdn, extent);
}
static void
@@ -2792,274 +2442,6 @@ arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_decay_tick(tsdn, arena);
}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
-#endif
-void
-arena_dalloc_junk_large(void *ptr, size_t usize)
-{
-
- if (config_fill && unlikely(opt_junk_free))
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
-}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
-arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(n_arena_dalloc_junk_large);
-#endif
-
-static void
-arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, extent_t *extent, void *ptr, bool junked)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
- arena_run_t *run = &miscelm->run;
-
- if (config_fill || config_stats) {
- size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
- large_pad;
-
- if (!junked)
- arena_dalloc_junk_large(ptr, usize);
- if (config_stats) {
- szind_t index = size2index(usize) - NBINS;
-
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= usize;
- arena->stats.lstats[index].ndalloc++;
- arena->stats.lstats[index].curruns--;
- }
- }
-
- arena_run_dalloc(tsdn, arena, extent, run, true, false, false);
-}
-
-void
-arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, extent_t *extent, void *ptr)
-{
-
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, extent, ptr, true);
-}
-
-void
-arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, void *ptr)
-{
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, extent, ptr, false);
- malloc_mutex_unlock(tsdn, &arena->lock);
- arena_decay_tick(tsdn, arena);
-}
-
-static void
-arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- extent_t *extent, void *ptr, size_t oldsize, size_t size)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
- arena_run_t *run = &miscelm->run;
-
- assert(size < oldsize);
-
- /*
- * Shrink the run, and make trailing pages available for other
- * allocations.
- */
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_trim_tail(tsdn, arena, chunk, extent, run, oldsize +
- large_pad, size + large_pad, true);
- if (config_stats) {
- szind_t oldindex = size2index(oldsize) - NBINS;
- szind_t index = size2index(size) - NBINS;
-
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[oldindex].ndalloc++;
- arena->stats.lstats[oldindex].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-static bool
-arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t npages = (oldsize + large_pad) >> LG_PAGE;
- size_t followsize;
-
- assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
- large_pad);
-
- /* Try to extend the run. */
- malloc_mutex_lock(tsdn, &arena->lock);
- if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
- pageind+npages) != 0)
- goto label_fail;
- followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
- if (oldsize + followsize >= usize_min) {
- /*
- * The next run is available and sufficiently large. Split the
- * following run, then merge the first part with the existing
- * allocation.
- */
- arena_run_t *run;
- size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
-
- usize = usize_max;
- while (oldsize + followsize < usize)
- usize = index2size(size2index(usize)-1);
- assert(usize >= usize_min);
- assert(usize >= oldsize);
- splitsize = usize - oldsize;
- if (splitsize == 0)
- goto label_fail;
-
- run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
- if (arena_run_split_large(tsdn, arena, iealloc(tsdn, run), run,
- splitsize, zero))
- goto label_fail;
-
- if (config_cache_oblivious && zero) {
- /*
- * Zero the trailing bytes of the original allocation's
- * last page, since they are in an indeterminate state.
- * There will always be trailing bytes, because ptr's
- * offset from the beginning of the run is a multiple of
- * CACHELINE in [0 .. PAGE).
- */
- void *zbase = (void *)((uintptr_t)ptr + oldsize);
- void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
- PAGE));
- size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
- assert(nzero > 0);
- memset(zbase, 0, nzero);
- }
-
- size = oldsize + splitsize;
- npages = (size + large_pad) >> LG_PAGE;
-
- /*
- * Mark the extended run as dirty if either portion of the run
- * was dirty before allocation. This is rather pedantic,
- * because there's not actually any sequence of events that
- * could cause the resulting run to be passed to
- * arena_run_dalloc() with the dirty argument set to false
- * (which is when dirty flag consistency would really matter).
- */
- flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
- arena_mapbits_dirty_get(chunk, pageind+npages-1);
- flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
- arena_mapbits_large_set(chunk, pageind, size + large_pad,
- flag_dirty | (flag_unzeroed_mask &
- arena_mapbits_unzeroed_get(chunk, pageind)));
- arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
- (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
- pageind+npages-1)));
-
- if (config_stats) {
- szind_t oldindex = size2index(oldsize) - NBINS;
- szind_t index = size2index(size) - NBINS;
-
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[oldindex].ndalloc++;
- arena->stats.lstats[oldindex].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[index].nmalloc++;
- arena->stats.lstats[index].nrequests++;
- arena->stats.lstats[index].curruns++;
- }
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (false);
- }
-label_fail:
- malloc_mutex_unlock(tsdn, &arena->lock);
- return (true);
-}
-
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
-#endif
-static void
-arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
-{
-
- if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
- old_usize - usize);
- }
-}
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
-arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(n_arena_ralloc_junk_large);
-#endif
-
-/*
- * Try to resize a large allocation, in order to avoid copying. This will
- * always fail if growing an object, and the following run is already in use.
- */
-static bool
-arena_ralloc_large(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero)
-{
- arena_chunk_t *chunk;
- arena_t *arena;
-
- if (oldsize == usize_max) {
- /* Current size class is compatible and maximal. */
- return (false);
- }
-
- chunk = (arena_chunk_t *)extent_base_get(extent);
- arena = extent_arena_get(extent);
-
- if (oldsize < usize_max) {
- bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
- oldsize, usize_min, usize_max, zero);
- if (config_fill && !ret && !zero) {
- if (unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK,
- isalloc(tsdn, extent, ptr, config_prof) -
- oldsize);
- } else if (unlikely(opt_zero)) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- isalloc(tsdn, extent, ptr, config_prof) -
- oldsize);
- }
- }
- return (ret);
- }
-
- assert(oldsize > usize_max);
- /* Fill before shrinking in order avoid a race. */
- arena_ralloc_junk_large(ptr, oldsize, usize_max);
- arena_ralloc_large_shrink(tsdn, arena, chunk, extent, ptr, oldsize,
- usize_max);
- return (false);
-}
-
bool
arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
size_t size, size_t extra, bool zero)
@@ -3074,29 +2456,21 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
usize_min = s2u(size);
usize_max = s2u(size + extra);
- if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
+ if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
/*
* Avoid moving the allocation if the size class can be left the
* same.
*/
- if (oldsize <= SMALL_MAXCLASS) {
- assert(arena_bin_info[size2index(oldsize)].reg_size ==
- oldsize);
- if ((usize_max > SMALL_MAXCLASS ||
- size2index(usize_max) != size2index(oldsize)) &&
- (size > oldsize || usize_max < oldsize))
- return (true);
- } else {
- if (usize_max <= SMALL_MAXCLASS)
- return (true);
- if (arena_ralloc_large(tsdn, extent, ptr, oldsize,
- usize_min, usize_max, zero))
- return (true);
- }
+ assert(arena_bin_info[size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) !=
+ size2index(oldsize)) && (size > oldsize || usize_max <
+ oldsize))
+ return (true);
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
- } else if (oldsize >= chunksize && usize_max >= chunksize) {
+ } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
return (huge_ralloc_no_move(tsdn, extent, usize_min, usize_max,
zero));
}
@@ -3129,14 +2503,14 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
return (NULL);
- if (likely(usize <= large_maxclass)) {
+ if (likely(usize <= SMALL_MAXCLASS)) {
/* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0,
zero))
return (ptr);
}
- if (oldsize >= chunksize && usize >= chunksize) {
+ if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
return (huge_ralloc(tsdn, arena, extent, usize, alignment, zero,
tcache));
}
@@ -3252,8 +2626,7 @@ void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty, arena_stats_t *astats,
- malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
- malloc_huge_stats_t *hstats)
+ malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats)
{
unsigned i;
@@ -3270,24 +2643,15 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
astats->purged += arena->stats.purged;
astats->metadata_mapped += arena->stats.metadata_mapped;
astats->metadata_allocated += arena_metadata_allocated_get(arena);
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
astats->allocated_huge += arena->stats.allocated_huge;
astats->nmalloc_huge += arena->stats.nmalloc_huge;
astats->ndalloc_huge += arena->stats.ndalloc_huge;
+ astats->nrequests_huge += arena->stats.nrequests_huge;
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
- }
-
- for (i = 0; i < nhclasses; i++) {
+ for (i = 0; i < NSIZES - NBINS; i++) {
hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
+ hstats[i].nrequests += arena->stats.hstats[i].nrequests;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
malloc_mutex_unlock(tsdn, &arena->lock);
@@ -3338,17 +2702,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena_t *arena;
unsigned i;
- /*
- * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
- * because there is no way to clean up if base_alloc() OOMs.
- */
- if (config_stats) {
- arena = (arena_t *)base_alloc(tsdn,
- CACHELINE_CEILING(sizeof(arena_t)) +
- QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)) +
- (nhclasses * sizeof(malloc_huge_stats_t))));
- } else
- arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
+ arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
if (arena == NULL)
return (NULL);
@@ -3357,20 +2711,8 @@ arena_new(tsdn_t *tsdn, unsigned ind)
if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
return (NULL);
- if (config_stats) {
- memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
- + CACHELINE_CEILING(sizeof(arena_t)));
- memset(arena->stats.lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
- arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
- + CACHELINE_CEILING(sizeof(arena_t)) +
- QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
- memset(arena->stats.hstats, 0, nhclasses *
- sizeof(malloc_huge_stats_t));
- if (config_tcache)
- ql_new(&arena->tcache_ql);
- }
+ if (config_stats && config_tcache)
+ ql_new(&arena->tcache_ql);
if (config_prof)
arena->prof_accumbytes = 0;
@@ -3476,18 +2818,6 @@ arena_boot(void)
arena_maxrun = chunksize - (map_bias << LG_PAGE);
assert(arena_maxrun > 0);
- large_maxclass = index2size(size2index(chunksize)-1);
- if (large_maxclass > arena_maxrun) {
- /*
- * For small chunk sizes it's possible for there to be fewer
- * non-header pages available than are necessary to serve the
- * size classes just below chunksize.
- */
- large_maxclass = arena_maxrun;
- }
- assert(large_maxclass > 0);
- nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
- nhclasses = NSIZES - nlclasses - NBINS;
}
void
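The arena.c hunks above also replace the old chunk-map bookkeeping for sampled (prof-promoted) allocations with functions that simply rewrite the extent's recorded usable size. Below is a minimal sketch of the allocation-side call pattern, assuming the in-tree internal headers; the wrapper function is hypothetical, but the calls it makes are the ones introduced in this patch.

    /* Hypothetical helper showing the extent-based promotion flow. */
    static void *
    prof_sample_alloc_sketch(tsd_t *tsd, size_t usize, bool zero)
    {
        void *p;

        /* Back the sample with a LARGE_MINCLASS-sized object. */
        p = ialloc(tsd, LARGE_MINCLASS, size2index(LARGE_MINCLASS), zero,
            true);
        if (p == NULL)
            return (NULL);
        /* Record the nominal (small) size on the object's extent. */
        arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
            usize);
        /*
         * At deallocation time, arena_dalloc_promoted() calls
         * arena_prof_demote() and then routes the object to
         * tcache_dalloc_huge() or huge_dalloc().
         */
        return (p);
    }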
diff --git a/src/base.c b/src/base.c
index 1e32d95..134018a 100644
--- a/src/base.c
+++ b/src/base.c
@@ -74,7 +74,8 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
- extent_init(extent, NULL, addr, csize, true, false, true, true, false);
+ extent_init(extent, NULL, addr, csize, 0, true, false, true, true,
+ false);
return (extent);
}
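extent_init() gains a usize argument in this patch. A sketch of the new parameter order, with names inferred from the getters used at the call sites in this diff:

    /*
     * extent_init(extent, arena, addr, size, usize,
     *     active, dirty, zeroed, committed, slab);
     *
     * Base metadata extents (as in base_chunk_alloc() above) pass
     * usize == 0; chunks that back an allocation pass the allocation's
     * usable size.
     */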
diff --git a/src/chunk.c b/src/chunk.c
index 4b213a9..8c4f741 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -369,7 +369,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0) {
extent_t *lead = extent;
extent = chunk_split_wrapper(tsdn, arena, chunk_hooks, lead,
- leadsize, size + trailsize);
+ leadsize, leadsize, size + trailsize, usize + trailsize);
if (extent == NULL) {
chunk_leak(tsdn, arena, chunk_hooks, cache, lead);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -382,7 +382,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = chunk_split_wrapper(tsdn, arena, chunk_hooks,
- extent, size, trailsize);
+ extent, size, usize, trailsize, trailsize);
if (trail == NULL) {
chunk_leak(tsdn, arena, chunk_hooks, cache, extent);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -390,6 +390,12 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
extent_heaps_insert(extent_heaps, trail);
arena_chunk_cache_maybe_insert(arena, trail, cache);
+ } else if (leadsize == 0) {
+ /*
+ * Splitting causes usize to be set as a side effect, but no
+ * splitting occurred.
+ */
+ extent_usize_set(extent, usize);
}
if (!extent_committed_get(extent) &&
@@ -552,7 +558,8 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
extent_dalloc(tsdn, arena, extent);
return (NULL);
}
- extent_init(extent, arena, addr, size, true, false, zero, commit, slab);
+ extent_init(extent, arena, addr, size, usize, true, false, zero, commit,
+ slab);
if (pad != 0)
extent_addr_randomize(tsdn, extent, alignment);
if (chunk_register(tsdn, extent)) {
@@ -635,6 +642,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+ extent_usize_set(extent, 0);
extent_active_set(extent, false);
extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
if (extent_slab_get(extent)) {
@@ -801,7 +809,8 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
extent_t *
chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_t *extent, size_t size_a, size_t size_b)
+ extent_t *extent, size_t size_a, size_t usize_a, size_t size_b,
+ size_t usize_b)
{
extent_t *trail;
rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
@@ -818,9 +827,9 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_t lead;
extent_init(&lead, arena, extent_addr_get(extent), size_a,
- extent_active_get(extent), extent_dirty_get(extent),
- extent_zeroed_get(extent), extent_committed_get(extent),
- extent_slab_get(extent));
+ usize_a, extent_active_get(extent),
+ extent_dirty_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent), extent_slab_get(extent));
if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
&lead_elm_b))
@@ -828,7 +837,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
- size_a), size_b, extent_active_get(extent),
+ size_a), size_b, usize_b, extent_active_get(extent),
extent_dirty_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent), extent_slab_get(extent));
if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
@@ -840,6 +849,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
goto label_error_d;
extent_size_set(extent, size_a);
+ extent_usize_set(extent, usize_a);
extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
@@ -905,6 +915,7 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
b_elm_b = b_elm_a;
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+ extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
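chunk_split_wrapper() now carries a usable size for each half of a split in addition to the committed sizes. A sketch of a call, mirroring the huge_ralloc_no_move_shrink() caller further down in this patch (the variable names are that caller's):

    /*
     * Keep usize + large_pad committed bytes (usable size usize) in the
     * lead extent, and carve off `diff` trailing bytes whose committed
     * and usable sizes coincide.
     */
    extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
        extent, usize + large_pad, usize, diff, diff);
    if (trail == NULL)
        return (true);	/* Split failed; leave the extent unchanged. */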
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 0119c12..e92fda7 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -121,7 +121,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
pad_size = (uintptr_t)ret - (uintptr_t)pad_addr;
if (pad_size != 0) {
extent_init(pad, arena, pad_addr, pad_size,
- false, true, false, true, false);
+ pad_size, false, true, false, true, false);
}
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
diff --git a/src/ctl.c b/src/ctl.c
index 908a285..26bc175 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -49,7 +49,6 @@ static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
const size_t *mib, size_t miblen, size_t i);
-static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
arena_t *arena);
@@ -127,8 +126,6 @@ CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lrun_i_size)
-INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_hchunk_i_size)
INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
@@ -140,7 +137,6 @@ CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_nhchunks)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_thread_active_init)
@@ -154,10 +150,6 @@ CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
-CTL_PROTO(stats_arenas_i_large_allocated)
-CTL_PROTO(stats_arenas_i_large_nmalloc)
-CTL_PROTO(stats_arenas_i_large_ndalloc)
-CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_huge_allocated)
CTL_PROTO(stats_arenas_i_huge_nmalloc)
CTL_PROTO(stats_arenas_i_huge_ndalloc)
@@ -172,11 +164,6 @@ CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
-CTL_PROTO(stats_arenas_i_lruns_j_curruns)
-INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
@@ -323,17 +310,6 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
-static const ctl_named_node_t arenas_lrun_i_node[] = {
- {NAME("size"), CTL(arenas_lrun_i_size)}
-};
-static const ctl_named_node_t super_arenas_lrun_i_node[] = {
- {NAME(""), CHILD(named, arenas_lrun_i)}
-};
-
-static const ctl_indexed_node_t arenas_lrun_node[] = {
- {INDEX(arenas_lrun_i)}
-};
-
static const ctl_named_node_t arenas_hchunk_i_node[] = {
{NAME("size"), CTL(arenas_hchunk_i_size)}
};
@@ -356,8 +332,6 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("nbins"), CTL(arenas_nbins)},
{NAME("nhbins"), CTL(arenas_nhbins)},
{NAME("bin"), CHILD(indexed, arenas_bin)},
- {NAME("nlruns"), CTL(arenas_nlruns)},
- {NAME("lrun"), CHILD(indexed, arenas_lrun)},
{NAME("nhchunks"), CTL(arenas_nhchunks)},
{NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
{NAME("extend"), CTL(arenas_extend)}
@@ -385,13 +359,6 @@ static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
-static const ctl_named_node_t stats_arenas_i_large_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
-};
-
static const ctl_named_node_t stats_arenas_i_huge_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
@@ -418,20 +385,6 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
-static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
- {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
-};
-static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
-};
-
-static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
- {INDEX(stats_arenas_i_lruns_j)}
-};
-
static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
@@ -460,10 +413,8 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("purged"), CTL(stats_arenas_i_purged)},
{NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
- {NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
- {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
{NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
@@ -508,27 +459,6 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
-static bool
-ctl_arena_init(ctl_arena_stats_t *astats)
-{
-
- if (astats->lstats == NULL) {
- astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (astats->lstats == NULL)
- return (true);
- }
-
- if (astats->hstats == NULL) {
- astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
- sizeof(malloc_huge_stats_t));
- if (astats->hstats == NULL)
- return (true);
- }
-
- return (false);
-}
-
static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
@@ -546,9 +476,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
astats->ndalloc_small = 0;
astats->nrequests_small = 0;
memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
- memset(astats->lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
- memset(astats->hstats, 0, nhclasses *
+ memset(astats->hstats, 0, (NSIZES - NBINS) *
sizeof(malloc_huge_stats_t));
}
}
@@ -562,7 +490,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
&cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty, &cstats->astats,
- cstats->bstats, cstats->lstats, cstats->hstats);
+ cstats->bstats, cstats->hstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].curregs *
@@ -604,16 +532,10 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->ndalloc_small += astats->ndalloc_small;
sstats->nrequests_small += astats->nrequests_small;
- sstats->astats.allocated_large +=
- astats->astats.allocated_large;
- sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
- sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
- sstats->astats.nrequests_large +=
- astats->astats.nrequests_large;
-
sstats->astats.allocated_huge += astats->astats.allocated_huge;
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+ sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
for (i = 0; i < NBINS; i++) {
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
@@ -632,17 +554,11 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests +=
- astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
- }
-
- for (i = 0; i < nhclasses; i++) {
+ for (i = 0; i < NSIZES - NBINS; i++) {
sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+ sstats->hstats[i].nrequests +=
+ astats->hstats[i].nrequests;
sstats->hstats[i].curhchunks +=
astats->hstats[i].curhchunks;
}
@@ -680,10 +596,6 @@ ctl_grow(tsdn_t *tsdn)
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
- if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
- a0dalloc(astats);
- return (true);
- }
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
@@ -730,7 +642,6 @@ ctl_refresh(tsdn_t *tsdn)
&base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
- ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
ctl_stats.active =
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
@@ -771,30 +682,6 @@ ctl_init(tsdn_t *tsdn)
}
memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
-
- /*
- * Initialize all stats structures, regardless of whether they
- * ever get used. Lazy initialization would allow errors to
- * cause inconsistent state to be viewable by the application.
- */
- if (config_stats) {
- unsigned i;
- for (i = 0; i <= ctl_stats.narenas; i++) {
- if (ctl_arena_init(&ctl_stats.arenas[i])) {
- unsigned j;
- for (j = 0; j < i; j++) {
- a0dalloc(
- ctl_stats.arenas[j].lstats);
- a0dalloc(
- ctl_stats.arenas[j].hstats);
- }
- a0dalloc(ctl_stats.arenas);
- ctl_stats.arenas = NULL;
- ret = true;
- goto label_return;
- }
- }
- }
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
@@ -1924,25 +1811,13 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
return (super_arenas_bin_i_node);
}
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
-CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
-static const ctl_named_node_t *
-arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > nlclasses)
- return (NULL);
- return (super_arenas_lrun_i_node);
-}
-
-CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
- size_t)
+CTL_RO_NL_GEN(arenas_nhchunks, NSIZES - NBINS, unsigned)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
- if (i > nhclasses)
+ if (i > NSIZES - NBINS)
return (NULL);
return (super_arenas_hchunk_i_node);
}
@@ -2136,14 +2011,6 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
@@ -2182,32 +2049,12 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
return (super_stats_arenas_i_bins_j_node);
}
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
-{
-
- if (j > nlclasses)
- return (NULL);
- return (super_stats_arenas_i_lruns_j_node);
-}
-
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
- ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
- uint64_t)
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
@@ -2216,7 +2063,7 @@ stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j)
{
- if (j > nhclasses)
+ if (j > NSIZES - NBINS)
return (NULL);
return (super_stats_arenas_i_hchunks_j_node);
}
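With the lrun/lruns mallctl nodes gone, every size class above the small bins is reported through the pre-existing hchunk/hchunks names, indexed from 0 starting at NBINS. A small standalone program, hypothetical but using only public mallctl names that survive this patch, that lists those classes:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned nhchunks, j;
        size_t sz = sizeof(nhchunks);

        /* Number of non-small size classes (NSIZES - NBINS). */
        if (mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0) != 0)
            return (1);
        for (j = 0; j < nhchunks; j++) {
            char name[64];
            size_t hsize, hsz = sizeof(hsize);

            snprintf(name, sizeof(name), "arenas.hchunk.%u.size", j);
            if (mallctl(name, &hsize, &hsz, NULL, 0) != 0)
                break;
            printf("class %u: %zu bytes\n", j, hsize);
        }
        return (0);
    }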
diff --git a/src/extent.c b/src/extent.c
index d7f3b6c..757a6e2 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -40,7 +40,7 @@ extent_size_quantize_floor(size_t size)
pszind_t pind;
assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
+ assert(size - large_pad <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
assert(size != 0);
@@ -77,7 +77,7 @@ extent_size_quantize_ceil(size_t size)
size_t ret;
assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
+ assert(size - large_pad <= HUGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
diff --git a/src/huge.c b/src/huge.c
index b00be90..5375b59 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -19,6 +19,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
size_t ausize;
extent_t *extent;
bool is_zeroed;
+ UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
assert(!tsdn_null(tsdn) || arena != NULL);
@@ -42,6 +43,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->huge, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ if (config_prof && arena_prof_accum(tsdn, arena, usize))
+ prof_idump(tsdn);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed) {
@@ -61,8 +64,20 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
+void
+huge_dalloc_junk(void *ptr, size_t usize)
+{
+
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
+}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
+#endif
+
static void
-huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
+huge_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk_free)) {
@@ -71,14 +86,10 @@ huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
-			memset(ptr, JEMALLOC_FREE_JUNK, usize);
+			huge_dalloc_junk(ptr, usize);
}
}
-#ifdef JEMALLOC_JET
-#undef huge_dalloc_junk
-#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
-huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
-#endif
static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
@@ -93,12 +104,12 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
- extent, usize + large_pad, diff);
+ extent, usize + large_pad, usize, diff, diff);
if (trail == NULL)
return (true);
if (config_fill && unlikely(opt_junk_free)) {
- huge_dalloc_junk(tsdn, extent_addr_get(trail),
+ huge_dalloc_maybe_junk(tsdn, extent_addr_get(trail),
extent_usize_get(trail));
}
@@ -176,7 +187,8 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
/* Both allocation sizes must be huge to avoid a move. */
- assert(extent_usize_get(extent) >= chunksize && usize_max >= chunksize);
+ assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize_max >=
+ LARGE_MINCLASS);
if (usize_max > extent_usize_get(extent)) {
/* Attempt to expand the allocation in-place. */
@@ -234,7 +246,8 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Both allocation sizes must be huge to avoid a move. */
- assert(extent_usize_get(extent) >= chunksize && usize >= chunksize);
+ assert(extent_usize_get(extent) >= LARGE_MINCLASS && usize >=
+ LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero))
@@ -257,21 +270,39 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
return (ret);
}
-void
-huge_dalloc(tsdn_t *tsdn, extent_t *extent)
+static void
+huge_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
{
arena_t *arena;
arena = extent_arena_get(extent);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ if (!junked_locked)
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_remove(&arena->huge, extent, ql_link);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ if (!junked_locked) {
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- huge_dalloc_junk(tsdn, extent_addr_get(extent),
- extent_usize_get(extent));
- arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent), extent);
+ huge_dalloc_maybe_junk(tsdn, extent_addr_get(extent),
+ extent_usize_get(extent));
+ }
+ arena_chunk_dalloc_huge(tsdn, arena, extent, junked_locked);
- arena_decay_tick(tsdn, arena);
+ if (!junked_locked)
+ arena_decay_tick(tsdn, arena);
+}
+
+void
+huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent)
+{
+
+ huge_dalloc_impl(tsdn, extent, true);
+}
+
+void
+huge_dalloc(tsdn_t *tsdn, extent_t *extent)
+{
+
+ huge_dalloc_impl(tsdn, extent, false);
}
size_t
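huge_dalloc() now has a junked_locked twin for the tcache flush path, which already holds the owning arena's lock and has junk filled the object. A hypothetical helper sketching how a non-small object reaches the two entry points (assumes the in-tree internal headers):

    static void
    huge_dalloc_sketch(tsdn_t *tsdn, extent_t *extent, tcache_t *tcache,
        size_t usize)
    {
        if (tcache != NULL && usize <= tcache_maxclass) {
            /*
             * Cache the object; a later tcache_bin_flush_huge() calls
             * huge_dalloc_junked_locked() under the arena lock.
             */
            tcache_dalloc_huge(tsdn_tsd(tsdn), tcache,
                extent_addr_get(extent), usize, true);
        } else {
            /* Immediate path: takes huge_mtx and junk fills if enabled. */
            huge_dalloc(tsdn, extent);
        }
    }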
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 479d831..9f8bd01 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1401,7 +1401,7 @@ ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+ arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
} else
p = ialloc(tsd, usize, ind, zero, slow_path);
@@ -1483,8 +1483,7 @@ ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
- assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
- config_prof));
+ assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
}
witness_assert_lockless(tsdn);
@@ -1527,7 +1526,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+ arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
} else
p = ipalloc(tsd, usize, alignment, false);
@@ -1608,7 +1607,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
label_return:
if (config_stats && likely(result != NULL)) {
assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
- result), result, config_prof));
+ result), result));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
@@ -1699,7 +1698,7 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+ arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
} else
p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
@@ -1748,10 +1747,10 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
extent = iealloc(tsd_tsdn(tsd), ptr);
if (config_prof && opt_prof) {
- usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+ usize = isalloc(tsd_tsdn(tsd), extent, ptr);
prof_free(tsd, extent, ptr, usize);
} else if (config_stats)
- usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+ usize = isalloc(tsd_tsdn(tsd), extent, ptr);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
@@ -1815,7 +1814,7 @@ je_realloc(void *ptr, size_t size)
witness_assert_lockless(tsd_tsdn(tsd));
extent = iealloc(tsd_tsdn(tsd), ptr);
- old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+ old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
if (config_prof && opt_prof) {
usize = s2u(size);
ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
@@ -1848,8 +1847,7 @@ je_realloc(void *ptr, size_t size)
if (config_stats && likely(ret != NULL)) {
tsd_t *tsd;
- assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
- config_prof));
+ assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret));
tsd = tsdn_tsd(tsdn);
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
@@ -2003,7 +2001,7 @@ imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache, arena, slow_path);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
+ arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
} else
p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
slow_path);
@@ -2138,7 +2136,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
+ arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
} else {
p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment,
zero, tcache, arena);
@@ -2182,7 +2180,7 @@ irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
* reallocation. Therefore, query the actual value of usize.
*/
e = extent;
- *usize = isalloc(tsd_tsdn(tsd), e, p, config_prof);
+ *usize = isalloc(tsd_tsdn(tsd), e, p);
} else
e = iealloc(tsd_tsdn(tsd), p);
prof_realloc(tsd, e, p, *usize, tctx, prof_active, true, old_ptr,
@@ -2229,7 +2227,7 @@ je_rallocx(void *ptr, size_t size, int flags)
} else
tcache = tcache_get(tsd, true);
- old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+ old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2246,7 +2244,7 @@ je_rallocx(void *ptr, size_t size, int flags)
goto label_oom;
if (config_stats) {
usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
- p), p, config_prof);
+ p), p);
}
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2276,7 +2274,7 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
- usize = isalloc(tsdn, extent, ptr, config_prof);
+ usize = isalloc(tsdn, extent, ptr);
return (usize);
}
@@ -2363,7 +2361,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
witness_assert_lockless(tsd_tsdn(tsd));
extent = iealloc(tsd_tsdn(tsd), ptr);
- old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
+ old_usize = isalloc(tsd_tsdn(tsd), extent, ptr);
/*
* The API explicitly absolves itself of protecting against (size +
@@ -2414,9 +2412,9 @@ je_sallocx(const void *ptr, int flags)
witness_assert_lockless(tsdn);
if (config_ivsalloc)
- usize = ivsalloc(tsdn, ptr, config_prof);
+ usize = ivsalloc(tsdn, ptr);
else
- usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr, config_prof);
+ usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);
witness_assert_lockless(tsdn);
return (usize);
@@ -2477,7 +2475,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
tsd = tsd_fetch();
extent = iealloc(tsd_tsdn(tsd), ptr);
usize = inallocx(tsd_tsdn(tsd), size, flags);
- assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, config_prof));
+ assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
@@ -2593,10 +2591,10 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
witness_assert_lockless(tsdn);
if (config_ivsalloc)
- ret = ivsalloc(tsdn, ptr, config_prof);
+ ret = ivsalloc(tsdn, ptr);
else {
- ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr), ptr,
- config_prof);
+ ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr),
+ ptr);
}
witness_assert_lockless(tsdn);
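Throughout jemalloc.c the demote flag disappears from the size queries; with promotion now recorded on the extent itself, isalloc() and ivsalloc() have a single answer. The new call shapes, as a sketch (tsdn and ptr stand in for the usual locals in these functions):

    size_t usize;

    if (config_ivsalloc)
        usize = ivsalloc(tsdn, ptr);    /* also checks ptr maps to a chunk */
    else
        usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr);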
diff --git a/src/stats.c b/src/stats.c
index 0e1442e..4dc48d5 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -37,12 +37,10 @@ size_t stats_cactive = 0;
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
-static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i);
static void stats_arena_hchunks_print(
void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i, bool bins, bool large, bool huge);
+ void *cbopaque, unsigned i, bool bins, bool huge);
/******************************************************************************/
@@ -158,63 +156,16 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
static void
-stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i)
-{
- unsigned nbins, nlruns, j;
- bool in_gap;
-
- malloc_cprintf(write_cb, cbopaque,
- "large: size ind allocated nmalloc ndalloc"
- " nrequests curruns\n");
- CTL_GET("arenas.nbins", &nbins, unsigned);
- CTL_GET("arenas.nlruns", &nlruns, unsigned);
- for (j = 0, in_gap = false; j < nlruns; j++) {
- uint64_t nmalloc, ndalloc, nrequests;
- size_t run_size, curruns;
-
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
- &nrequests, uint64_t);
- if (nrequests == 0)
- in_gap = true;
- else {
- CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
- &curruns, size_t);
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- in_gap = false;
- }
- malloc_cprintf(write_cb, cbopaque,
- "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64" %12zu\n",
- run_size, nbins + j, curruns * run_size, nmalloc,
- ndalloc, nrequests, curruns);
- }
- }
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
-}
-
-static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i)
{
- unsigned nbins, nlruns, nhchunks, j;
+ unsigned nbins, nhchunks, j;
bool in_gap;
malloc_cprintf(write_cb, cbopaque,
"huge: size ind allocated nmalloc ndalloc"
" nrequests curhchunks\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
- CTL_GET("arenas.nlruns", &nlruns, unsigned);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
for (j = 0, in_gap = false; j < nhchunks; j++) {
uint64_t nmalloc, ndalloc, nrequests;
@@ -241,7 +192,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
- hchunk_size, nbins + nlruns + j,
+ hchunk_size, nbins + j,
curhchunks * hchunk_size, nmalloc, ndalloc,
nrequests, curhchunks);
}
@@ -254,7 +205,7 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i, bool bins, bool large, bool huge)
+ unsigned i, bool bins, bool huge)
{
unsigned nthreads;
const char *dss;
@@ -264,8 +215,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
- size_t large_allocated;
- uint64_t large_nmalloc, large_ndalloc, large_nrequests;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
@@ -318,16 +267,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"small: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
- CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
- size_t);
- CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
- CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
- CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
- uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "large: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
@@ -340,10 +279,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"total: %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64"\n",
- small_allocated + large_allocated + huge_allocated,
- small_nmalloc + large_nmalloc + huge_nmalloc,
- small_ndalloc + large_ndalloc + huge_ndalloc,
- small_nrequests + large_nrequests + huge_nrequests);
+ small_allocated + huge_allocated, small_nmalloc + huge_nmalloc,
+ small_ndalloc + huge_ndalloc, small_nrequests + huge_nrequests);
malloc_cprintf(write_cb, cbopaque,
"active: %12zu\n", pactive * page);
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
@@ -362,8 +299,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (bins)
stats_arena_bins_print(write_cb, cbopaque, i);
- if (large)
- stats_arena_lruns_print(write_cb, cbopaque, i);
if (huge)
stats_arena_hchunks_print(write_cb, cbopaque, i);
}
@@ -379,7 +314,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool merged = true;
bool unmerged = true;
bool bins = true;
- bool large = true;
bool huge = true;
/*
@@ -421,9 +355,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bins = false;
break;
case 'l':
- large = false;
- break;
- case 'h':
huge = false;
break;
default:;
@@ -636,7 +567,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
- narenas, bins, large, huge);
+ narenas, bins, huge);
}
}
}
@@ -662,8 +593,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
- cbopaque, i, bins, large,
- huge);
+ cbopaque, i, bins, huge);
}
}
}
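On the reporting side, the opts string for malloc_stats_print() changes accordingly: 'b' still omits per-bin statistics, and 'l' now omits the single remaining per-size-class table covering everything above the small bins (a separate 'h' flag is no longer recognized). A runnable example against the public API:

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        void *p = malloc(1);

        /* Print merged and per-arena stats, but no per-size-class tables. */
        malloc_stats_print(NULL, NULL, "bl");
        free(p);
        return (0);
    }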
diff --git a/src/tcache.c b/src/tcache.c
index d3ef999..41074d3 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -27,7 +27,7 @@ size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr)
{
- return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr, false));
+ return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr));
}
void
@@ -46,7 +46,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tbin->ncached - tbin->low_water + (tbin->low_water
>> 2));
} else {
- tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ tcache_bin_flush_huge(tsd, tbin, binind, tbin->ncached
- tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
@@ -170,7 +170,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
void
-tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache)
{
arena_t *arena;
@@ -200,9 +200,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
if (config_stats) {
merged_stats = true;
- arena->stats.nrequests_large +=
+ arena->stats.nrequests_huge +=
tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
+ arena->stats.hstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
@@ -213,10 +213,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
assert(ptr != NULL);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent_arena_get(extent) == locked_arena) {
- arena_chunk_t *chunk =
- (arena_chunk_t *)extent_base_get(extent);
- arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
- locked_arena, chunk, extent, ptr);
+ huge_dalloc_junked_locked(tsd_tsdn(tsd),
+ extent);
} else {
/*
* This object was allocated via a different
@@ -240,8 +238,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* arena, so the stats didn't get merged. Manually do so now.
*/
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
+ arena->stats.nrequests_huge += tbin->tstats.nrequests;
+ arena->stats.hstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
@@ -379,12 +377,12 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
+ tcache_bin_flush_huge(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[i - NBINS].nrequests +=
+ arena->stats.nrequests_huge += tbin->tstats.nrequests;
+ arena->stats.hstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
@@ -439,10 +437,10 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
}
for (; i < nhbins; i++) {
- malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
+ malloc_huge_stats_t *hstats = &arena->stats.hstats[i - NBINS];
tcache_bin_t *tbin = &tcache->tbins[i];
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- lstats->nrequests += tbin->tstats.nrequests;
+ arena->stats.nrequests_huge += tbin->tstats.nrequests;
+ hstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
@@ -516,14 +514,9 @@ tcache_boot(tsdn_t *tsdn)
{
unsigned i;
- /*
- * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
- * known.
- */
+ /* If necessary, clamp opt_lg_tcache_max. */
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
- else if ((1U << opt_lg_tcache_max) > large_maxclass)
- tcache_maxclass = large_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
@@ -550,7 +543,7 @@ tcache_boot(tsdn_t *tsdn)
stack_nelms += tcache_bin_info[i].ncached_max;
}
for (; i < nhbins; i++) {
- tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+ tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_HUGE;
stack_nelms += tcache_bin_info[i].ncached_max;
}
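The tcache_boot() clamp above is now one-sided: the ceiling that large_maxclass used to impose is gone, and only the lower bound at SMALL_MAXCLASS remains. Restated as a standalone helper (a sketch only; small_maxclass stands in for the build-dependent SMALL_MAXCLASS constant):

#include <stddef.h>

/*
 * Mirror of the new clamp in tcache_boot(): negative or too-small values
 * are raised to the maximum small size class; anything else is taken
 * verbatim, with no upper clamp at large_maxclass.
 */
size_t
clamp_tcache_maxclass(int lg_tcache_max, size_t small_maxclass)
{
	if (lg_tcache_max < 0 ||
	    ((size_t)1 << lg_tcache_max) < small_maxclass)
		return (small_maxclass);
	return ((size_t)1 << lg_tcache_max);
}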
diff --git a/src/zone.c b/src/zone.c
index 2c17123..4609503 100644
--- a/src/zone.c
+++ b/src/zone.c
@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
- return (ivsalloc(tsdn_fetch(), ptr, config_prof));
+ return (ivsalloc(tsdn_fetch(), ptr));
}
static void *
@@ -87,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
- if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
je_free(ptr);
return;
}
@@ -99,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
- if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
+ if (ivsalloc(tsdn_fetch(), ptr) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
@@ -123,7 +123,7 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
size_t alloc_size;
- alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
+ alloc_size = ivsalloc(tsdn_fetch(), ptr);
if (alloc_size != 0) {
assert(alloc_size == size);
je_free(ptr);
diff --git a/test/integration/chunk.c b/test/integration/chunk.c
index 092472c..3aad7a8 100644
--- a/test/integration/chunk.c
+++ b/test/integration/chunk.c
@@ -120,7 +120,7 @@ chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
TEST_BEGIN(test_chunk)
{
void *p;
- size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
+ size_t old_size, new_size, huge0, huge1, huge2, sz;
unsigned arena_ind;
int flags;
size_t hooks_mib[3], purge_mib[3];
@@ -162,14 +162,8 @@ TEST_BEGIN(test_chunk)
assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");
- /* Get large size classes. */
- sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
- "Unexpected arenas.lrun.0.size failure");
- assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
- "Unexpected arenas.lrun.1.size failure");
-
/* Get huge size classes. */
+ sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
"Unexpected arenas.hchunk.0.size failure");
assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
@@ -224,24 +218,6 @@ TEST_BEGIN(test_chunk)
do_dalloc = true;
do_decommit = false;
- /* Test decommit for large allocations. */
- do_decommit = true;
- p = mallocx(large1, flags);
- assert_ptr_not_null(p, "Unexpected mallocx() error");
- assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
- 0, "Unexpected arena.%u.purge error", arena_ind);
- did_decommit = false;
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
- "Unexpected xallocx() failure");
- assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
- 0, "Unexpected arena.%u.purge error", arena_ind);
- did_commit = false;
- assert_zu_eq(xallocx(p, large1, 0, flags), large1,
- "Unexpected xallocx() failure");
- assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
- dallocx(p, flags);
- do_decommit = false;
-
/* Make sure non-huge allocation succeeds. */
p = mallocx(42, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
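With the lrun queries dropped, the test pulls every concrete size it needs from the arenas.hchunk.<i>.size namespace. In isolation that is just a pair of mallctl() reads; a self-contained version (the names are the same ones test_chunk uses, error handling reduced to an early return):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t huge0, huge1, sz = sizeof(size_t);

	/* First two huge size classes, as read by test_chunk. */
	if (mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0) != 0 ||
	    mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0) != 0)
		return (1);
	printf("huge0=%zu huge1=%zu\n", huge0, huge1);
	return (0);
}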
diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c
index ad292bb..7af1b19 100644
--- a/test/integration/xallocx.c
+++ b/test/integration/xallocx.c
@@ -92,13 +92,6 @@ get_nsmall(void)
}
static unsigned
-get_nlarge(void)
-{
-
- return (get_nsizes_impl("arenas.nlruns"));
-}
-
-static unsigned
get_nhuge(void)
{
@@ -132,13 +125,6 @@ get_small_size(size_t ind)
}
static size_t
-get_large_size(size_t ind)
-{
-
- return (get_size_impl("arenas.lrun.0.size", ind));
-}
-
-static size_t
get_huge_size(size_t ind)
{
@@ -239,81 +225,14 @@ TEST_BEGIN(test_extra_small)
}
TEST_END
-TEST_BEGIN(test_extra_large)
-{
- int flags = MALLOCX_ARENA(arena_ind());
- size_t smallmax, large0, large1, large2, huge0, hugemax;
- void *p;
-
- /* Get size classes. */
- smallmax = get_small_size(get_nsmall()-1);
- large0 = get_large_size(0);
- large1 = get_large_size(1);
- large2 = get_large_size(2);
- huge0 = get_huge_size(0);
- hugemax = get_huge_size(get_nhuge()-1);
-
- p = mallocx(large2, flags);
- assert_ptr_not_null(p, "Unexpected mallocx() error");
-
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
- "Unexpected xallocx() behavior");
- /* Test size decrease with zero extra. */
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
- "Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, smallmax, 0, flags), large0,
- "Unexpected xallocx() behavior");
-
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
- "Unexpected xallocx() behavior");
- /* Test size decrease with non-zero extra. */
- assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
- "Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2,
- "Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1,
- "Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0,
- "Unexpected xallocx() behavior");
-
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
- "Unexpected xallocx() behavior");
- /* Test size increase with zero extra. */
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
- "Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, huge0, 0, flags), large2,
- "Unexpected xallocx() behavior");
-
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
- "Unexpected xallocx() behavior");
- /* Test size increase with non-zero extra. */
- assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0,
- "Unexpected xallocx() behavior");
-
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
- "Unexpected xallocx() behavior");
- /* Test size increase with non-zero extra. */
- assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
- "Unexpected xallocx() behavior");
-
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
- "Unexpected xallocx() behavior");
- /* Test size+extra overflow. */
- assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0,
- "Unexpected xallocx() behavior");
-
- dallocx(p, flags);
-}
-TEST_END
-
TEST_BEGIN(test_extra_huge)
{
int flags = MALLOCX_ARENA(arena_ind());
- size_t largemax, huge1, huge2, huge3, hugemax;
+ size_t smallmax, huge1, huge2, huge3, hugemax;
void *p;
/* Get size classes. */
- largemax = get_large_size(get_nlarge()-1);
+ smallmax = get_small_size(get_nsmall()-1);
huge1 = get_huge_size(1);
huge2 = get_huge_size(2);
huge3 = get_huge_size(3);
@@ -327,7 +246,7 @@ TEST_BEGIN(test_extra_huge)
/* Test size decrease with zero extra. */
assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
"Unexpected xallocx() behavior");
- assert_zu_ge(xallocx(p, largemax, 0, flags), huge1,
+ assert_zu_ge(xallocx(p, smallmax, 0, flags), huge1,
"Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
@@ -339,7 +258,7 @@ TEST_BEGIN(test_extra_huge)
"Unexpected xallocx() behavior");
assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2,
"Unexpected xallocx() behavior");
- assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1,
+ assert_zu_ge(xallocx(p, smallmax, huge1 - smallmax, flags), huge1,
"Unexpected xallocx() behavior");
assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
@@ -455,18 +374,6 @@ test_zero(size_t szmin, size_t szmax)
dallocx(p, flags);
}
-TEST_BEGIN(test_zero_large)
-{
- size_t large0, largemax;
-
- /* Get size classes. */
- large0 = get_large_size(0);
- largemax = get_large_size(get_nlarge()-1);
-
- test_zero(large0, largemax);
-}
-TEST_END
-
TEST_BEGIN(test_zero_huge)
{
size_t huge0, huge1;
@@ -490,8 +397,6 @@ main(void)
test_size,
test_size_extra_overflow,
test_extra_small,
- test_extra_large,
test_extra_huge,
- test_zero_large,
test_zero_huge));
}
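test_extra_huge keeps exercising the same xallocx() contract the deleted large-class test covered: resize in place if possible, never move the allocation, and return the resulting usable size. Stripped of the size-class bookkeeping, that contract looks roughly like this (the 4096/8192 figures are illustrative and not tied to any size class in this patch):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = mallocx(4096, 0);
	size_t usize;

	if (p == NULL)
		return (1);
	/*
	 * Request at least 4096 bytes and accept up to 4096+8192; xallocx()
	 * resizes in place (it never moves p) and returns the resulting
	 * usable size, which stays at the old usable size on failure.
	 */
	usize = xallocx(p, 4096, 8192, 0);
	printf("usable size after xallocx(): %zu\n", usize);
	dallocx(p, 0);
	return (0);
}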
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index fa2c5cd..546d3cc 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -25,13 +25,6 @@ get_nsmall(void)
}
static unsigned
-get_nlarge(void)
-{
-
- return (get_nsizes_impl("arenas.nlruns"));
-}
-
-static unsigned
get_nhuge(void)
{
@@ -65,13 +58,6 @@ get_small_size(size_t ind)
}
static size_t
-get_large_size(size_t ind)
-{
-
- return (get_size_impl("arenas.lrun.0.size", ind));
-}
-
-static size_t
get_huge_size(size_t ind)
{
@@ -90,13 +76,13 @@ vsalloc(tsdn_t *tsdn, const void *ptr)
if (!extent_active_get(extent))
return (0);
- return (isalloc(tsdn, extent, ptr, false));
+ return (isalloc(tsdn, extent, ptr));
}
TEST_BEGIN(test_arena_reset)
{
-#define NHUGE 4
- unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i;
+#define NHUGE 32
+ unsigned arena_ind, nsmall, nhuge, nptrs, i;
size_t sz, miblen;
void **ptrs;
int flags;
@@ -110,9 +96,8 @@ TEST_BEGIN(test_arena_reset)
flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
nsmall = get_nsmall();
- nlarge = get_nlarge();
nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge();
- nptrs = nsmall + nlarge + nhuge;
+ nptrs = nsmall + nhuge;
ptrs = (void **)malloc(nptrs * sizeof(void *));
assert_ptr_not_null(ptrs, "Unexpected malloc() failure");
@@ -123,15 +108,9 @@ TEST_BEGIN(test_arena_reset)
assert_ptr_not_null(ptrs[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
- for (i = 0; i < nlarge; i++) {
- sz = get_large_size(i);
- ptrs[nsmall + i] = mallocx(sz, flags);
- assert_ptr_not_null(ptrs[i],
- "Unexpected mallocx(%zu, %#x) failure", sz, flags);
- }
for (i = 0; i < nhuge; i++) {
sz = get_huge_size(i);
- ptrs[nsmall + nlarge + i] = mallocx(sz, flags);
+ ptrs[nsmall + i] = mallocx(sz, flags);
assert_ptr_not_null(ptrs[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
@@ -140,7 +119,7 @@ TEST_BEGIN(test_arena_reset)
/* Verify allocations. */
for (i = 0; i < nptrs; i++) {
- assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,
+ assert_zu_gt(ivsalloc(tsdn, ptrs[i]), 0,
"Allocation should have queryable size");
}
diff --git a/test/unit/decay.c b/test/unit/decay.c
index 70a2e67..786cc93 100644
--- a/test/unit/decay.c
+++ b/test/unit/decay.c
@@ -1,6 +1,6 @@
#include "test/jemalloc_test.h"
-const char *malloc_conf = "purge:decay,decay_time:1";
+const char *malloc_conf = "purge:decay,decay_time:1,lg_tcache_max:0";
static nstime_update_t *nstime_update_orig;
@@ -22,7 +22,7 @@ TEST_BEGIN(test_decay_ticks)
{
ticker_t *decay_ticker;
unsigned tick0, tick1;
- size_t sz, huge0, large0;
+ size_t sz, huge0;
void *p;
test_skip_if(opt_purge != purge_mode_decay);
@@ -34,13 +34,11 @@ TEST_BEGIN(test_decay_ticks)
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
- assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
/*
* Test the standard APIs using a huge size class, since we can't
- * control tcache interactions (except by completely disabling tcache
- * for the entire test program).
+ * control tcache interactions for small size classes (except by
+ * completely disabling tcache for the entire test program).
*/
/* malloc(). */
@@ -101,15 +99,14 @@ TEST_BEGIN(test_decay_ticks)
assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
/*
- * Test the *allocx() APIs using huge, large, and small size classes,
- * with tcache explicitly disabled.
+ * Test the *allocx() APIs using huge and small size classes, with
+ * tcache explicitly disabled.
*/
{
unsigned i;
- size_t allocx_sizes[3];
+ size_t allocx_sizes[2];
allocx_sizes[0] = huge0;
- allocx_sizes[1] = large0;
- allocx_sizes[2] = 1;
+ allocx_sizes[1] = 1;
for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
sz = allocx_sizes[i];
@@ -157,13 +154,13 @@ TEST_BEGIN(test_decay_ticks)
}
/*
- * Test tcache fill/flush interactions for large and small size classes,
+ * Test tcache fill/flush interactions for huge and small size classes,
* using an explicit tcache.
*/
if (config_tcache) {
unsigned tcache_ind, i;
size_t tcache_sizes[2];
- tcache_sizes[0] = large0;
+ tcache_sizes[0] = huge0;
tcache_sizes[1] = 1;
sz = sizeof(unsigned);
@@ -204,14 +201,14 @@ TEST_BEGIN(test_decay_ticker)
uint64_t epoch;
uint64_t npurge0 = 0;
uint64_t npurge1 = 0;
- size_t sz, large;
+ size_t sz, huge;
unsigned i, nupdates0;
nstime_t time, decay_time, deadline;
test_skip_if(opt_purge != purge_mode_decay);
/*
- * Allocate a bunch of large objects, pause the clock, deallocate the
+ * Allocate a bunch of huge objects, pause the clock, deallocate the
* objects, restore the clock, then [md]allocx() in a tight loop to
* verify the ticker triggers purging.
*/
@@ -222,11 +219,11 @@ TEST_BEGIN(test_decay_ticker)
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.tcache_max", &tcache_max, &sz, NULL,
0), 0, "Unexpected mallctl failure");
- large = nallocx(tcache_max + 1, flags);
+ huge = nallocx(tcache_max + 1, flags);
} else {
sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lrun.0.size", &large, &sz, NULL, 0),
- 0, "Unexpected mallctl failure");
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
}
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
@@ -238,7 +235,7 @@ TEST_BEGIN(test_decay_ticker)
config_stats ? 0 : ENOENT, "Unexpected mallctl result");
for (i = 0; i < NPS; i++) {
- ps[i] = mallocx(large, flags);
+ ps[i] = mallocx(huge, flags);
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
}
@@ -296,13 +293,13 @@ TEST_BEGIN(test_decay_nonmonotonic)
uint64_t epoch;
uint64_t npurge0 = 0;
uint64_t npurge1 = 0;
- size_t sz, large0;
+ size_t sz, huge0;
unsigned i, nupdates0;
test_skip_if(opt_purge != purge_mode_decay);
sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
@@ -322,7 +319,7 @@ TEST_BEGIN(test_decay_nonmonotonic)
nstime_update = nstime_update_mock;
for (i = 0; i < NPS; i++) {
- ps[i] = mallocx(large0, flags);
+ ps[i] = mallocx(huge0, flags);
assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
}
diff --git a/test/unit/extent_quantize.c b/test/unit/extent_quantize.c
index d8846db..98c9fde 100644
--- a/test/unit/extent_quantize.c
+++ b/test/unit/extent_quantize.c
@@ -35,16 +35,16 @@ TEST_BEGIN(test_small_extent_size)
}
TEST_END
-TEST_BEGIN(test_large_extent_size)
+TEST_BEGIN(test_huge_extent_size)
{
bool cache_oblivious;
- unsigned nlruns, i;
+ unsigned nhchunks, i;
size_t sz, extent_size_prev, ceil_prev;
size_t mib[4];
size_t miblen = sizeof(mib) / sizeof(size_t);
/*
- * Iterate over all large size classes, get their extent sizes, and
+ * Iterate over all huge size classes, get their extent sizes, and
* verify that the quantized size is the same as the extent size.
*/
@@ -53,12 +53,12 @@ TEST_BEGIN(test_large_extent_size)
NULL, 0), 0, "Unexpected mallctl failure");
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
- assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
+ assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
- for (i = 0; i < nlruns; i++) {
+ for (i = 0; i < nhchunks; i++) {
size_t lextent_size, extent_size, floor, ceil;
mib[2] = i;
@@ -91,33 +91,24 @@ TEST_BEGIN(test_large_extent_size)
ceil_prev, extent_size);
}
}
- extent_size_prev = floor;
- ceil_prev = extent_size_quantize_ceil(extent_size + PAGE);
+ if (i + 1 < nhchunks) {
+ extent_size_prev = floor;
+ ceil_prev = extent_size_quantize_ceil(extent_size +
+ PAGE);
+ }
}
}
TEST_END
TEST_BEGIN(test_monotonic)
{
- unsigned nbins, nlruns, i;
- size_t sz, floor_prev, ceil_prev;
-
- /*
- * Iterate over all extent sizes and verify that
- * extent_size_quantize_{floor,ceil}() are monotonic.
- */
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
+#define SZ_MAX ZU(4 * 1024 * 1024)
+ unsigned i;
+ size_t floor_prev, ceil_prev;
floor_prev = 0;
ceil_prev = 0;
- for (i = 1; i <= large_maxclass >> LG_PAGE; i++) {
+ for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
size_t extent_size, floor, ceil;
extent_size = i << LG_PAGE;
@@ -150,6 +141,6 @@ main(void)
return (test(
test_small_extent_size,
- test_large_extent_size,
+ test_huge_extent_size,
test_monotonic));
}
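The rewritten test walks the huge classes through a cached MIB instead of re-resolving the name on every iteration; the idiom generalizes to any indexed arenas.* node. With the assertions removed, the loop reduces to roughly the following (it relies only on the arenas.nhchunks and arenas.hchunk.<i>.size nodes that the test itself reads):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned nhchunks, i;
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t sz;

	sz = sizeof(unsigned);
	if (mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0) != 0)
		return (1);
	/* Resolve the name once, then patch the index element per class. */
	if (mallctlnametomib("arenas.hchunk.0.size", mib, &miblen) != 0)
		return (1);
	for (i = 0; i < nhchunks; i++) {
		size_t extent_size;

		mib[2] = i;
		sz = sizeof(size_t);
		if (mallctlbymib(mib, miblen, &extent_size, &sz, NULL, 0) != 0)
			return (1);
		printf("hchunk %u: %zu bytes\n", i, extent_size);
	}
	return (0);
}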
diff --git a/test/unit/junk.c b/test/unit/junk.c
index 82eddf4..cdf8fb3 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -9,7 +9,6 @@ const char *malloc_conf =
#endif
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
-static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
static huge_dalloc_junk_t *huge_dalloc_junk_orig;
static void *watch_for_junking;
static bool saw_junking;
@@ -38,25 +37,10 @@ arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info)
}
static void
-arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
+huge_dalloc_junk_intercept(void *ptr, size_t usize)
{
- size_t i;
-
- arena_dalloc_junk_large_orig(ptr, usize);
- for (i = 0; i < usize; i++) {
- assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
- "Missing junk fill for byte %zu/%zu of deallocated region",
- i, usize);
- }
- if (ptr == watch_for_junking)
- saw_junking = true;
-}
-static void
-huge_dalloc_junk_intercept(tsdn_t *tsdn, void *ptr, size_t usize)
-{
-
- huge_dalloc_junk_orig(tsdn, ptr, usize);
+ huge_dalloc_junk_orig(ptr, usize);
/*
* The conditions under which junk filling actually occurs are nuanced
* enough that it doesn't make sense to duplicate the decision logic in
@@ -75,8 +59,6 @@ test_junk(size_t sz_min, size_t sz_max)
if (opt_junk_free) {
arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
- arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
- arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
huge_dalloc_junk_orig = huge_dalloc_junk;
huge_dalloc_junk = huge_dalloc_junk_intercept;
}
@@ -106,13 +88,18 @@ test_junk(size_t sz_min, size_t sz_max)
}
if (xallocx(s, sz+1, 0, 0) == sz) {
+ uint8_t *t;
watch_junking(s);
- s = (uint8_t *)rallocx(s, sz+1, 0);
- assert_ptr_not_null((void *)s,
+ t = (uint8_t *)rallocx(s, sz+1, 0);
+ assert_ptr_not_null((void *)t,
"Unexpected rallocx() failure");
+ assert_ptr_ne(s, t, "Unexpected in-place rallocx()");
+ assert_zu_ge(sallocx(t, 0), sz+1,
+ "Unexpectedly small rallocx() result");
assert_true(!opt_junk_free || saw_junking,
"Expected region of size %zu to be junk-filled",
sz);
+ s = t;
}
}
@@ -123,7 +110,6 @@ test_junk(size_t sz_min, size_t sz_max)
if (opt_junk_free) {
arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
- arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
huge_dalloc_junk = huge_dalloc_junk_orig;
}
}
@@ -136,64 +122,11 @@ TEST_BEGIN(test_junk_small)
}
TEST_END
-TEST_BEGIN(test_junk_large)
-{
-
- test_skip_if(!config_fill);
- test_junk(SMALL_MAXCLASS+1, large_maxclass);
-}
-TEST_END
-
TEST_BEGIN(test_junk_huge)
{
test_skip_if(!config_fill);
- test_junk(large_maxclass+1, chunksize*2);
-}
-TEST_END
-
-arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
-static void *most_recently_trimmed;
-
-static size_t
-shrink_size(size_t size)
-{
- size_t shrink_size;
-
- for (shrink_size = size - 1; nallocx(shrink_size, 0) == size;
- shrink_size--)
- ; /* Do nothing. */
-
- return (shrink_size);
-}
-
-static void
-arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
-{
-
- arena_ralloc_junk_large_orig(ptr, old_usize, usize);
- assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
- assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
- most_recently_trimmed = ptr;
-}
-
-TEST_BEGIN(test_junk_large_ralloc_shrink)
-{
- void *p1, *p2;
-
- p1 = mallocx(large_maxclass, 0);
- assert_ptr_not_null(p1, "Unexpected mallocx() failure");
-
- arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
- arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
-
- p2 = rallocx(p1, shrink_size(large_maxclass), 0);
- assert_ptr_eq(p1, p2, "Unexpected move during shrink");
-
- arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
-
- assert_ptr_eq(most_recently_trimmed, p1,
- "Expected trimmed portion of region to be junk-filled");
+ test_junk(SMALL_MAXCLASS+1, chunksize*2);
}
TEST_END
@@ -203,7 +136,5 @@ main(void)
return (test(
test_junk_small,
- test_junk_large,
- test_junk_huge,
- test_junk_large_ralloc_shrink));
+ test_junk_huge));
}
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 79c5147..9ba730a 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -596,8 +596,7 @@ TEST_BEGIN(test_arenas_constants)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
- TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses);
- TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses);
+ TEST_ARENAS_CONSTANT(unsigned, nhchunks, NSIZES - NBINS);
#undef TEST_ARENAS_CONSTANT
}
@@ -622,23 +621,6 @@ TEST_BEGIN(test_arenas_bin_constants)
}
TEST_END
-TEST_BEGIN(test_arenas_lrun_constants)
-{
-
-#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
- t name; \
- size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \
- 0), 0, "Unexpected mallctl() failure"); \
- assert_zu_eq(name, expected, "Incorrect "#name" size"); \
-} while (0)
-
- TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS);
-
-#undef TEST_ARENAS_LRUN_CONSTANT
-}
-TEST_END
-
TEST_BEGIN(test_arenas_hchunk_constants)
{
@@ -650,7 +632,7 @@ TEST_BEGIN(test_arenas_hchunk_constants)
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
- TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize);
+ TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, LARGE_MINCLASS);
#undef TEST_ARENAS_HCHUNK_CONSTANT
}
@@ -721,7 +703,6 @@ main(void)
test_arenas_decay_time,
test_arenas_constants,
test_arenas_bin_constants,
- test_arenas_lrun_constants,
test_arenas_hchunk_constants,
test_arenas_extend,
test_stats_arenas));
diff --git a/test/unit/prof_idump.c b/test/unit/prof_idump.c
index bdea53e..2b0639d 100644
--- a/test/unit/prof_idump.c
+++ b/test/unit/prof_idump.c
@@ -1,10 +1,17 @@
#include "test/jemalloc_test.h"
+const char *malloc_conf = ""
#ifdef JEMALLOC_PROF
-const char *malloc_conf =
- "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
- "lg_prof_interval:0";
+ "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
+ ",lg_prof_interval:0"
+# ifdef JEMALLOC_TCACHE
+ ","
+# endif
#endif
+#ifdef JEMALLOC_TCACHE
+ "tcache:false"
+#endif
+ ;
static bool did_prof_dump_open;
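The conditional concatenation keeps the comma placement correct no matter which features are compiled in; with neither JEMALLOC_PROF nor JEMALLOC_TCACHE defined the string collapses to "". Expanded by hand from the literals above, a build with both features defined ends up with:

/* What the preprocessor produces when both features are enabled. */
const char *malloc_conf =
    "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
    ",lg_prof_interval:0,tcache:false";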
diff --git a/test/unit/run_quantize.c b/test/unit/run_quantize.c
deleted file mode 100644
index 45f3201..0000000
--- a/test/unit/run_quantize.c
+++ /dev/null
@@ -1,149 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_small_run_size)
-{
- unsigned nbins, i;
- size_t sz, run_size;
- size_t mib[4];
- size_t miblen = sizeof(mib) / sizeof(size_t);
-
- /*
- * Iterate over all small size classes, get their run sizes, and verify
- * that the quantized size is the same as the run size.
- */
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
- "Unexpected mallctlnametomib failure");
- for (i = 0; i < nbins; i++) {
- mib[2] = i;
- sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, &run_size, &sz, NULL, 0),
- 0, "Unexpected mallctlbymib failure");
- assert_zu_eq(run_size, run_quantize_floor(run_size),
- "Small run quantization should be a no-op (run_size=%zu)",
- run_size);
- assert_zu_eq(run_size, run_quantize_ceil(run_size),
- "Small run quantization should be a no-op (run_size=%zu)",
- run_size);
- }
-}
-TEST_END
-
-TEST_BEGIN(test_large_run_size)
-{
- bool cache_oblivious;
- unsigned nlruns, i;
- size_t sz, run_size_prev, ceil_prev;
- size_t mib[4];
- size_t miblen = sizeof(mib) / sizeof(size_t);
-
- /*
- * Iterate over all large size classes, get their run sizes, and verify
- * that the quantized size is the same as the run size.
- */
-
- sz = sizeof(bool);
- assert_d_eq(mallctl("config.cache_oblivious", &cache_oblivious, &sz,
- NULL, 0), 0, "Unexpected mallctl failure");
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
- "Unexpected mallctlnametomib failure");
- for (i = 0; i < nlruns; i++) {
- size_t lrun_size, run_size, floor, ceil;
-
- mib[2] = i;
- sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, &lrun_size, &sz, NULL, 0),
- 0, "Unexpected mallctlbymib failure");
- run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
- floor = run_quantize_floor(run_size);
- ceil = run_quantize_ceil(run_size);
-
- assert_zu_eq(run_size, floor,
- "Large run quantization should be a no-op for precise "
- "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
- assert_zu_eq(run_size, ceil,
- "Large run quantization should be a no-op for precise "
- "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
-
- if (i > 0) {
- assert_zu_eq(run_size_prev, run_quantize_floor(run_size
- - PAGE), "Floor should be a precise size");
- if (run_size_prev < ceil_prev) {
- assert_zu_eq(ceil_prev, run_size,
- "Ceiling should be a precise size "
- "(run_size_prev=%zu, ceil_prev=%zu, "
- "run_size=%zu)", run_size_prev, ceil_prev,
- run_size);
- }
- }
- run_size_prev = floor;
- ceil_prev = run_quantize_ceil(run_size + PAGE);
- }
-}
-TEST_END
-
-TEST_BEGIN(test_monotonic)
-{
- unsigned nbins, nlruns, i;
- size_t sz, floor_prev, ceil_prev;
-
- /*
- * Iterate over all run sizes and verify that
- * run_quantize_{floor,ceil}() are monotonic.
- */
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- floor_prev = 0;
- ceil_prev = 0;
- for (i = 1; i <= large_maxclass >> LG_PAGE; i++) {
- size_t run_size, floor, ceil;
-
- run_size = i << LG_PAGE;
- floor = run_quantize_floor(run_size);
- ceil = run_quantize_ceil(run_size);
-
- assert_zu_le(floor, run_size,
- "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
- floor, run_size, ceil);
- assert_zu_ge(ceil, run_size,
- "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
- floor, run_size, ceil);
-
- assert_zu_le(floor_prev, floor, "Floor should be monotonic "
- "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
- floor_prev, floor, run_size, ceil);
- assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
- "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
- floor, run_size, ceil_prev, ceil);
-
- floor_prev = floor;
- ceil_prev = ceil;
- }
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_small_run_size,
- test_large_run_size,
- test_monotonic));
-}
diff --git a/test/unit/stats.c b/test/unit/stats.c
index a9a3981..b0e318a 100644
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -42,7 +42,7 @@ TEST_BEGIN(test_stats_huge)
size_t sz;
int expected = config_stats ? 0 : ENOENT;
- p = mallocx(large_maxclass+1, 0);
+ p = mallocx(SMALL_MAXCLASS+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -75,7 +75,7 @@ TEST_END
TEST_BEGIN(test_stats_arenas_summary)
{
unsigned arena;
- void *little, *large, *huge;
+ void *little, *huge;
uint64_t epoch;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
@@ -88,13 +88,10 @@ TEST_BEGIN(test_stats_arenas_summary)
little = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(little, "Unexpected mallocx() failure");
- large = mallocx(large_maxclass, 0);
- assert_ptr_not_null(large, "Unexpected mallocx() failure");
huge = mallocx(chunksize, 0);
assert_ptr_not_null(huge, "Unexpected mallocx() failure");
dallocx(little, 0);
- dallocx(large, 0);
dallocx(huge, 0);
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
@@ -188,50 +185,6 @@ TEST_BEGIN(test_stats_arenas_small)
}
TEST_END
-TEST_BEGIN(test_stats_arenas_large)
-{
- unsigned arena;
- void *p;
- size_t sz, allocated;
- uint64_t epoch, nmalloc, ndalloc, nrequests;
- int expected = config_stats ? 0 : ENOENT;
-
- arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
- 0, "Unexpected mallctl() failure");
-
- p = mallocx(large_maxclass, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
- assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
- "Unexpected mallctl() failure");
-
- sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
- sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
-
- if (config_stats) {
- assert_zu_gt(allocated, 0,
- "allocated should be greater than zero");
- assert_u64_gt(nmalloc, 0,
- "nmalloc should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
- "nmalloc should be at least as large as ndalloc");
- assert_u64_gt(nrequests, 0,
- "nrequests should be greater than zero");
- }
-
- dallocx(p, 0);
-}
-TEST_END
-
TEST_BEGIN(test_stats_arenas_huge)
{
unsigned arena;
@@ -346,63 +299,23 @@ TEST_BEGIN(test_stats_arenas_bins)
}
TEST_END
-TEST_BEGIN(test_stats_arenas_lruns)
-{
- unsigned arena;
- void *p;
- uint64_t epoch, nmalloc, ndalloc, nrequests;
- size_t curruns, sz;
- int expected = config_stats ? 0 : ENOENT;
-
- arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
- 0, "Unexpected mallctl() failure");
-
- p = mallocx(LARGE_MINCLASS, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
- assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
- "Unexpected mallctl() failure");
-
- sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
- sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
-
- if (config_stats) {
- assert_u64_gt(nmalloc, 0,
- "nmalloc should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
- "nmalloc should be at least as large as ndalloc");
- assert_u64_gt(nrequests, 0,
- "nrequests should be greater than zero");
- assert_u64_gt(curruns, 0,
- "At least one run should be currently allocated");
- }
-
- dallocx(p, 0);
-}
-TEST_END
-
TEST_BEGIN(test_stats_arenas_hchunks)
{
unsigned arena;
void *p;
uint64_t epoch, nmalloc, ndalloc;
- size_t curhchunks, sz;
+ size_t curhchunks, sz, hsize;
int expected = config_stats ? 0 : ENOENT;
arena = 0;
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
- p = mallocx(chunksize, 0);
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &hsize, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ p = mallocx(hsize, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -439,9 +352,7 @@ main(void)
test_stats_huge,
test_stats_arenas_summary,
test_stats_arenas_small,
- test_stats_arenas_large,
test_stats_arenas_huge,
test_stats_arenas_bins,
- test_stats_arenas_lruns,
test_stats_arenas_hchunks));
}
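All of the remaining stats tests follow the same discipline: write to the epoch control to refresh the cached counters, then read. Outside the test harness the pattern is simply the following (a sketch assuming a stats-enabled build; stats.allocated is used here only as a convenient global counter and is not one of the controls touched by this patch):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t allocated, sz = sizeof(size_t);

	free(malloc(4096));
	/* Statistics are snapshotted per epoch; refresh before reading. */
	if (mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)) != 0)
		return (1);
	/* Fails with ENOENT when built without --enable-stats. */
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
		return (1);
	printf("allocated: %zu\n", allocated);
	return (0);
}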
diff --git a/test/unit/zero.c b/test/unit/zero.c
index 123f0e0..2da288a 100644
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -53,19 +53,11 @@ TEST_BEGIN(test_zero_small)
}
TEST_END
-TEST_BEGIN(test_zero_large)
-{
-
- test_skip_if(!config_fill);
- test_zero(SMALL_MAXCLASS+1, large_maxclass);
-}
-TEST_END
-
TEST_BEGIN(test_zero_huge)
{
test_skip_if(!config_fill);
- test_zero(large_maxclass+1, chunksize*2);
+ test_zero(SMALL_MAXCLASS+1, chunksize*2);
}
TEST_END
@@ -75,6 +67,5 @@ main(void)
return (test(
test_zero_small,
- test_zero_large,
test_zero_huge));
}