Diffstat (limited to 'include')
-rw-r--r--  include/jemalloc/internal/arena.h                  148
-rw-r--r--  include/jemalloc/internal/huge.h                     8
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in    34
-rw-r--r--  include/jemalloc/internal/private_symbols.txt        7
-rw-r--r--  include/jemalloc/internal/prof.h                    58
-rwxr-xr-x  include/jemalloc/internal/size_classes.sh            5
-rw-r--r--  include/jemalloc/internal/tcache.h                  18
-rw-r--r--  include/jemalloc/jemalloc_protos.h.in                2
8 files changed, 194 insertions, 86 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index cb015ee..12c6179 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -39,7 +39,7 @@ typedef struct arena_s arena_t;
#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
/* Index of bin this run is associated with. */
- index_t binind;
+ szind_t binind;
/* Number of free regions in run. */
unsigned nfree;
@@ -424,7 +424,7 @@ extern arena_bin_info_t arena_bin_info[NBINS];
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
-extern size_t arena_maxclass; /* Max size class for arenas. */
+extern size_t large_maxclass; /* Max large size class. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */
@@ -448,7 +448,7 @@ bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
- index_t binind, uint64_t prof_accumbytes);
+ szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -488,7 +488,7 @@ extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
- size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache);
+ size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
@@ -519,17 +519,19 @@ arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
+size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -539,21 +541,23 @@ void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- index_t binind);
+ szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
- size_t runind, index_t binind, size_t flags);
+ size_t runind, szind_t binind, size_t flags);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
-index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
+szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
-void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void arena_prof_tctx_reset(const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *old_tctx);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache);
arena_t *arena_aalloc(const void *ptr);
@@ -653,13 +657,29 @@ arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_decode(size_t mapbits)
+{
+ size_t size;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+ size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+ size = mapbits & CHUNK_MAP_SIZE_MASK;
+#else
+ size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+ return (size);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
+ return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -670,7 +690,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
- return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
+ return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -684,11 +704,11 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
- index_t binind;
+ szind_t binind;
mapbits = arena_mapbits_get(chunk, pageind);
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -754,6 +774,23 @@ arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
*mapbitsp = mapbits;
}
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_encode(size_t size)
+{
+ size_t mapbits;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+ mapbits = size << CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+ mapbits = size;
+#else
+ mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+ assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
+ return (mapbits);
+}
+
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
@@ -761,11 +798,10 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
- assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
- arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags);
}
@@ -777,10 +813,9 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
- assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) | (mapbits
- & ~CHUNK_MAP_SIZE_MASK));
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+ (mapbits & ~CHUNK_MAP_SIZE_MASK));
}
JEMALLOC_ALWAYS_INLINE void
@@ -799,18 +834,17 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
- assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
- arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- index_t binind)
+ szind_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
@@ -824,7 +858,7 @@ arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
- index_t binind, size_t flags)
+ szind_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
@@ -901,10 +935,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
}
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
- index_t binind;
+ szind_t binind;
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -916,7 +950,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
- index_t run_binind, actual_binind;
+ szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
@@ -950,10 +984,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
- index_t binind = bin - arena->bins;
+ szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
@@ -1060,7 +1094,7 @@ arena_prof_tctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@@ -1070,17 +1104,59 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) {
- arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
- pageind);
+ if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
+ (uintptr_t)1U)) {
+ arena_chunk_map_misc_t *elm;
+
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+ elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
+ } else {
+ /*
+ * tctx must always be initialized for large runs.
+ * Assert that the surrounding conditional logic is
+ * equivalent to checking whether ptr refers to a large
+ * run.
+ */
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
huge_prof_tctx_set(ptr, tctx);
}
+JEMALLOC_INLINE void
+arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *old_tctx)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
+ (uintptr_t)old_tctx > (uintptr_t)1U))) {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ size_t pageind;
+ arena_chunk_map_misc_t *elm;
+
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+ LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) !=
+ 0);
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+ elm = arena_miscelm_get(chunk, pageind);
+ atomic_write_p(&elm->prof_tctx_pun,
+ (prof_tctx_t *)(uintptr_t)1U);
+ } else
+ huge_prof_tctx_reset(ptr);
+ }
+}
+
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache)
@@ -1098,7 +1174,7 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
zero));
} else
return (arena_malloc_small(arena, size, zero));
- } else if (likely(size <= arena_maxclass)) {
+ } else if (likely(size <= large_maxclass)) {
/*
* Initialize tcache after checking size in order to avoid
* infinite recursion during tcache initialization.
@@ -1131,7 +1207,7 @@ arena_salloc(const void *ptr, bool demote)
size_t ret;
arena_chunk_t *chunk;
size_t pageind;
- index_t binind;
+ szind_t binind;
assert(ptr != NULL);
@@ -1190,7 +1266,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
- index_t binind = arena_ptr_small_binind_get(ptr,
+ szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
@@ -1242,7 +1318,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
- index_t binind = size2index(size);
+ szind_t binind = size2index(size);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr -
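
The arena_mapbits_size_encode()/arena_mapbits_size_decode() helpers added in the arena.h hunks above replace the open-coded `size << CHUNK_MAP_SIZE_SHIFT` expressions so the stored run size works whether CHUNK_MAP_SIZE_SHIFT is positive, zero, or negative. Below is a minimal standalone sketch of the same scheme; MAP_SIZE_SHIFT and MAP_SIZE_MASK are made-up stand-ins for jemalloc's CHUNK_MAP_SIZE_* constants, chosen only to show that decode(encode(size)) round-trips a page-aligned size.

/*
 * Standalone sketch of the encode/decode pair above.  MAP_SIZE_SHIFT and
 * MAP_SIZE_MASK are illustrative values, not jemalloc's CHUNK_MAP_SIZE_*.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAP_SIZE_SHIFT	1			/* May be negative, zero, or positive. */
#define MAP_SIZE_MASK	(~((size_t)0x1fff))	/* Low bits reserved for flags in this toy layout. */

static size_t
map_size_encode(size_t size)
{
	size_t mapbits;

#if MAP_SIZE_SHIFT > 0
	mapbits = size << MAP_SIZE_SHIFT;
#elif MAP_SIZE_SHIFT == 0
	mapbits = size;
#else
	mapbits = size >> -MAP_SIZE_SHIFT;	/* A negative shift folds into a right shift. */
#endif
	assert((mapbits & ~MAP_SIZE_MASK) == 0);
	return (mapbits);
}

static size_t
map_size_decode(size_t mapbits)
{

#if MAP_SIZE_SHIFT > 0
	return ((mapbits & MAP_SIZE_MASK) >> MAP_SIZE_SHIFT);
#elif MAP_SIZE_SHIFT == 0
	return (mapbits & MAP_SIZE_MASK);
#else
	return ((mapbits & MAP_SIZE_MASK) << -MAP_SIZE_SHIFT);
#endif
}

int
main(void)
{
	size_t size = 12 * 4096;	/* A page-aligned run size. */

	assert(map_size_decode(map_size_encode(size)) == size);
	printf("round-trip ok: %zu bytes\n", size);
	return (0);
}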
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 8b6c6ce..ece7af9 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -13,11 +13,10 @@ void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache);
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero, tcache_t *tcache);
-bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
- size_t size, size_t extra, size_t alignment, bool zero,
- tcache_t *tcache);
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
@@ -27,6 +26,7 @@ arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(const void *ptr);
prof_tctx_t *huge_prof_tctx_get(const void *ptr);
void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 7a137b6..8536a3e 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -184,7 +184,7 @@ static const bool config_cache_oblivious =
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Size class index type. */
-typedef unsigned index_t;
+typedef unsigned szind_t;
/*
* Flags bits:
@@ -232,7 +232,7 @@ typedef unsigned index_t;
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
-# ifdef __sparc64__
+# if (defined(__sparc64__) || defined(__sparcv9))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
@@ -511,12 +511,12 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
-index_t size2index_compute(size_t size);
-index_t size2index_lookup(size_t size);
-index_t size2index(size_t size);
-size_t index2size_compute(index_t index);
-size_t index2size_lookup(index_t index);
-size_t index2size(index_t index);
+szind_t size2index_compute(size_t size);
+szind_t size2index_lookup(size_t size);
+szind_t size2index(size_t size);
+size_t index2size_compute(szind_t index);
+size_t index2size_lookup(szind_t index);
+size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
@@ -527,7 +527,7 @@ arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
@@ -558,7 +558,7 @@ size2index_compute(size_t size)
}
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
@@ -571,7 +571,7 @@ size2index_lookup(size_t size)
}
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
@@ -582,7 +582,7 @@ size2index(size_t size)
}
JEMALLOC_INLINE size_t
-index2size_compute(index_t index)
+index2size_compute(szind_t index)
{
#if (NTBINS > 0)
@@ -609,7 +609,7 @@ index2size_compute(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(index_t index)
+index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
@@ -617,7 +617,7 @@ index2size_lookup(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
-index2size(index_t index)
+index2size(szind_t index)
{
assert(index < NSIZES);
@@ -705,7 +705,7 @@ sa2u(size_t size, size_t alignment)
}
/* Try for a large size class. */
- if (likely(size <= arena_maxclass) && likely(alignment < chunksize)) {
+ if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
/*
* We can't achieve subpage alignment, so round up alignment
* to the minimum that can actually be supported.
@@ -976,7 +976,7 @@ u2rz(size_t usize)
size_t ret;
if (usize <= SMALL_MAXCLASS) {
- index_t binind = size2index(usize);
+ szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;
@@ -1096,7 +1096,7 @@ iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
zero, tcache, arena));
}
- return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
+ return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
tcache));
}
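
For readers unfamiliar with the szind_t type this diff introduces (formerly index_t): it is a small integer index into the size-class table, and size2index()/index2size() convert between a requested size and its class. The toy program below mimics that contract with a made-up, much coarser class table; none of these sizes are jemalloc's real classes.

/*
 * Toy illustration of the szind_t index<->size mapping renamed in this diff.
 * The class sizes are invented solely to demonstrate the invariants.
 */
#include <assert.h>
#include <stddef.h>

typedef unsigned szind_t;

static const size_t toy_index2size_tab[] = {8, 16, 32, 48, 64, 80, 96, 112, 128};
#define TOY_NSIZES	(sizeof(toy_index2size_tab) / sizeof(size_t))

static size_t
toy_index2size(szind_t index)
{

	assert(index < TOY_NSIZES);
	return (toy_index2size_tab[index]);
}

static szind_t
toy_size2index(size_t size)
{
	szind_t i;

	/* Smallest class that can hold the request. */
	for (i = 0; i < TOY_NSIZES; i++) {
		if (toy_index2size_tab[i] >= size)
			return (i);
	}
	assert(0 && "request larger than the largest toy class");
	return ((szind_t)TOY_NSIZES);
}

int
main(void)
{

	/* A class's usable size is never smaller than the request... */
	assert(toy_index2size(toy_size2index(40)) == 48);
	/* ...and a class size maps back to its own index. */
	assert(toy_size2index(toy_index2size(3)) == 3);
	return (0);
}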
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index dbf6aa7..a90021a 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -50,13 +50,14 @@ arena_mapbits_large_size_get
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
+arena_mapbits_size_decode
+arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
-arena_maxclass
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
@@ -79,6 +80,7 @@ arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_promoted
arena_prof_tctx_get
+arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge_all
@@ -249,6 +251,7 @@ huge_dalloc_junk
huge_malloc
huge_palloc
huge_prof_tctx_get
+huge_prof_tctx_reset
huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
@@ -283,6 +286,7 @@ ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
+large_maxclass
lg_floor
malloc_cprintf
malloc_mutex_init
@@ -377,6 +381,7 @@ prof_reset
prof_sample_accum_update
prof_sample_threshold_update
prof_tctx_get
+prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_get
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 2e22711..e5198c3 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -90,10 +90,11 @@ struct prof_tctx_s {
prof_tdata_t *tdata;
/*
- * Copy of tdata->thr_uid, necessary because tdata may be defunct during
- * teardown.
+ * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+ * defunct during teardown.
*/
uint64_t thr_uid;
+ uint64_t thr_discrim;
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
@@ -330,14 +331,18 @@ bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
-prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
+prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
+ bool update);
prof_tctx_t *prof_tctx_get(const void *ptr);
-void prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
- prof_tctx_t *tctx, bool updated, size_t old_usize, prof_tctx_t *old_tctx);
+ prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
+ size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
@@ -402,13 +407,24 @@ prof_tctx_get(const void *ptr)
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_set(ptr, tctx);
+ arena_prof_tctx_set(ptr, usize, tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *old_tctx)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@@ -442,7 +458,7 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_tctx_t *ret;
prof_tdata_t *tdata;
@@ -450,8 +466,8 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
assert(usize == s2u(usize));
- if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
- usize, update, &tdata)))
+ if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+ &tdata)))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
bt_init(&bt, tdata->vec);
@@ -473,22 +489,24 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(ptr, usize, tctx);
else
- prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+ prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
- bool updated, size_t old_usize, prof_tctx_t *old_tctx)
+ bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
+ prof_tctx_t *old_tctx)
{
+ bool sampled, old_sampled;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
- if (!updated && ptr != NULL) {
+ if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
- * Don't sample. The usize passed to PROF_ALLOC_PREP()
+ * Don't sample. The usize passed to prof_alloc_prep()
* was larger than what actually got allocated, so a
* backtrace was captured for this allocation, even
* though its actual usize was insufficient to cross the
@@ -498,12 +516,16 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
}
}
- if (unlikely((uintptr_t)old_tctx > (uintptr_t)1U))
- prof_free_sampled_object(tsd, old_usize, old_tctx);
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+ sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+ old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+
+ if (unlikely(sampled))
prof_malloc_sample_object(ptr, usize, tctx);
else
- prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+ prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+
+ if (unlikely(old_sampled))
+ prof_free_sampled_object(tsd, old_usize, old_tctx);
}
JEMALLOC_ALWAYS_INLINE void
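
A recurring idiom in the prof changes above is comparing a prof_tctx_t pointer against (uintptr_t)1U: the value 1 is the "not sampled" placeholder written by prof_tctx_set()/prof_tctx_reset(), while anything greater is a pointer to a real sample record, so prof_realloc()'s new sampled/old_sampled flags reduce to plain pointer comparisons. A tiny sketch of that encoding follows; the struct is a stand-in, not jemalloc's prof_tctx_s.

/* Sketch of the tctx pointer encoding used throughout this diff. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	uint64_t thr_uid;	/* Mirrors the thr_uid/thr_discrim copies above. */
	uint64_t thr_discrim;
} toy_tctx_t;

static bool
tctx_is_sampled(const toy_tctx_t *tctx)
{

	return ((uintptr_t)tctx > (uintptr_t)1U);
}

int
main(void)
{
	toy_tctx_t real = {42, 0};

	assert(!tctx_is_sampled((toy_tctx_t *)(uintptr_t)1U));	/* Placeholder. */
	assert(tctx_is_sampled(&real));				/* Real sample record. */
	return (0);
}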
diff --git a/include/jemalloc/internal/size_classes.sh b/include/jemalloc/internal/size_classes.sh
index 1c2d681..fc82036 100755
--- a/include/jemalloc/internal/size_classes.sh
+++ b/include/jemalloc/internal/size_classes.sh
@@ -167,6 +167,8 @@ size_classes() {
lg_large_minclass=$((${lg_grp} + 2))
fi
fi
+ # Final written value is correct:
+ huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
done
@@ -185,6 +187,7 @@ size_classes() {
# - lookup_maxclass
# - small_maxclass
# - lg_large_minclass
+ # - huge_maxclass
}
cat <<EOF
@@ -215,6 +218,7 @@ cat <<EOF
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
+ * HUGE_MAXCLASS: Maximum (huge) size class.
*/
#define LG_SIZE_CLASS_GROUP ${lg_g}
@@ -238,6 +242,7 @@ for lg_z in ${lg_zarr} ; do
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+ echo "#define HUGE_MAXCLASS ${huge_maxclass}"
echo "#endif"
echo
done
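
Every class emitted by size_classes.sh, including the new HUGE_MAXCLASS above, is built from the same expression: a power-of-two group base plus ndelta steps of the group's delta. A small sketch of that arithmetic; the lg_grp/ndelta/lg_delta values below are examples rather than generated output.

/* Sketch of the size-class formula used by size_classes.sh. */
#include <stddef.h>
#include <stdio.h>

static size_t
class_size(unsigned lg_grp, size_t ndelta, unsigned lg_delta)
{

	return ((((size_t)1) << lg_grp) + (ndelta << lg_delta));
}

int
main(void)
{

	/* Example: an 8 KiB group stepped in 2 KiB deltas. */
	printf("%zu\n", class_size(13, 0, 11));	/* 8192 */
	printf("%zu\n", class_size(13, 3, 11));	/* 8192 + 3*2048 = 14336 */
	return (0);
}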
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 493f457..5079cd2 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -77,7 +77,7 @@ struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
unsigned ev_cnt; /* Event count since incremental GC. */
- index_t next_gc_bin; /* Next bin to GC. */
+ szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
@@ -126,10 +126,10 @@ extern tcaches_t *tcaches;
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, index_t binind);
+ tcache_bin_t *tbin, szind_t binind);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- index_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+ szind_t binind, unsigned rem);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
@@ -161,7 +161,7 @@ void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, bool zero);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
- index_t binind);
+ szind_t binind);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
@@ -267,7 +267,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
- index_t binind;
+ szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@@ -312,7 +312,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
- index_t binind;
+ szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@@ -360,7 +360,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
@@ -386,7 +386,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
- index_t binind;
+ szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
diff --git a/include/jemalloc/jemalloc_protos.h.in b/include/jemalloc/jemalloc_protos.h.in
index 317ffdb..a78414b 100644
--- a/include/jemalloc/jemalloc_protos.h.in
+++ b/include/jemalloc/jemalloc_protos.h.in
@@ -56,7 +56,7 @@ JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size)
- JEMALLOC_ATTR(malloc);
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC