author	Jason Evans <jasone@canonware.com>	2016-04-16 07:36:11 (GMT)
committer	Jason Evans <jasone@canonware.com>	2016-06-03 19:27:41 (GMT)
commit	8c9be3e83732883e852d43bca2cf7724c465f93e (patch)
tree	ddb899211c2b8b79826aacf6a2154f610ca1d285 /include
parent	db72272bef91fa1b4709e89168aede0f01206d55 (diff)
Refactor rtree to always use base_alloc() for node allocation.
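In short: rtree_new() no longer takes per-tree node allocation hooks; rtree nodes are always obtained via base_alloc(), and a tsdn_t * is threaded through every lookup path that may allocate a node (rtree, chunk_lookup(), iealloc(), and their callers). Under JEMALLOC_JET the hooks survive as overridable function pointers so tests can interpose, and rtree_delete() becomes test-only. A before/after sketch of the interface, with prototypes taken from this diff (the summary comments are editorial, not part of the commit):

	/*
	 * Before: callers supplied node allocation hooks at creation time;
	 * the alloc callback took a count of contiguous rtree_elm_t
	 * structures and had to return zeroed memory.
	 */
	typedef rtree_elm_t *(rtree_node_alloc_t)(size_t);
	typedef void (rtree_node_dalloc_t)(rtree_elm_t *);
	bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
	    rtree_node_dalloc_t *dalloc);
	extent_t *rtree_read(rtree_t *rtree, uintptr_t key, bool dependent);

	/*
	 * After: nodes always come from base_alloc(), so the hooks are gone
	 * and a tsdn_t * rides along on every potentially-allocating path.
	 */
	bool rtree_new(rtree_t *rtree, unsigned bits);
	extent_t *rtree_read(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key,
	    bool dependent);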
Diffstat (limited to 'include')
-rw-r--r--	include/jemalloc/internal/arena.h	18
-rw-r--r--	include/jemalloc/internal/chunk.h	9
-rw-r--r--	include/jemalloc/internal/jemalloc_internal.h.in	31
-rw-r--r--	include/jemalloc/internal/private_symbols.txt	2
-rw-r--r--	include/jemalloc/internal/rtree.h	75
-rw-r--r--	include/jemalloc/internal/tcache.h	4
6 files changed, 71 insertions(+), 68 deletions(-)
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index d441aaf..ff3e01d 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -633,7 +633,8 @@ size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
-szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr,
+ size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t arena_run_regind(arena_run_t *run, const arena_bin_info_t *bin_info,
const void *ptr);
@@ -647,7 +648,7 @@ void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
-arena_t *arena_aalloc(const void *ptr);
+arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
bool demote);
void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
@@ -1049,7 +1050,7 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_ALWAYS_INLINE szind_t
-arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
+arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr, size_t mapbits)
{
szind_t binind;
@@ -1071,7 +1072,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(binind != BININD_INVALID);
assert(binind < NBINS);
- extent = iealloc(ptr);
+ extent = iealloc(tsdn, ptr);
chunk = (arena_chunk_t *)extent_addr_get(extent);
arena = extent_arena_get(extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
@@ -1314,10 +1315,10 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
}
JEMALLOC_ALWAYS_INLINE arena_t *
-arena_aalloc(const void *ptr)
+arena_aalloc(tsdn_t *tsdn, const void *ptr)
{
- return (extent_arena_get(iealloc(ptr)));
+ return (extent_arena_get(iealloc(tsdn, ptr)));
}
/* Return the size of the allocation pointed to by ptr. */
@@ -1361,7 +1362,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
* object).
*/
assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
- arena_ptr_small_binind_get(ptr,
+ arena_ptr_small_binind_get(tsdn, ptr,
arena_mapbits_get(chunk, pageind)) == binind);
ret = index2size(binind);
}
@@ -1389,7 +1390,8 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
- szind_t binind = arena_ptr_small_binind_get(ptr,
+ szind_t binind =
+ arena_ptr_small_binind_get(tsdn, ptr,
mapbits);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
binind, slow_path);
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index c13f217..be56c2b 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -53,7 +53,8 @@ chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent);
-void chunk_deregister(const void *chunk, const extent_t *extent);
+void chunk_deregister(tsdn_t *tsdn, const void *chunk,
+ const extent_t *extent);
void chunk_reregister(tsdn_t *tsdn, const void *chunk,
const extent_t *extent);
void *chunk_alloc_base(size_t size);
@@ -81,15 +82,15 @@ void chunk_postfork_child(tsdn_t *tsdn);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-extent_t *chunk_lookup(const void *chunk, bool dependent);
+extent_t *chunk_lookup(tsdn_t *tsdn, const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
JEMALLOC_INLINE extent_t *
-chunk_lookup(const void *ptr, bool dependent)
+chunk_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
{
- return (rtree_read(&chunks_rtree, (uintptr_t)ptr, dependent));
+ return (rtree_read(tsdn, &chunks_rtree, (uintptr_t)ptr, dependent));
}
#endif
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 1fc9d3d..d1306e1 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -961,15 +961,15 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#undef JEMALLOC_ARENA_INLINE_A
#ifndef JEMALLOC_ENABLE_INLINE
-extent_t *iealloc(const void *ptr);
+extent_t *iealloc(tsdn_t *tsdn, const void *ptr);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE extent_t *
-iealloc(const void *ptr)
+iealloc(tsdn_t *tsdn, const void *ptr)
{
- return (chunk_lookup(ptr, true));
+ return (chunk_lookup(tsdn, ptr, true));
}
#endif
@@ -980,8 +980,7 @@ iealloc(const void *ptr)
#include "jemalloc/internal/hash.h"
#ifndef JEMALLOC_ENABLE_INLINE
-extent_t *iealloc(const void *ptr);
-arena_t *iaalloc(const void *ptr);
+arena_t *iaalloc(tsdn_t *tsdn, const void *ptr);
size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
bool demote);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
@@ -1012,19 +1011,19 @@ bool ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(const void *ptr)
+iaalloc(tsdn_t *tsdn, const void *ptr)
{
assert(ptr != NULL);
- return (arena_aalloc(ptr));
+ return (arena_aalloc(tsdn, ptr));
}
/*
* Typical usage:
* tsdn_t *tsdn = [...]
* void *ptr = [...]
- * extent_t *extent = iealloc(ptr);
+ * extent_t *extent = iealloc(tsdn, ptr);
* size_t sz = isalloc(tsdn, extent, ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
@@ -1050,8 +1049,8 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn,
- iealloc(ret), ret, config_prof));
+ arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
+ iealloc(tsdn, ret), ret, config_prof));
}
return (ret);
}
@@ -1078,8 +1077,8 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn,
- iealloc(ret), ret, config_prof));
+ arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
+ iealloc(tsdn, ret), ret, config_prof));
}
return (ret);
}
@@ -1106,7 +1105,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
extent_t *extent;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
- extent = chunk_lookup(ptr, false);
+ extent = chunk_lookup(tsdn, ptr, false);
if (extent == NULL)
return (0);
/* Only arena chunks should be looked up via interior pointers. */
@@ -1123,10 +1122,10 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
assert(ptr != NULL);
assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
+ assert(!is_metadata || iaalloc(tsdn, ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
- arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, extent,
- ptr, config_prof));
+ arena_metadata_allocated_sub(iaalloc(tsdn, ptr), isalloc(tsdn,
+ extent, ptr, config_prof));
}
arena_dalloc(tsdn, extent, ptr, tcache, slow_path);
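For callers, the net effect matches the updated "Typical usage" comment above: every pointer-to-extent lookup now starts from a tsdn_t *. A minimal illustrative sketch of the new calling convention (the variable names are hypothetical; each call appears in this diff):

	/* Resolve a pointer's extent and arena under the new interface. */
	tsdn_t *tsdn = tsd_tsdn(tsd);          /* as in tcache_alloc_large() */
	extent_t *extent = iealloc(tsdn, ptr); /* rtree lookup via chunk_lookup() */
	size_t sz = isalloc(tsdn, extent, ptr, config_prof);
	arena_t *arena = arena_aalloc(tsdn, ptr);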
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 5f4a4b0..42c730c 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -460,6 +460,8 @@ rtree_child_tryread
rtree_clear
rtree_delete
rtree_new
+rtree_node_alloc
+rtree_node_dalloc
rtree_node_valid
rtree_elm_acquire
rtree_elm_lookup
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index 59a7ab3..dbea434 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -23,13 +23,6 @@ typedef struct rtree_s rtree_t;
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_elm_t *)0x1)
-/*
- * The node allocation callback function's argument is the number of contiguous
- * rtree_elm_t structures to allocate, and the resulting memory must be zeroed.
- */
-typedef rtree_elm_t *(rtree_node_alloc_t)(size_t);
-typedef void (rtree_node_dalloc_t)(rtree_elm_t *);
-
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
@@ -79,8 +72,6 @@ struct rtree_level_s {
};
struct rtree_s {
- rtree_node_alloc_t *alloc;
- rtree_node_dalloc_t *dalloc;
unsigned height;
/*
* Precomputed table used to convert from the number of leading 0 key
@@ -94,12 +85,18 @@ struct rtree_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
- rtree_node_dalloc_t *dalloc);
-void rtree_delete(rtree_t *rtree);
-rtree_elm_t *rtree_subtree_read_hard(rtree_t *rtree, unsigned level);
-rtree_elm_t *rtree_child_read_hard(rtree_t *rtree, rtree_elm_t *elm,
+bool rtree_new(rtree_t *rtree, unsigned bits);
+#ifdef JEMALLOC_JET
+typedef rtree_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
+extern rtree_node_alloc_t *rtree_node_alloc;
+typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_elm_t *);
+extern rtree_node_dalloc_t *rtree_node_dalloc;
+void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
+#endif
+rtree_elm_t *rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree,
unsigned level);
+rtree_elm_t *rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_elm_t *elm, unsigned level);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -111,25 +108,27 @@ uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_elm_t *node);
rtree_elm_t *rtree_child_tryread(rtree_elm_t *elm, bool dependent);
-rtree_elm_t *rtree_child_read(rtree_t *rtree, rtree_elm_t *elm,
+rtree_elm_t *rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm,
unsigned level, bool dependent);
extent_t *rtree_elm_read(rtree_elm_t *elm, bool dependent);
void rtree_elm_write(rtree_elm_t *elm, const extent_t *extent);
rtree_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
bool dependent);
-rtree_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
- bool dependent);
-rtree_elm_t *rtree_elm_lookup(rtree_t *rtree, uintptr_t key,
+rtree_elm_t *rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree,
+ unsigned level, bool dependent);
+rtree_elm_t *rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key,
bool dependent, bool init_missing);
-bool rtree_write(rtree_t *rtree, uintptr_t key, const extent_t *extent);
-extent_t *rtree_read(rtree_t *rtree, uintptr_t key, bool dependent);
-rtree_elm_t *rtree_elm_acquire(rtree_t *rtree, uintptr_t key,
+bool rtree_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key,
+ const extent_t *extent);
+extent_t *rtree_read(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key,
+ bool dependent);
+rtree_elm_t *rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key,
bool dependent, bool init_missing);
extent_t *rtree_elm_read_acquired(rtree_elm_t *elm);
void rtree_elm_write_acquired(rtree_elm_t *elm, const extent_t *extent);
void rtree_elm_release(rtree_elm_t *elm);
-void rtree_clear(rtree_t *rtree, uintptr_t key);
+void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
@@ -177,14 +176,14 @@ rtree_child_tryread(rtree_elm_t *elm, bool dependent)
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_child_read(rtree_t *rtree, rtree_elm_t *elm, unsigned level,
+rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level,
bool dependent)
{
rtree_elm_t *child;
child = rtree_child_tryread(elm, dependent);
if (!dependent && unlikely(!rtree_node_valid(child)))
- child = rtree_child_read_hard(rtree, elm, level);
+ child = rtree_child_read_hard(tsdn, rtree, elm, level);
assert(!dependent || child != NULL);
return (child);
}
@@ -238,19 +237,19 @@ rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
+rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level, bool dependent)
{
rtree_elm_t *subtree;
subtree = rtree_subtree_tryread(rtree, level, dependent);
if (!dependent && unlikely(!rtree_node_valid(subtree)))
- subtree = rtree_subtree_read_hard(rtree, level);
+ subtree = rtree_subtree_read_hard(tsdn, rtree, level);
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
-rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent,
+rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent,
bool init_missing)
{
uintptr_t subkey;
@@ -261,8 +260,8 @@ rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent,
start_level = rtree_start_level(rtree, key);
- node = init_missing ? rtree_subtree_read(rtree, start_level, dependent)
- : rtree_subtree_tryread(rtree, start_level, dependent);
+ node = init_missing ? rtree_subtree_read(tsdn, rtree, start_level,
+ dependent) : rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level) \
@@ -272,7 +271,7 @@ rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent,
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
- node = init_missing ? rtree_child_read(rtree, \
+ node = init_missing ? rtree_child_read(tsdn, rtree, \
&node[subkey], level - RTREE_GET_BIAS, dependent) : \
rtree_child_tryread(&node[subkey], dependent); \
/* Fall through. */
@@ -346,14 +345,14 @@ rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent,
}
JEMALLOC_INLINE bool
-rtree_write(rtree_t *rtree, uintptr_t key, const extent_t *extent)
+rtree_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, const extent_t *extent)
{
rtree_elm_t *elm;
assert(extent != NULL); /* Use rtree_clear() for this case. */
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
- elm = rtree_elm_lookup(rtree, key, false, true);
+ elm = rtree_elm_lookup(tsdn, rtree, key, false, true);
if (elm == NULL)
return (true);
assert(rtree_elm_read(elm, false) == NULL);
@@ -363,11 +362,11 @@ rtree_write(rtree_t *rtree, uintptr_t key, const extent_t *extent)
}
JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_read(rtree_t *rtree, uintptr_t key, bool dependent)
+rtree_read(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent)
{
rtree_elm_t *elm;
- elm = rtree_elm_lookup(rtree, key, dependent, false);
+ elm = rtree_elm_lookup(tsdn, rtree, key, dependent, false);
if (elm == NULL)
return (NULL);
@@ -375,12 +374,12 @@ rtree_read(rtree_t *rtree, uintptr_t key, bool dependent)
}
JEMALLOC_INLINE rtree_elm_t *
-rtree_elm_acquire(rtree_t *rtree, uintptr_t key, bool dependent,
+rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent,
bool init_missing)
{
rtree_elm_t *elm;
- elm = rtree_elm_lookup(rtree, key, dependent, init_missing);
+ elm = rtree_elm_lookup(tsdn, rtree, key, dependent, init_missing);
if (!dependent && elm == NULL)
return (NULL);
{
@@ -427,11 +426,11 @@ rtree_elm_release(rtree_elm_t *elm)
}
JEMALLOC_INLINE void
-rtree_clear(rtree_t *rtree, uintptr_t key)
+rtree_clear(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key)
{
rtree_elm_t *elm;
- elm = rtree_elm_acquire(rtree, key, true, false);
+ elm = rtree_elm_acquire(tsdn, rtree, key, true, false);
rtree_elm_write_acquired(elm, NULL);
rtree_elm_release(elm);
}
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index d6d2750..ee63a65 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -370,8 +370,8 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
if (config_prof && usize == LARGE_MINCLASS) {
- arena_chunk_t *chunk =
- (arena_chunk_t *)extent_addr_get(iealloc(ret));
+ arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(
+ iealloc(tsd_tsdn(tsd), ret));
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
LG_PAGE);
arena_mapbits_large_binind_set(chunk, pageind,