author    Jason Evans <jasone@canonware.com>  2016-03-24 04:09:28 (GMT)
committer Jason Evans <jasone@canonware.com>  2016-05-16 19:21:28 (GMT)
commit    a7a6f5bc96500d4821d72cdfafe731d564460890 (patch)
tree      d9a5fe600e1c9ff6b80f0963684800b274614572 /include
parent    3aea827f5e7d07ce156476bba8a843640969de51 (diff)
Rename extent_node_t to extent_t.
Diffstat (limited to 'include')
-rw-r--r--  include/jemalloc/internal/arena.h                  80
-rw-r--r--  include/jemalloc/internal/chunk.h                  11
-rw-r--r--  include/jemalloc/internal/extent.h                169
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in   18
-rw-r--r--  include/jemalloc/internal/private_symbols.txt      41
-rw-r--r--  include/jemalloc/internal/rtree.h                  18
-rw-r--r--  include/jemalloc/internal/witness.h                 2
7 files changed, 173 insertions, 166 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 11863fc..93d0a32 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -177,11 +177,11 @@ typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
/* Arena chunk header. */
struct arena_chunk_s {
/*
- * A pointer to the arena that owns the chunk is stored within the node.
- * This field as a whole is used by chunks_rtree to support both
- * ivsalloc() and core-based debugging.
+ * A pointer to the arena that owns the chunk is stored within the
+ * extent structure. This field as a whole is used by chunks_rtree to
+ * support both ivsalloc() and core-based debugging.
*/
- extent_node_t node;
+ extent_t extent;
/*
* Map of pages within chunk that keeps track of free/large/small. The
@@ -303,7 +303,7 @@ struct arena_s {
/* Extant arena chunks. */
- ql_head(extent_node_t) achunks;
+ ql_head(extent_t) achunks;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
@@ -345,25 +345,25 @@ struct arena_s {
* /-- arena ---\
* | |
* | |
- * |------------| /- chunk -\
- * ...->|chunks_cache|<--------------------------->| /----\ |<--...
- * |------------| | |node| |
- * | | | | | |
- * | | /- run -\ /- run -\ | | | |
- * | | | | | | | | | |
- * | | | | | | | | | |
- * |------------| |-------| |-------| | |----| |
- * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
- * |------------| |-------| |-------| | |----| |
- * | | | | | | | | | |
- * | | | | | | | \----/ |
- * | | \-------/ \-------/ | |
- * | | | |
- * | | | |
- * \------------/ \---------/
+ * |------------| /-- chunk --\
+ * ...->|chunks_cache|<--------------------------->| /------\ |<--...
+ * |------------| | |extent| |
+ * | | | | | |
+ * | | /- run -\ /- run -\ | | | |
+ * | | | | | | | | | |
+ * | | | | | | | | | |
+ * |------------| |-------| |-------| | |------| |
+ * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
+ * |------------| |-------| |-------| | |------| |
+ * | | | | | | | | | |
+ * | | | | | | | \------/ |
+ * | | \-------/ \-------/ | |
+ * | | | |
+ * | | | |
+ * \------------/ \-----------/
*/
arena_runs_dirty_link_t runs_dirty;
- extent_node_t chunks_cache;
+ extent_t chunks_cache;
/*
* Approximate time in seconds from the creation of a set of unused
@@ -413,16 +413,16 @@ struct arena_s {
size_t decay_backlog[SMOOTHSTEP_NSTEPS];
/* Extant huge allocations. */
- ql_head(extent_node_t) huge;
+ ql_head(extent_t) huge;
/* Synchronizes all huge allocation/update/deallocation. */
malloc_mutex_t huge_mtx;
/*
* Trees of chunks that were previously allocated (trees differ only in
- * node ordering). These are used when allocating chunks, in an attempt
- * to re-use address space. Depending on function, different tree
- * orderings are needed, which is why there are two trees with the same
- * contents.
+ * extent ordering). These are used when allocating chunks, in an
+ * attempt to re-use address space. Depending on function, different
+ * tree orderings are needed, which is why there are two trees with the
+ * same contents.
*/
extent_tree_t chunks_szad_cached;
extent_tree_t chunks_ad_cached;
@@ -430,9 +430,9 @@ struct arena_s {
extent_tree_t chunks_ad_retained;
malloc_mutex_t chunks_mtx;
- /* Cache of nodes that were allocated via base_alloc(). */
- ql_head(extent_node_t) node_cache;
- malloc_mutex_t node_cache_mtx;
+ /* Cache of extent structures that were allocated via base_alloc(). */
+ ql_head(extent_t) extent_cache;
+ malloc_mutex_t extent_cache_mtx;
/* User-configurable chunk hook functions. */
chunk_hooks_t chunk_hooks;
@@ -486,12 +486,12 @@ typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t *run_quantize_floor;
extern run_quantize_t *run_quantize_ceil;
#endif
-void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
+void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
bool cache);
-void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
+void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
bool cache);
-extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
-void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+extent_t *arena_extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
@@ -1066,7 +1066,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(binind != BININD_INVALID);
assert(binind < NBINS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = extent_node_arena_get(&chunk->node);
+ arena = extent_arena_get(&chunk->extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
actual_mapbits = arena_mapbits_get(chunk, pageind);
assert(mapbits == actual_mapbits);
@@ -1317,7 +1317,7 @@ arena_aalloc(const void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
- return (extent_node_arena_get(&chunk->node));
+ return (extent_arena_get(&chunk->extent));
else
return (huge_aalloc(ptr));
}
@@ -1395,7 +1395,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
binind, slow_path);
} else {
arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
+ extent_arena_get(&chunk->extent), chunk,
ptr, pageind);
}
} else {
@@ -1411,7 +1411,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
size - large_pad, slow_path);
} else {
arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
+ extent_arena_get(&chunk->extent), chunk,
ptr);
}
}
@@ -1455,7 +1455,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
+ extent_arena_get(&chunk->extent), chunk,
ptr, pageind);
}
} else {
@@ -1467,7 +1467,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
size, slow_path);
} else {
arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
+ extent_arena_get(&chunk->extent), chunk,
ptr);
}
}
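
For illustration, a minimal sketch of the renamed accessor in use, mirroring arena_aalloc() above (ptr_arena_lookup() is a hypothetical helper name, not part of this patch):

	/* Return the arena owning ptr; assumes jemalloc internal headers. */
	static arena_t *
	ptr_arena_lookup(const void *ptr)
	{
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);

		if (likely(chunk != ptr)) {
			/*
			 * Non-huge: the owning arena is recorded in the chunk
			 * header's embedded extent (formerly chunk->node).
			 */
			return (extent_arena_get(&chunk->extent));
		}
		return (huge_aalloc(ptr));
	}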
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index c9fd4ec..4666a64 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -52,13 +52,12 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
-bool chunk_register(tsdn_t *tsdn, const void *chunk,
- const extent_node_t *node);
-void chunk_deregister(const void *chunk, const extent_node_t *node);
+bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent);
+void chunk_deregister(const void *chunk, const extent_t *extent);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- bool *zero, bool dalloc_node);
+ bool *zero, bool dalloc_extent);
void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
@@ -80,11 +79,11 @@ void chunk_postfork_child(tsdn_t *tsdn);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-extent_node_t *chunk_lookup(const void *chunk, bool dependent);
+extent_t *chunk_lookup(const void *chunk, bool dependent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
-JEMALLOC_INLINE extent_node_t *
+JEMALLOC_INLINE extent_t *
chunk_lookup(const void *ptr, bool dependent)
{
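
For illustration, a minimal sketch of a non-dependent lookup through the renamed API (ptr is a hypothetical pointer; the achunk check follows the ivsalloc() assertion further below):

	extent_t *extent = chunk_lookup(ptr, false);
	if (extent == NULL) {
		/* ptr is not within a chunk managed by jemalloc. */
	} else if (!extent_achunk_get(extent)) {
		/* Huge allocation: the extent records its base and size. */
		assert(extent_addr_get(extent) == ptr);
		size_t usize = extent_size_get(extent);
	}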
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 49d76a5..acc67f0 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -1,237 +1,236 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
-typedef struct extent_node_s extent_node_t;
+typedef struct extent_s extent_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
-/* Tree of extents. Use accessor functions for en_* fields. */
-struct extent_node_s {
+/* Extent (span of pages). Use accessor functions for e_* fields. */
+struct extent_s {
/* Arena from which this extent came, if any. */
- arena_t *en_arena;
+ arena_t *e_arena;
- /* Pointer to the extent that this tree node is responsible for. */
- void *en_addr;
+ /* Pointer to the extent that this structure is responsible for. */
+ void *e_addr;
/* Total region size. */
- size_t en_size;
+ size_t e_size;
/*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
- bool en_zeroed;
+ bool e_zeroed;
/*
* True if physical memory is committed to the extent, whether
* explicitly or implicitly as on a system that overcommits and
* satisfies physical memory needs on demand via soft page faults.
*/
- bool en_committed;
+ bool e_committed;
/*
* The achunk flag is used to validate that huge allocation lookups
* don't return arena chunks.
*/
- bool en_achunk;
+ bool e_achunk;
/* Profile counters, used for huge objects. */
- prof_tctx_t *en_prof_tctx;
+ prof_tctx_t *e_prof_tctx;
/* Linkage for arena's runs_dirty and chunks_cache rings. */
arena_runs_dirty_link_t rd;
- qr(extent_node_t) cc_link;
+ qr(extent_t) cc_link;
union {
/* Linkage for the size/address-ordered tree. */
- rb_node(extent_node_t) szad_link;
+ rb_node(extent_t) szad_link;
/* Linkage for arena's achunks, huge, and node_cache lists. */
- ql_elm(extent_node_t) ql_link;
+ ql_elm(extent_t) ql_link;
};
/* Linkage for the address-ordered tree. */
- rb_node(extent_node_t) ad_link;
+ rb_node(extent_t) ad_link;
};
-typedef rb_tree(extent_node_t) extent_tree_t;
+typedef rb_tree(extent_t) extent_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szad_, extent_tree_t, extent_t)
-rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_ad_, extent_tree_t, extent_t)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-arena_t *extent_node_arena_get(const extent_node_t *node);
-void *extent_node_addr_get(const extent_node_t *node);
-size_t extent_node_size_get(const extent_node_t *node);
-bool extent_node_zeroed_get(const extent_node_t *node);
-bool extent_node_committed_get(const extent_node_t *node);
-bool extent_node_achunk_get(const extent_node_t *node);
-prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
-void extent_node_arena_set(extent_node_t *node, arena_t *arena);
-void extent_node_addr_set(extent_node_t *node, void *addr);
-void extent_node_size_set(extent_node_t *node, size_t size);
-void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
-void extent_node_committed_set(extent_node_t *node, bool committed);
-void extent_node_achunk_set(extent_node_t *node, bool achunk);
-void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
-void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
+arena_t *extent_arena_get(const extent_t *extent);
+void *extent_addr_get(const extent_t *extent);
+size_t extent_size_get(const extent_t *extent);
+bool extent_zeroed_get(const extent_t *extent);
+bool extent_committed_get(const extent_t *extent);
+bool extent_achunk_get(const extent_t *extent);
+prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
+void extent_arena_set(extent_t *extent, arena_t *arena);
+void extent_addr_set(extent_t *extent, void *addr);
+void extent_size_set(extent_t *extent, size_t size);
+void extent_zeroed_set(extent_t *extent, bool zeroed);
+void extent_committed_set(extent_t *extent, bool committed);
+void extent_achunk_set(extent_t *extent, bool achunk);
+void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
+void extent_init(extent_t *extent, arena_t *arena, void *addr,
size_t size, bool zeroed, bool committed);
-void extent_node_dirty_linkage_init(extent_node_t *node);
-void extent_node_dirty_insert(extent_node_t *node,
- arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
-void extent_node_dirty_remove(extent_node_t *node);
+void extent_dirty_linkage_init(extent_t *extent);
+void extent_dirty_insert(extent_t *extent,
+ arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty);
+void extent_dirty_remove(extent_t *extent);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
-extent_node_arena_get(const extent_node_t *node)
+extent_arena_get(const extent_t *extent)
{
- return (node->en_arena);
+ return (extent->e_arena);
}
JEMALLOC_INLINE void *
-extent_node_addr_get(const extent_node_t *node)
+extent_addr_get(const extent_t *extent)
{
- return (node->en_addr);
+ return (extent->e_addr);
}
JEMALLOC_INLINE size_t
-extent_node_size_get(const extent_node_t *node)
+extent_size_get(const extent_t *extent)
{
- return (node->en_size);
+ return (extent->e_size);
}
JEMALLOC_INLINE bool
-extent_node_zeroed_get(const extent_node_t *node)
+extent_zeroed_get(const extent_t *extent)
{
- return (node->en_zeroed);
+ return (extent->e_zeroed);
}
JEMALLOC_INLINE bool
-extent_node_committed_get(const extent_node_t *node)
+extent_committed_get(const extent_t *extent)
{
- assert(!node->en_achunk);
- return (node->en_committed);
+ assert(!extent->e_achunk);
+ return (extent->e_committed);
}
JEMALLOC_INLINE bool
-extent_node_achunk_get(const extent_node_t *node)
+extent_achunk_get(const extent_t *extent)
{
- return (node->en_achunk);
+ return (extent->e_achunk);
}
JEMALLOC_INLINE prof_tctx_t *
-extent_node_prof_tctx_get(const extent_node_t *node)
+extent_prof_tctx_get(const extent_t *extent)
{
- return (node->en_prof_tctx);
+ return (extent->e_prof_tctx);
}
JEMALLOC_INLINE void
-extent_node_arena_set(extent_node_t *node, arena_t *arena)
+extent_arena_set(extent_t *extent, arena_t *arena)
{
- node->en_arena = arena;
+ extent->e_arena = arena;
}
JEMALLOC_INLINE void
-extent_node_addr_set(extent_node_t *node, void *addr)
+extent_addr_set(extent_t *extent, void *addr)
{
- node->en_addr = addr;
+ extent->e_addr = addr;
}
JEMALLOC_INLINE void
-extent_node_size_set(extent_node_t *node, size_t size)
+extent_size_set(extent_t *extent, size_t size)
{
- node->en_size = size;
+ extent->e_size = size;
}
JEMALLOC_INLINE void
-extent_node_zeroed_set(extent_node_t *node, bool zeroed)
+extent_zeroed_set(extent_t *extent, bool zeroed)
{
- node->en_zeroed = zeroed;
+ extent->e_zeroed = zeroed;
}
JEMALLOC_INLINE void
-extent_node_committed_set(extent_node_t *node, bool committed)
+extent_committed_set(extent_t *extent, bool committed)
{
- node->en_committed = committed;
+ extent->e_committed = committed;
}
JEMALLOC_INLINE void
-extent_node_achunk_set(extent_node_t *node, bool achunk)
+extent_achunk_set(extent_t *extent, bool achunk)
{
- node->en_achunk = achunk;
+ extent->e_achunk = achunk;
}
JEMALLOC_INLINE void
-extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
+extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
{
- node->en_prof_tctx = tctx;
+ extent->e_prof_tctx = tctx;
}
JEMALLOC_INLINE void
-extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
+extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool zeroed, bool committed)
{
- extent_node_arena_set(node, arena);
- extent_node_addr_set(node, addr);
- extent_node_size_set(node, size);
- extent_node_zeroed_set(node, zeroed);
- extent_node_committed_set(node, committed);
- extent_node_achunk_set(node, false);
+ extent_arena_set(extent, arena);
+ extent_addr_set(extent, addr);
+ extent_size_set(extent, size);
+ extent_zeroed_set(extent, zeroed);
+ extent_committed_set(extent, committed);
+ extent_achunk_set(extent, false);
if (config_prof)
- extent_node_prof_tctx_set(node, NULL);
+ extent_prof_tctx_set(extent, NULL);
}
JEMALLOC_INLINE void
-extent_node_dirty_linkage_init(extent_node_t *node)
+extent_dirty_linkage_init(extent_t *extent)
{
- qr_new(&node->rd, rd_link);
- qr_new(node, cc_link);
+ qr_new(&extent->rd, rd_link);
+ qr_new(extent, cc_link);
}
JEMALLOC_INLINE void
-extent_node_dirty_insert(extent_node_t *node,
- arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
+extent_dirty_insert(extent_t *extent,
+ arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty)
{
- qr_meld(runs_dirty, &node->rd, rd_link);
- qr_meld(chunks_dirty, node, cc_link);
+ qr_meld(runs_dirty, &extent->rd, rd_link);
+ qr_meld(chunks_dirty, extent, cc_link);
}
JEMALLOC_INLINE void
-extent_node_dirty_remove(extent_node_t *node)
+extent_dirty_remove(extent_t *extent)
{
- qr_remove(&node->rd, rd_link);
- qr_remove(node, cc_link);
+ qr_remove(&extent->rd, rd_link);
+ qr_remove(extent, cc_link);
}
-
#endif
#endif /* JEMALLOC_H_INLINES */
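
For illustration, a minimal sketch exercising the renamed initializer and accessors (arena and addr are hypothetical inputs; chunksize is jemalloc's internal chunk size):

	extent_t extent;

	extent_init(&extent, arena, addr, chunksize, false, true);
	assert(extent_arena_get(&extent) == arena);
	assert(extent_size_get(&extent) == chunksize);
	assert(!extent_zeroed_get(&extent));
	assert(extent_committed_get(&extent));	/* Valid: achunk is false here. */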
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index eabb9ce..e487db1 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -966,6 +966,7 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#include "jemalloc/internal/hash.h"
#ifndef JEMALLOC_ENABLE_INLINE
+extent_t *iealloc(const void *ptr);
arena_t *iaalloc(const void *ptr);
size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
@@ -995,6 +996,13 @@ bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_ALWAYS_INLINE extent_t *
+iealloc(const void *ptr)
+{
+
+ return (chunk_lookup(ptr, true));
+}
+
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{
@@ -1086,15 +1094,15 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
- extent_node_t *node;
+ extent_t *extent;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
- node = chunk_lookup(ptr, false);
- if (node == NULL)
+ extent = chunk_lookup(ptr, false);
+ if (extent == NULL)
return (0);
/* Only arena chunks should be looked up via interior pointers. */
- assert(extent_node_addr_get(node) == ptr ||
- extent_node_achunk_get(node));
+ assert(extent_addr_get(extent) == ptr ||
+ extent_achunk_get(extent));
return (isalloc(tsdn, ptr, demote));
}
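
For illustration, the new iealloc() contrasted with the non-dependent path in ivsalloc() above, assuming a pointer p already known to be jemalloc-managed (p is hypothetical):

	/* Dependent lookup: must succeed, so the result is never NULL. */
	extent_t *extent = iealloc(p);
	arena_t *arena = extent_arena_get(extent);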
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index e046c3b..61b29b9 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -35,6 +35,8 @@ arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
+arena_extent_alloc
+arena_extent_dalloc
arena_get
arena_ichoose
arena_init
@@ -78,8 +80,6 @@ arena_miscelm_get_mutable
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_new
-arena_node_alloc
-arena_node_dalloc
arena_nthreads_dec
arena_nthreads_get
arena_nthreads_inc
@@ -204,24 +204,22 @@ ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
-extent_node_achunk_get
-extent_node_achunk_set
-extent_node_addr_get
-extent_node_addr_set
-extent_node_arena_get
-extent_node_arena_set
-extent_node_committed_get
-extent_node_committed_set
-extent_node_dirty_insert
-extent_node_dirty_linkage_init
-extent_node_dirty_remove
-extent_node_init
-extent_node_prof_tctx_get
-extent_node_prof_tctx_set
-extent_node_size_get
-extent_node_size_set
-extent_node_zeroed_get
-extent_node_zeroed_set
+extent_achunk_get
+extent_achunk_set
+extent_addr_get
+extent_addr_set
+extent_arena_get
+extent_arena_set
+extent_committed_get
+extent_committed_set
+extent_dirty_insert
+extent_dirty_linkage_init
+extent_dirty_remove
+extent_init
+extent_prof_tctx_get
+extent_prof_tctx_set
+extent_size_get
+extent_size_set
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
@@ -260,6 +258,8 @@ extent_tree_szad_reverse_iter
extent_tree_szad_reverse_iter_recurse
extent_tree_szad_reverse_iter_start
extent_tree_szad_search
+extent_zeroed_get
+extent_zeroed_set
ffs_llu
ffs_lu
ffs_u
@@ -294,6 +294,7 @@ iallocztm
iarena_cleanup
idalloc
idalloctm
+iealloc
index2size
index2size_compute
index2size_lookup
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index 8d0c584..45e49b7 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -39,7 +39,7 @@ struct rtree_node_elm_s {
union {
void *pun;
rtree_node_elm_t *child;
- extent_node_t *val;
+ extent_t *val;
};
};
@@ -116,17 +116,17 @@ rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level, bool dependent);
-extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
+extent_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
- const extent_node_t *val);
+ const extent_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
bool dependent);
-extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
-bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
+extent_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
+bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
@@ -186,7 +186,7 @@ rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
return (child);
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
@@ -209,7 +209,7 @@ rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
}
JEMALLOC_INLINE void
-rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_t *val)
{
atomic_write_p(&elm->pun, val);
@@ -240,7 +240,7 @@ rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
return (subtree);
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
@@ -332,7 +332,7 @@ rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
}
JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_t *val)
{
uintptr_t subkey;
unsigned i, start_level;
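
For illustration, a minimal sketch of the renamed rtree value type in use, keyed by chunk address as chunks_rtree does (extent is a hypothetical initialized extent, and the error-return convention shown is an assumption):

	uintptr_t key = (uintptr_t)extent_addr_get(extent);

	if (rtree_set(&chunks_rtree, key, extent)) {
		/* Assumed convention: a true return indicates failure. */
	}
	assert(rtree_get(&chunks_rtree, key, true) == extent);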
diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h
index d78dca2..c68c969 100644
--- a/include/jemalloc/internal/witness.h
+++ b/include/jemalloc/internal/witness.h
@@ -24,7 +24,7 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
#define WITNESS_RANK_ARENA 8U
#define WITNESS_RANK_ARENA_CHUNKS 9U
-#define WITNESS_RANK_ARENA_NODE_CACHE 10
+#define WITNESS_RANK_ARENA_EXTENT_CACHE 10
#define WITNESS_RANK_BASE 11U