author     David Goldblatt <davidgoldblatt@fb.com>     2019-12-09 22:36:45 (GMT)
committer  David Goldblatt <davidtgoldblatt@gmail.com> 2019-12-20 18:18:40 (GMT)
commit     a7862df6169f27d9f347343ffef2bef3e167317c (patch)
tree       192337360f39d30a5b1d22e0bd95735d0db519fa
parent     865debda2276fee0257c90678bafd1bd2f73df6a (diff)
Rename extent_t to edata_t.
This frees us up from the unfortunate extent/extent2 naming collision.
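
For orientation before the file list: the renamed edata_t keeps the same packed e_bits layout as extent_t, only under EDATA_-prefixed names. Below is a minimal standalone sketch of that accessor pattern, compiled in isolation; the 12-bit arena width and the single-field struct are simplified stand-ins for illustration, not the actual definitions in the edata.h diff further down.

/*
 * Minimal sketch of the e_bits get/set pattern (simplified; assumes a
 * 12-bit arena index field at bit 0, which is not the real layout).
 */
#include <assert.h>
#include <stdint.h>

#define MASK(width, shift) (((((uint64_t)0x1U) << (width)) - 1) << (shift))

#define EDATA_BITS_ARENA_WIDTH  12
#define EDATA_BITS_ARENA_SHIFT  0
#define EDATA_BITS_ARENA_MASK   MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)

typedef struct edata_s {
        uint64_t e_bits;        /* packed metadata fields */
} edata_t;

static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
        return (unsigned)((edata->e_bits & EDATA_BITS_ARENA_MASK) >>
            EDATA_BITS_ARENA_SHIFT);
}

static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
        edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
            ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}

int
main(void) {
        edata_t edata = {0};
        edata_arena_ind_set(&edata, 7);
        assert(edata_arena_ind_get(&edata) == 7);
        return 0;
}
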
-rw-r--r--  include/jemalloc/internal/arena_externs.h              14
-rw-r--r--  include/jemalloc/internal/arena_inlines_b.h             86
-rw-r--r--  include/jemalloc/internal/arena_stats.h                  4
-rw-r--r--  include/jemalloc/internal/arena_structs.h               14
-rw-r--r--  include/jemalloc/internal/base_externs.h                 2
-rw-r--r--  include/jemalloc/internal/base_structs.h                 4
-rw-r--r--  include/jemalloc/internal/bin.h                          6
-rw-r--r--  include/jemalloc/internal/bin_types.h                    2
-rw-r--r--  include/jemalloc/internal/edata.h                      469
-rw-r--r--  include/jemalloc/internal/eset.h                        10
-rw-r--r--  include/jemalloc/internal/extent2.h                     34
-rw-r--r--  include/jemalloc/internal/jemalloc_internal_inlines_b.h  4
-rw-r--r--  include/jemalloc/internal/large_externs.h               16
-rw-r--r--  include/jemalloc/internal/rtree.h                       72
-rw-r--r--  include/jemalloc/internal/witness.h                      2
-rw-r--r--  src/arena.c                                            296
-rw-r--r--  src/base.c                                              76
-rw-r--r--  src/bin.c                                                4
-rw-r--r--  src/ctl.c                                               16
-rw-r--r--  src/edata.c                                              6
-rw-r--r--  src/ehooks.c                                             4
-rw-r--r--  src/eset.c                                              68
-rw-r--r--  src/extent2.c                                          837
-rw-r--r--  src/extent_dss.c                                        10
-rw-r--r--  src/inspect.c                                           38
-rw-r--r--  src/large.c                                            144
-rw-r--r--  src/tcache.c                                            64
-rw-r--r--  test/unit/arena_reset.c                                 10
-rw-r--r--  test/unit/base.c                                         6
-rw-r--r--  test/unit/binshard.c                                    10
-rw-r--r--  test/unit/rtree.c                                       72
-rw-r--r--  test/unit/slab.c                                        10
32 files changed, 1201 insertions, 1209 deletions
diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h
index b6b33ce..608dda7 100644
--- a/include/jemalloc/internal/arena_externs.h
+++ b/include/jemalloc/internal/arena_externs.h
@@ -28,18 +28,18 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
bin_stats_data_t *bstats, arena_stats_large_t *lstats,
arena_stats_extents_t *estats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
- ehooks_t *ehooks, extent_t *extent);
+ ehooks_t *ehooks, edata_t *edata);
#ifdef JEMALLOC_JET
-size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
+size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
#endif
-extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
+edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent);
+ edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, size_t oldsize);
+ edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, size_t oldsize);
+ edata_t *edata, size_t oldsize);
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
@@ -64,7 +64,7 @@ void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *extent, void *ptr);
+ szind_t binind, edata_t *edata, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);
diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index 16da67e..6dacab3 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -9,8 +9,8 @@
#include "jemalloc/internal/ticker.h"
static inline arena_t *
-arena_get_from_extent(extent_t *extent) {
- return (arena_t *)atomic_load_p(&arenas[extent_arena_ind_get(extent)],
+arena_get_from_edata(edata_t *edata) {
+ return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
ATOMIC_RELAXED);
}
@@ -42,20 +42,20 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx,
assert(ptr != NULL);
assert(prof_info != NULL);
- const extent_t *extent;
+ const edata_t *edata;
bool is_slab;
/* Static check. */
if (alloc_ctx == NULL) {
- extent = iealloc(tsd_tsdn(tsd), ptr);
- is_slab = extent_slab_get(extent);
+ edata = iealloc(tsd_tsdn(tsd), ptr);
+ is_slab = edata_slab_get(edata);
} else if (!unlikely(is_slab = alloc_ctx->slab)) {
- extent = iealloc(tsd_tsdn(tsd), ptr);
+ edata = iealloc(tsd_tsdn(tsd), ptr);
}
if (unlikely(!is_slab)) {
- /* extent must have been initialized at this point. */
- large_prof_info_get(extent, prof_info);
+ /* edata must have been initialized at this point. */
+ large_prof_info_get(edata, prof_info);
} else {
memset(prof_info, 0, sizeof(prof_info_t));
prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
@@ -69,9 +69,9 @@ arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx) {
/* Static check. */
if (alloc_ctx == NULL) {
- extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
- if (unlikely(!extent_slab_get(extent))) {
- large_prof_tctx_reset(extent);
+ edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+ if (unlikely(!edata_slab_get(edata))) {
+ large_prof_tctx_reset(edata);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
@@ -85,10 +85,10 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
- assert(!extent_slab_get(extent));
+ edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+ assert(!edata_slab_get(edata));
- large_prof_tctx_reset(extent);
+ large_prof_tctx_reset(edata);
}
JEMALLOC_ALWAYS_INLINE void
@@ -96,9 +96,9 @@ arena_prof_info_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx) {
cassert(config_prof);
assert(ptr != NULL);
- extent_t *extent = iealloc(tsd_tsdn(tsd), ptr);
- assert(!extent_slab_get(extent));
- large_prof_info_set(extent, tctx);
+ edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+ assert(!edata_slab_get(edata));
+ large_prof_info_set(edata, tctx);
}
JEMALLOC_ALWAYS_INLINE void
@@ -130,9 +130,9 @@ arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE void
arena_decay_extent(tsdn_t *tsdn,arena_t *arena, ehooks_t *ehooks,
- extent_t *extent) {
- size_t extent_size = extent_size_get(extent);
- extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
+ edata_t *edata) {
+ size_t extent_size = edata_size_get(edata);
+ extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
if (config_stats) {
/* Update stats accordingly. */
arena_stats_lock(tsdn, &arena->stats);
@@ -169,7 +169,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
- return (arena_t *)atomic_load_p(&arenas[extent_arena_ind_get(
+ return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(
iealloc(tsdn, ptr))], ATOMIC_RELAXED);
}
@@ -201,19 +201,19 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- extent_t *extent;
+ edata_t *edata;
szind_t szind;
- if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, false, &extent, &szind)) {
+ if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, false, &edata, &szind)) {
return 0;
}
- if (extent == NULL) {
+ if (edata == NULL) {
return 0;
}
- assert(extent_state_get(extent) == extent_state_active);
+ assert(edata_state_get(edata) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
- assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
+ assert(edata_addr_get(edata) == ptr || edata_slab_get(edata));
assert(szind != SC_NSIZES);
@@ -225,8 +225,8 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, NULL, true);
} else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
+ edata_t *edata = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, edata);
}
}
@@ -243,11 +243,11 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
true, &szind, &slab);
if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+ edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
+ assert(szind == edata_szind_get(edata));
assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
+ assert(slab == edata_slab_get(edata));
}
if (likely(slab)) {
@@ -269,8 +269,8 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
slow_path);
}
} else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
+ edata_t *edata = iealloc(tsdn, ptr);
+ large_dalloc(tsdn, edata);
}
}
@@ -300,11 +300,11 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
if (config_debug) {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+ edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
+ assert(szind == edata_szind_get(edata));
assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
+ assert(slab == edata_slab_get(edata));
}
if (likely(slab)) {
@@ -344,10 +344,10 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn,
+ edata_t *edata = rtree_edata_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
+ assert(szind == edata_szind_get(edata));
+ assert(slab == edata_slab_get(edata));
}
}
@@ -401,10 +401,10 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
- extent_t *extent = rtree_extent_read(tsdn,
+ edata_t *edata = rtree_edata_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
+ assert(szind == edata_szind_get(edata));
+ assert(slab == edata_slab_get(edata));
}
if (likely(slab)) {
diff --git a/include/jemalloc/internal/arena_stats.h b/include/jemalloc/internal/arena_stats.h
index 23949ed..4166705 100644
--- a/include/jemalloc/internal/arena_stats.h
+++ b/include/jemalloc/internal/arena_stats.h
@@ -94,8 +94,8 @@ struct arena_stats_s {
*/
atomic_zu_t retained; /* Derived. */
- /* Number of extent_t structs allocated by base, but not being used. */
- atomic_zu_t extent_avail;
+ /* Number of edata_t structs allocated by base, but not being used. */
+ atomic_zu_t edata_avail;
arena_stats_decay_t decay_dirty;
arena_stats_decay_t decay_muzzy;
diff --git a/include/jemalloc/internal/arena_structs.h b/include/jemalloc/internal/arena_structs.h
index bc8c039..aac620b 100644
--- a/include/jemalloc/internal/arena_structs.h
+++ b/include/jemalloc/internal/arena_structs.h
@@ -144,7 +144,7 @@ struct arena_s {
*
* Synchronization: large_mtx.
*/
- extent_list_t large;
+ edata_list_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;
@@ -185,14 +185,14 @@ struct arena_s {
malloc_mutex_t extent_grow_mtx;
/*
- * Available extent structures that were allocated via
- * base_alloc_extent().
+ * Available edata structures that were allocated via
+ * base_alloc_edata().
*
- * Synchronization: extent_avail_mtx.
+ * Synchronization: edata_avail_mtx.
*/
- extent_tree_t extent_avail;
- atomic_zu_t extent_avail_cnt;
- malloc_mutex_t extent_avail_mtx;
+ edata_tree_t edata_avail;
+ atomic_zu_t edata_avail_cnt;
+ malloc_mutex_t edata_avail_mtx;
/*
* bins is used to store heaps of free regions.
diff --git a/include/jemalloc/internal/base_externs.h b/include/jemalloc/internal/base_externs.h
index 35734c3..2f24131 100644
--- a/include/jemalloc/internal/base_externs.h
+++ b/include/jemalloc/internal/base_externs.h
@@ -11,7 +11,7 @@ ehooks_t *base_ehooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
-extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
+edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
diff --git a/include/jemalloc/internal/base_structs.h b/include/jemalloc/internal/base_structs.h
index 1097892..fb7e68a 100644
--- a/include/jemalloc/internal/base_structs.h
+++ b/include/jemalloc/internal/base_structs.h
@@ -16,7 +16,7 @@ struct base_block_s {
base_block_t *next;
/* Tracks unused trailing space. */
- extent_t extent;
+ edata_t edata;
};
struct base_s {
@@ -47,7 +47,7 @@ struct base_s {
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
- extent_heap_t avail[SC_NSIZES];
+ edata_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;
diff --git a/include/jemalloc/internal/bin.h b/include/jemalloc/internal/bin.h
index 8cc7fed..9a774e9 100644
--- a/include/jemalloc/internal/bin.h
+++ b/include/jemalloc/internal/bin.h
@@ -22,17 +22,17 @@ struct bin_s {
* slabcur is reassigned, the previous slab must be deallocated or
* inserted into slabs_{nonfull,full}.
*/
- extent_t *slabcur;
+ edata_t *slabcur;
/*
* Heap of non-full slabs. This heap is used to assure that new
* allocations come from the non-full slab that is oldest/lowest in
* memory.
*/
- extent_heap_t slabs_nonfull;
+ edata_heap_t slabs_nonfull;
/* List used to track full slabs. */
- extent_list_t slabs_full;
+ edata_list_t slabs_full;
/* Bin statistics. */
bin_stats_t stats;
diff --git a/include/jemalloc/internal/bin_types.h b/include/jemalloc/internal/bin_types.h
index 3533606..945e832 100644
--- a/include/jemalloc/internal/bin_types.h
+++ b/include/jemalloc/internal/bin_types.h
@@ -3,7 +3,7 @@
#include "jemalloc/internal/sc.h"
-#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
+#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1
/* Used in TSD static initializer only. Real init in arena_bind(). */
diff --git a/include/jemalloc/internal/edata.h b/include/jemalloc/internal/edata.h
index 2fd6e90..990c325 100644
--- a/include/jemalloc/internal/edata.h
+++ b/include/jemalloc/internal/edata.h
@@ -1,5 +1,5 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_H
-#define JEMALLOC_INTERNAL_EXTENT_H
+#ifndef JEMALLOC_INTERNAL_EDATA_H
+#define JEMALLOC_INTERNAL_EDATA_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
@@ -26,11 +26,11 @@ enum extent_head_state_e {
typedef enum extent_head_state_e extent_head_state_t;
/* Extent (span of pages). Use accessor functions for e_* fields. */
-typedef struct extent_s extent_t;
-typedef ql_head(extent_t) extent_list_t;
-typedef ph(extent_t) extent_tree_t;
-typedef ph(extent_t) extent_heap_t;
-struct extent_s {
+typedef struct edata_s edata_t;
+typedef ql_head(edata_t) edata_list_t;
+typedef ph(edata_t) edata_tree_t;
+typedef ph(edata_t) edata_heap_t;
+struct edata_s {
/*
* Bitfield containing several fields:
*
@@ -105,48 +105,48 @@ struct extent_s {
uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
-#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
-#define EXTENT_BITS_ARENA_SHIFT 0
-#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
+#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
+#define EDATA_BITS_ARENA_SHIFT 0
+#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
-#define EXTENT_BITS_SLAB_WIDTH 1
-#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
-#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
+#define EDATA_BITS_SLAB_WIDTH 1
+#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
+#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
-#define EXTENT_BITS_COMMITTED_WIDTH 1
-#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
-#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
+#define EDATA_BITS_COMMITTED_WIDTH 1
+#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
+#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
-#define EXTENT_BITS_DUMPABLE_WIDTH 1
-#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
-#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
+#define EDATA_BITS_DUMPABLE_WIDTH 1
+#define EDATA_BITS_DUMPABLE_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
+#define EDATA_BITS_DUMPABLE_MASK MASK(EDATA_BITS_DUMPABLE_WIDTH, EDATA_BITS_DUMPABLE_SHIFT)
-#define EXTENT_BITS_ZEROED_WIDTH 1
-#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
-#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
+#define EDATA_BITS_ZEROED_WIDTH 1
+#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_DUMPABLE_WIDTH + EDATA_BITS_DUMPABLE_SHIFT)
+#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
-#define EXTENT_BITS_STATE_WIDTH 2
-#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
-#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
+#define EDATA_BITS_STATE_WIDTH 2
+#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
+#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
-#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
-#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
-#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
+#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
+#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
+#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
-#define EXTENT_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
-#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
-#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
+#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
+#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
+#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
-#define EXTENT_BITS_BINSHARD_WIDTH 6
-#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
-#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
+#define EDATA_BITS_BINSHARD_WIDTH 6
+#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
+#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
-#define EXTENT_BITS_IS_HEAD_WIDTH 1
-#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
-#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
+#define EDATA_BITS_IS_HEAD_WIDTH 1
+#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
+#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
-#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
-#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
+#define EDATA_BITS_SN_SHIFT (EDATA_BITS_IS_HEAD_WIDTH + EDATA_BITS_IS_HEAD_SHIFT)
+#define EDATA_BITS_SN_MASK (UINT64_MAX << EDATA_BITS_SN_SHIFT)
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
@@ -160,8 +160,8 @@ struct extent_s {
* ssssssss [...] ssssssss ssssnnnn nnnnnnnn
*/
size_t e_size_esn;
- #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
- #define EXTENT_ESN_MASK ((size_t)PAGE-1)
+ #define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
+ #define EDATA_ESN_MASK ((size_t)PAGE-1)
/* Base extent size, which may not be a multiple of PAGE. */
size_t e_bsize;
};
@@ -173,13 +173,13 @@ struct extent_s {
* - stashed dirty extents
* - arena's large allocations
*/
- ql_elm(extent_t) ql_link;
+ ql_elm(edata_t) ql_link;
/*
* Linkage for per size class sn/address-ordered heaps, and
* for extent_avail
*/
- phn(extent_t) ph_link;
+ phn(edata_t) ph_link;
union {
/* Small region slab metadata. */
@@ -196,398 +196,397 @@ struct extent_s {
};
static inline unsigned
-extent_arena_ind_get(const extent_t *extent) {
- unsigned arena_ind = (unsigned)((extent->e_bits &
- EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
+edata_arena_ind_get(const edata_t *edata) {
+ unsigned arena_ind = (unsigned)((edata->e_bits &
+ EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
assert(arena_ind < MALLOCX_ARENA_LIMIT);
return arena_ind;
}
static inline szind_t
-extent_szind_get_maybe_invalid(const extent_t *extent) {
- szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
- EXTENT_BITS_SZIND_SHIFT);
+edata_szind_get_maybe_invalid(const edata_t *edata) {
+ szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
+ EDATA_BITS_SZIND_SHIFT);
assert(szind <= SC_NSIZES);
return szind;
}
static inline szind_t
-extent_szind_get(const extent_t *extent) {
- szind_t szind = extent_szind_get_maybe_invalid(extent);
+edata_szind_get(const edata_t *edata) {
+ szind_t szind = edata_szind_get_maybe_invalid(edata);
assert(szind < SC_NSIZES); /* Never call when "invalid". */
return szind;
}
static inline size_t
-extent_usize_get(const extent_t *extent) {
- return sz_index2size(extent_szind_get(extent));
+edata_usize_get(const edata_t *edata) {
+ return sz_index2size(edata_szind_get(edata));
}
static inline unsigned
-extent_binshard_get(const extent_t *extent) {
- unsigned binshard = (unsigned)((extent->e_bits &
- EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+edata_binshard_get(const edata_t *edata) {
+ unsigned binshard = (unsigned)((edata->e_bits &
+ EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
+ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
return binshard;
}
static inline size_t
-extent_sn_get(const extent_t *extent) {
- return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
- EXTENT_BITS_SN_SHIFT);
+edata_sn_get(const edata_t *edata) {
+ return (size_t)((edata->e_bits & EDATA_BITS_SN_MASK) >>
+ EDATA_BITS_SN_SHIFT);
}
static inline extent_state_t
-extent_state_get(const extent_t *extent) {
- return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
- EXTENT_BITS_STATE_SHIFT);
+edata_state_get(const edata_t *edata) {
+ return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
+ EDATA_BITS_STATE_SHIFT);
}
static inline bool
-extent_zeroed_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
- EXTENT_BITS_ZEROED_SHIFT);
+edata_zeroed_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
+ EDATA_BITS_ZEROED_SHIFT);
}
static inline bool
-extent_committed_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
- EXTENT_BITS_COMMITTED_SHIFT);
+edata_committed_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
+ EDATA_BITS_COMMITTED_SHIFT);
}
static inline bool
-extent_dumpable_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
- EXTENT_BITS_DUMPABLE_SHIFT);
+edata_dumpable_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_DUMPABLE_MASK) >>
+ EDATA_BITS_DUMPABLE_SHIFT);
}
static inline bool
-extent_slab_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
- EXTENT_BITS_SLAB_SHIFT);
+edata_slab_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
+ EDATA_BITS_SLAB_SHIFT);
}
static inline unsigned
-extent_nfree_get(const extent_t *extent) {
- assert(extent_slab_get(extent));
- return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
- EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_get(const edata_t *edata) {
+ assert(edata_slab_get(edata));
+ return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
+ EDATA_BITS_NFREE_SHIFT);
}
static inline void *
-extent_base_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent_slab_get(extent));
- return PAGE_ADDR2BASE(extent->e_addr);
+edata_base_get(const edata_t *edata) {
+ assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
+ !edata_slab_get(edata));
+ return PAGE_ADDR2BASE(edata->e_addr);
}
static inline void *
-extent_addr_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent_slab_get(extent));
- return extent->e_addr;
+edata_addr_get(const edata_t *edata) {
+ assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
+ !edata_slab_get(edata));
+ return edata->e_addr;
}
static inline size_t
-extent_size_get(const extent_t *extent) {
- return (extent->e_size_esn & EXTENT_SIZE_MASK);
+edata_size_get(const edata_t *edata) {
+ return (edata->e_size_esn & EDATA_SIZE_MASK);
}
static inline size_t
-extent_esn_get(const extent_t *extent) {
- return (extent->e_size_esn & EXTENT_ESN_MASK);
+edata_esn_get(const edata_t *edata) {
+ return (edata->e_size_esn & EDATA_ESN_MASK);
}
static inline size_t
-extent_bsize_get(const extent_t *extent) {
- return extent->e_bsize;
+edata_bsize_get(const edata_t *edata) {
+ return edata->e_bsize;
}
static inline void *
-extent_before_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
+edata_before_get(const edata_t *edata) {
+ return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}
static inline void *
-extent_last_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent) - PAGE);
+edata_last_get(const edata_t *edata) {
+ return (void *)((uintptr_t)edata_base_get(edata) +
+ edata_size_get(edata) - PAGE);
}
static inline void *
-extent_past_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent));
+edata_past_get(const edata_t *edata) {
+ return (void *)((uintptr_t)edata_base_get(edata) +
+ edata_size_get(edata));
}
static inline slab_data_t *
-extent_slab_data_get(extent_t *extent) {
- assert(extent_slab_get(extent));
- return &extent->e_slab_data;
+edata_slab_data_get(edata_t *edata) {
+ assert(edata_slab_get(edata));
+ return &edata->e_slab_data;
}
static inline const slab_data_t *
-extent_slab_data_get_const(const extent_t *extent) {
- assert(extent_slab_get(extent));
- return &extent->e_slab_data;
+edata_slab_data_get_const(const edata_t *edata) {
+ assert(edata_slab_get(edata));
+ return &edata->e_slab_data;
}
static inline void
-extent_prof_info_get(const extent_t *extent, prof_info_t *prof_info) {
+edata_prof_info_get(const edata_t *edata, prof_info_t *prof_info) {
assert(prof_info != NULL);
prof_info->alloc_tctx = (prof_tctx_t *)atomic_load_p(
- &extent->e_prof_tctx, ATOMIC_ACQUIRE);
- prof_info->alloc_time = extent->e_alloc_time;
+ &edata->e_prof_tctx, ATOMIC_ACQUIRE);
+ prof_info->alloc_time = edata->e_alloc_time;
}
static inline void
-extent_arena_ind_set(extent_t *extent, unsigned arena_ind) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
- ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
+edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
+ ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}
static inline void
-extent_binshard_set(extent_t *extent, unsigned binshard) {
+edata_binshard_set(edata_t *edata, unsigned binshard) {
/* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
- ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
+ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
+ ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}
static inline void
-extent_addr_set(extent_t *extent, void *addr) {
- extent->e_addr = addr;
+edata_addr_set(edata_t *edata, void *addr) {
+ edata->e_addr = addr;
}
static inline void
-extent_size_set(extent_t *extent, size_t size) {
- assert((size & ~EXTENT_SIZE_MASK) == 0);
- extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
+edata_size_set(edata_t *edata, size_t size) {
+ assert((size & ~EDATA_SIZE_MASK) == 0);
+ edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}
static inline void
-extent_esn_set(extent_t *extent, size_t esn) {
- extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
- EXTENT_ESN_MASK);
+edata_esn_set(edata_t *edata, size_t esn) {
+ edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
+ EDATA_ESN_MASK);
}
static inline void
-extent_bsize_set(extent_t *extent, size_t bsize) {
- extent->e_bsize = bsize;
+edata_bsize_set(edata_t *edata, size_t bsize) {
+ edata->e_bsize = bsize;
}
static inline void
-extent_szind_set(extent_t *extent, szind_t szind) {
+edata_szind_set(edata_t *edata, szind_t szind) {
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
- ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
+ ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}
static inline void
-extent_nfree_set(extent_t *extent, unsigned nfree) {
- assert(extent_slab_get(extent));
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
- ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_set(edata_t *edata, unsigned nfree) {
+ assert(edata_slab_get(edata));
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
+ ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
-extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
+edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
/* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- extent->e_bits = (extent->e_bits &
- (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
- ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
- ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
+ edata->e_bits = (edata->e_bits &
+ (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
+ ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
+ ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}
static inline void
-extent_nfree_inc(extent_t *extent) {
- assert(extent_slab_get(extent));
- extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_inc(edata_t *edata) {
+ assert(edata_slab_get(edata));
+ edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
-extent_nfree_dec(extent_t *extent) {
- assert(extent_slab_get(extent));
- extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_dec(edata_t *edata) {
+ assert(edata_slab_get(edata));
+ edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}
static inline void
-extent_nfree_sub(extent_t *extent, uint64_t n) {
- assert(extent_slab_get(extent));
- extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
+edata_nfree_sub(edata_t *edata, uint64_t n) {
+ assert(edata_slab_get(edata));
+ edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}
static inline void
-extent_sn_set(extent_t *extent, size_t sn) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
- ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
+edata_sn_set(edata_t *edata, size_t sn) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_SN_MASK) |
+ ((uint64_t)sn << EDATA_BITS_SN_SHIFT);
}
static inline void
-extent_state_set(extent_t *extent, extent_state_t state) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
- ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
+edata_state_set(edata_t *edata, extent_state_t state) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
+ ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}
static inline void
-extent_zeroed_set(extent_t *extent, bool zeroed) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
- ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
+edata_zeroed_set(edata_t *edata, bool zeroed) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
+ ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}
static inline void
-extent_committed_set(extent_t *extent, bool committed) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
- ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
+edata_committed_set(edata_t *edata, bool committed) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
+ ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}
static inline void
-extent_dumpable_set(extent_t *extent, bool dumpable) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
- ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
+edata_dumpable_set(edata_t *edata, bool dumpable) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_DUMPABLE_MASK) |
+ ((uint64_t)dumpable << EDATA_BITS_DUMPABLE_SHIFT);
}
static inline void
-extent_slab_set(extent_t *extent, bool slab) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
- ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
+edata_slab_set(edata_t *edata, bool slab) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
+ ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}
static inline void
-extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
- atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
+edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
+ atomic_store_p(&edata->e_prof_tctx, tctx, ATOMIC_RELEASE);
}
static inline void
-extent_prof_alloc_time_set(extent_t *extent, nstime_t *t) {
- nstime_copy(&extent->e_alloc_time, t);
+edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
+ nstime_copy(&edata->e_alloc_time, t);
}
static inline bool
-extent_is_head_get(extent_t *extent) {
+edata_is_head_get(edata_t *edata) {
if (maps_coalesce) {
not_reached();
}
- return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
- EXTENT_BITS_IS_HEAD_SHIFT);
+ return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
+ EDATA_BITS_IS_HEAD_SHIFT);
}
static inline void
-extent_is_head_set(extent_t *extent, bool is_head) {
+edata_is_head_set(edata_t *edata, bool is_head) {
if (maps_coalesce) {
not_reached();
}
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
- ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
+ ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}
static inline void
-extent_init(extent_t *extent, unsigned arena_ind, void *addr, size_t size,
+edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
bool committed, bool dumpable, extent_head_state_t is_head) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
- extent_arena_ind_set(extent, arena_ind);
- extent_addr_set(extent, addr);
- extent_size_set(extent, size);
- extent_slab_set(extent, slab);
- extent_szind_set(extent, szind);
- extent_sn_set(extent, sn);
- extent_state_set(extent, state);
- extent_zeroed_set(extent, zeroed);
- extent_committed_set(extent, committed);
- extent_dumpable_set(extent, dumpable);
- ql_elm_new(extent, ql_link);
+ edata_arena_ind_set(edata, arena_ind);
+ edata_addr_set(edata, addr);
+ edata_size_set(edata, size);
+ edata_slab_set(edata, slab);
+ edata_szind_set(edata, szind);
+ edata_sn_set(edata, sn);
+ edata_state_set(edata, state);
+ edata_zeroed_set(edata, zeroed);
+ edata_committed_set(edata, committed);
+ edata_dumpable_set(edata, dumpable);
+ ql_elm_new(edata, ql_link);
if (!maps_coalesce) {
- extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
- false);
+ edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
}
if (config_prof) {
- extent_prof_tctx_set(extent, NULL);
+ edata_prof_tctx_set(edata, NULL);
}
}
static inline void
-extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
- extent_arena_ind_set(extent, (1U << MALLOCX_ARENA_BITS) - 1);
- extent_addr_set(extent, addr);
- extent_bsize_set(extent, bsize);
- extent_slab_set(extent, false);
- extent_szind_set(extent, SC_NSIZES);
- extent_sn_set(extent, sn);
- extent_state_set(extent, extent_state_active);
- extent_zeroed_set(extent, true);
- extent_committed_set(extent, true);
- extent_dumpable_set(extent, true);
+edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
+ edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
+ edata_addr_set(edata, addr);
+ edata_bsize_set(edata, bsize);
+ edata_slab_set(edata, false);
+ edata_szind_set(edata, SC_NSIZES);
+ edata_sn_set(edata, sn);
+ edata_state_set(edata, extent_state_active);
+ edata_zeroed_set(edata, true);
+ edata_committed_set(edata, true);
+ edata_dumpable_set(edata, true);
}
static inline void
-extent_list_init(extent_list_t *list) {
+edata_list_init(edata_list_t *list) {
ql_new(list);
}
-static inline extent_t *
-extent_list_first(const extent_list_t *list) {
+static inline edata_t *
+edata_list_first(const edata_list_t *list) {
return ql_first(list);
}
-static inline extent_t *
-extent_list_last(const extent_list_t *list) {
+static inline edata_t *
+edata_list_last(const edata_list_t *list) {
return ql_last(list, ql_link);
}
static inline void
-extent_list_append(extent_list_t *list, extent_t *extent) {
- ql_tail_insert(list, extent, ql_link);
+edata_list_append(edata_list_t *list, edata_t *edata) {
+ ql_tail_insert(list, edata, ql_link);
}
static inline void
-extent_list_prepend(extent_list_t *list, extent_t *extent) {
- ql_head_insert(list, extent, ql_link);
+edata_list_prepend(edata_list_t *list, edata_t *edata) {
+ ql_head_insert(list, edata, ql_link);
}
static inline void
-extent_list_replace(extent_list_t *list, extent_t *to_remove,
- extent_t *to_insert) {
+edata_list_replace(edata_list_t *list, edata_t *to_remove,
+ edata_t *to_insert) {
ql_after_insert(to_remove, to_insert, ql_link);
ql_remove(list, to_remove, ql_link);
}
static inline void
-extent_list_remove(extent_list_t *list, extent_t *extent) {
- ql_remove(list, extent, ql_link);
+edata_list_remove(edata_list_t *list, edata_t *edata) {
+ ql_remove(list, edata, ql_link);
}
static inline int
-extent_sn_comp(const extent_t *a, const extent_t *b) {
- size_t a_sn = extent_sn_get(a);
- size_t b_sn = extent_sn_get(b);
+edata_sn_comp(const edata_t *a, const edata_t *b) {
+ size_t a_sn = edata_sn_get(a);
+ size_t b_sn = edata_sn_get(b);
return (a_sn > b_sn) - (a_sn < b_sn);
}
static inline int
-extent_esn_comp(const extent_t *a, const extent_t *b) {
- size_t a_esn = extent_esn_get(a);
- size_t b_esn = extent_esn_get(b);
+edata_esn_comp(const edata_t *a, const edata_t *b) {
+ size_t a_esn = edata_esn_get(a);
+ size_t b_esn = edata_esn_get(b);
return (a_esn > b_esn) - (a_esn < b_esn);
}
static inline int
-extent_ad_comp(const extent_t *a, const extent_t *b) {
- uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
- uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
+edata_ad_comp(const edata_t *a, const edata_t *b) {
+ uintptr_t a_addr = (uintptr_t)edata_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)edata_addr_get(b);
return (a_addr > b_addr) - (a_addr < b_addr);
}
static inline int
-extent_ead_comp(const extent_t *a, const extent_t *b) {
+edata_ead_comp(const edata_t *a, const edata_t *b) {
uintptr_t a_eaddr = (uintptr_t)a;
uintptr_t b_eaddr = (uintptr_t)b;
@@ -595,32 +594,32 @@ extent_ead_comp(const extent_t *a, const extent_t *b) {
}
static inline int
-extent_snad_comp(const extent_t *a, const extent_t *b) {
+edata_snad_comp(const edata_t *a, const edata_t *b) {
int ret;
- ret = extent_sn_comp(a, b);
+ ret = edata_sn_comp(a, b);
if (ret != 0) {
return ret;
}
- ret = extent_ad_comp(a, b);
+ ret = edata_ad_comp(a, b);
return ret;
}
static inline int
-extent_esnead_comp(const extent_t *a, const extent_t *b) {
+edata_esnead_comp(const edata_t *a, const edata_t *b) {
int ret;
- ret = extent_esn_comp(a, b);
+ ret = edata_esn_comp(a, b);
if (ret != 0) {
return ret;
}
- ret = extent_ead_comp(a, b);
+ ret = edata_ead_comp(a, b);
return ret;
}
-ph_proto(, extent_avail_, extent_tree_t, extent_t)
-ph_proto(, extent_heap_, extent_heap_t, extent_t)
+ph_proto(, edata_avail_, edata_tree_t, edata_t)
+ph_proto(, edata_heap_, edata_heap_t, edata_t)
-#endif /* JEMALLOC_INTERNAL_EXTENT_H */
+#endif /* JEMALLOC_INTERNAL_EDATA_H */
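
A side note on the edata.h diff above: besides e_bits, edata_t also keeps the extent size and the extent serial number (esn) packed into a single size_t, with the page-aligned size in the high bits. A minimal standalone sketch of that packing follows, assuming a 4 KiB stand-in PAGE rather than jemalloc's derived value.

#include <assert.h>
#include <stddef.h>

#define PAGE             ((size_t)4096)  /* stand-in; jemalloc derives this */
#define EDATA_SIZE_MASK  ((size_t)~(PAGE - 1))
#define EDATA_ESN_MASK   ((size_t)PAGE - 1)

typedef struct edata_s {
        size_t e_size_esn;      /* size in the high bits, esn in the low bits */
} edata_t;

static inline size_t
edata_size_get(const edata_t *edata) {
        return edata->e_size_esn & EDATA_SIZE_MASK;
}

static inline size_t
edata_esn_get(const edata_t *edata) {
        return edata->e_size_esn & EDATA_ESN_MASK;
}

static inline void
edata_size_set(edata_t *edata, size_t size) {
        assert((size & ~EDATA_SIZE_MASK) == 0); /* size must be page-aligned */
        edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}

static inline void
edata_esn_set(edata_t *edata, size_t esn) {
        edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) |
            (esn & EDATA_ESN_MASK);
}

int
main(void) {
        edata_t edata = {0};
        edata_size_set(&edata, 8 * PAGE);
        edata_esn_set(&edata, 3);
        assert(edata_size_get(&edata) == 8 * PAGE);
        assert(edata_esn_get(&edata) == 3);
        return 0;
}
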
diff --git a/include/jemalloc/internal/eset.h b/include/jemalloc/internal/eset.h
index 833f19c..e76257a 100644
--- a/include/jemalloc/internal/eset.h
+++ b/include/jemalloc/internal/eset.h
@@ -19,7 +19,7 @@ struct eset_s {
*
* Synchronization: mtx.
*/
- extent_heap_t heaps[SC_NPSIZES + 1];
+ edata_heap_t heaps[SC_NPSIZES + 1];
atomic_zu_t nextents[SC_NPSIZES + 1];
atomic_zu_t nbytes[SC_NPSIZES + 1];
@@ -35,7 +35,7 @@ struct eset_s {
*
* Synchronization: mtx.
*/
- extent_list_t lru;
+ edata_list_t lru;
/*
* Page sum for all extents in heaps.
@@ -67,13 +67,13 @@ size_t eset_nextents_get(eset_t *eset, pszind_t ind);
/* Get the sum total bytes of the extents in the given page size index. */
size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
-void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
-void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent);
+void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
+void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
/*
* Select an extent from this eset of the given size and alignment. Returns
* null if no such item could be found.
*/
-extent_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
+edata_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
size_t alignment);
void eset_prefork(tsdn_t *tsdn, eset_t *eset);
diff --git a/include/jemalloc/internal/extent2.h b/include/jemalloc/internal/extent2.h
index 7a18a61..ef23267 100644
--- a/include/jemalloc/internal/extent2.h
+++ b/include/jemalloc/internal/extent2.h
@@ -26,38 +26,38 @@ extern size_t opt_lg_extent_max_active_fit;
extern rtree_t extents_rtree;
-extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
-void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+edata_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void extent_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *edata);
-extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, void *new_addr, size_t size, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero, bool *commit);
void extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- eset_t *eset, extent_t *extent);
-extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+ eset_t *eset, edata_t *edata);
+edata_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, size_t npages_min);
-extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit);
-void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, edata_t *edata);
void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent);
+ edata_t *edata);
void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent);
+ edata_t *edata);
bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length);
+ edata_t *edata, size_t offset, size_t length);
bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length);
+ edata_t *edata, size_t offset, size_t length);
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length);
+ edata_t *edata, size_t offset, size_t length);
bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length);
-extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a,
+ edata_t *edata, size_t offset, size_t length);
+edata_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+ edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a,
size_t size_b, szind_t szind_b, bool slab_b);
bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *a, extent_t *b);
-bool extent_head_no_merge(extent_t *a, extent_t *b);
+ edata_t *a, edata_t *b);
+bool extent_head_no_merge(edata_t *a, edata_t *b);
bool extent_boot(void);
diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/include/jemalloc/internal/jemalloc_internal_inlines_b.h
index d4cb04c..8367ee2 100644
--- a/include/jemalloc/internal/jemalloc_internal_inlines_b.h
+++ b/include/jemalloc/internal/jemalloc_internal_inlines_b.h
@@ -76,12 +76,12 @@ arena_is_auto(arena_t *arena) {
return (arena_ind_get(arena) < manual_arena_base);
}
-JEMALLOC_ALWAYS_INLINE extent_t *
+JEMALLOC_ALWAYS_INLINE edata_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ return rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
}
diff --git a/include/jemalloc/internal/large_externs.h b/include/jemalloc/internal/large_externs.h
index 2299920..fe5e606 100644
--- a/include/jemalloc/internal/large_externs.h
+++ b/include/jemalloc/internal/large_externs.h
@@ -6,7 +6,7 @@
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero);
-bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
@@ -18,12 +18,12 @@ extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
-void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc(tsdn_t *tsdn, extent_t *extent);
-size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
-void large_prof_info_get(const extent_t *extent, prof_info_t *prof_info);
-void large_prof_tctx_reset(extent_t *extent);
-void large_prof_info_set(extent_t *extent, prof_tctx_t *tctx);
+void large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc(tsdn_t *tsdn, edata_t *edata);
+size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
+void large_prof_info_get(const edata_t *edata, prof_info_t *prof_info);
+void large_prof_tctx_reset(edata_t *edata);
+void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx);
#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index 16ccbeb..339c7e5 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -48,18 +48,18 @@ struct rtree_leaf_elm_s {
/*
* Single pointer-width field containing all three leaf element fields.
* For example, on a 64-bit x64 system with 48 significant virtual
- * memory address bits, the index, extent, and slab fields are packed as
+ * memory address bits, the index, edata, and slab fields are packed as
* such:
*
* x: index
- * e: extent
+ * e: edata
* b: slab
*
* 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
*/
atomic_p_t le_bits;
#else
- atomic_p_t le_extent; /* (extent_t *) */
+ atomic_p_t le_edata; /* (edata_t *) */
atomic_u_t le_szind; /* (szind_t) */
atomic_b_t le_slab; /* (bool) */
#endif
@@ -176,8 +176,8 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
+JEMALLOC_ALWAYS_INLINE edata_t *
+rtree_leaf_elm_bits_edata_get(uintptr_t bits) {
# ifdef __aarch64__
/*
* aarch64 doesn't sign extend the highest virtual address bit to set
@@ -187,10 +187,10 @@ rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
/* Mask off the slab bit. */
uintptr_t low_bit_mask = ~(uintptr_t)1;
uintptr_t mask = high_bit_mask & low_bit_mask;
- return (extent_t *)(bits & mask);
+ return (edata_t *)(bits & mask);
# else
/* Restore sign-extended high bits, mask slab bit. */
- return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
+ return (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
RTREE_NHIB) & ~((uintptr_t)0x1));
# endif
}
@@ -207,16 +207,16 @@ rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
# endif
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
+JEMALLOC_ALWAYS_INLINE edata_t *
+rtree_leaf_elm_edata_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_extent_get(bits);
+ return rtree_leaf_elm_bits_edata_get(bits);
#else
- extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
+ edata_t *edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
- return extent;
+ return edata;
#endif
}
@@ -245,16 +245,16 @@ rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
}
static inline void
-rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent) {
+rtree_leaf_elm_edata_write(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, edata_t *edata) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
+ LG_VADDR) | ((uintptr_t)edata & (((uintptr_t)0x1 << LG_VADDR) - 1))
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
- atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
+ atomic_store_p(&elm->le_edata, edata, ATOMIC_RELEASE);
#endif
}
@@ -267,7 +267,7 @@ rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+ ((uintptr_t)rtree_leaf_elm_bits_edata_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
@@ -283,7 +283,7 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
true);
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
+ LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_edata_get(old_bits) &
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
@@ -293,20 +293,20 @@ rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
+ rtree_leaf_elm_t *elm, edata_t *edata, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+ ((uintptr_t)edata & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
((uintptr_t)slab);
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
#else
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
/*
- * Write extent last, since the element is atomically considered valid
- * as soon as the extent field is non-NULL.
+ * Write edata last, since the element is atomically considered valid
+ * as soon as the edata field is non-NULL.
*/
- rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
+ rtree_leaf_elm_edata_write(tsdn, rtree, elm, edata);
#endif
}
@@ -317,7 +317,7 @@ rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
/*
* The caller implicitly assures that it is the only writer to the szind
- * and slab fields, and that the extent field cannot currently change.
+ * and slab fields, and that the edata field cannot currently change.
*/
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
@@ -384,9 +384,9 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- extent_t *extent, szind_t szind, bool slab) {
- /* Use rtree_clear() to set the extent to NULL. */
- assert(extent != NULL);
+ edata_t *edata, szind_t szind, bool slab) {
+ /* Use rtree_clear() to set the edata to NULL. */
+ assert(edata != NULL);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
key, false, true);
@@ -394,8 +394,8 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
return true;
}
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
- rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
+ assert(rtree_leaf_elm_edata_read(tsdn, rtree, elm, false) == NULL);
+ rtree_leaf_elm_write(tsdn, rtree, elm, edata, szind, slab);
return false;
}
@@ -412,15 +412,15 @@ rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
return elm;
}
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+JEMALLOC_ALWAYS_INLINE edata_t *
+rtree_edata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, bool dependent) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NULL;
}
- return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+ return rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
}
JEMALLOC_ALWAYS_INLINE szind_t
@@ -440,14 +440,14 @@ rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
*/
JEMALLOC_ALWAYS_INLINE bool
-rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
+rtree_edata_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, edata_t **r_edata, szind_t *r_szind) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return true;
}
- *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
+ *r_edata = rtree_leaf_elm_edata_read(tsdn, rtree, elm, dependent);
*r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
return false;
}
@@ -520,7 +520,7 @@ static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
+ assert(rtree_leaf_elm_edata_read(tsdn, rtree, elm, false) !=
NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
}
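Illustration (not part of the patch): the rtree comments above rely on release/acquire publication — the edata pointer is stored last, so a reader that observes a non-NULL pointer also observes the szind/slab fields written before it. A minimal standalone sketch of that idea, using C11 stdatomic and hypothetical toy_* names rather than the real rtree_leaf_elm API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct { int payload; } toy_edata_t;    /* stand-in for edata_t */

typedef struct {
	_Atomic(toy_edata_t *) le_edata;        /* written last, with release */
	int le_szind;                           /* plain fields written first */
	bool le_slab;
} toy_leaf_elm_t;

/* Publish: fill in the plain fields, then release-store the pointer. */
static void
toy_leaf_elm_publish(toy_leaf_elm_t *elm, toy_edata_t *edata, int szind,
    bool slab) {
	elm->le_szind = szind;
	elm->le_slab = slab;
	atomic_store_explicit(&elm->le_edata, edata, memory_order_release);
}

/*
 * Consume: an acquire load that returns a non-NULL pointer guarantees the
 * plain fields written before the release store are visible.
 */
static toy_edata_t *
toy_leaf_elm_try_read(toy_leaf_elm_t *elm, int *szind, bool *slab) {
	toy_edata_t *e = atomic_load_explicit(&elm->le_edata,
	    memory_order_acquire);
	if (e != NULL) {
		*szind = elm->le_szind;
		*slab = elm->le_slab;
	}
	return e;
}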
diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h
index d76b790..ddbcf9d 100644
--- a/include/jemalloc/internal/witness.h
+++ b/include/jemalloc/internal/witness.h
@@ -43,7 +43,7 @@
#define WITNESS_RANK_TCACHE_QL 13U
#define WITNESS_RANK_EXTENT_GROW 14U
#define WITNESS_RANK_EXTENTS 15U
-#define WITNESS_RANK_EXTENT_AVAIL 16U
+#define WITNESS_RANK_EDATA_AVAIL 16U
#define WITNESS_RANK_EXTENT_POOL 17U
#define WITNESS_RANK_RTREE 18U
diff --git a/src/arena.c b/src/arena.c
index 2d46b9e..f05a1d1 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -60,9 +60,9 @@ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all);
-static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
-static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
/******************************************************************************/
@@ -102,8 +102,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
arena_stats_accum_zu(&astats->retained,
eset_npages_get(&arena->eset_retained) << LG_PAGE);
- atomic_store_zu(&astats->extent_avail,
- atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
+ atomic_store_zu(&astats->edata_avail,
+ atomic_load_zu(&arena->edata_avail_cnt, ATOMIC_RELAXED),
ATOMIC_RELAXED);
arena_stats_accum_u64(&astats->decay_dirty.npurge,
@@ -224,7 +224,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
- READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
+ READ_ARENA_MUTEX_PROF_DATA(edata_avail_mtx,
arena_prof_mutex_extent_avail)
READ_ARENA_MUTEX_PROF_DATA(eset_dirty.mtx,
arena_prof_mutex_extents_dirty)
@@ -254,11 +254,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent) {
+ edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, extent);
+ extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, edata);
if (arena_dirty_decay_ms_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, false, true);
} else {
@@ -267,34 +267,34 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
}
static void *
-arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
+arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
void *ret;
- slab_data_t *slab_data = extent_slab_data_get(slab);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
size_t regind;
- assert(extent_nfree_get(slab) > 0);
+ assert(edata_nfree_get(slab) > 0);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ ret = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
- extent_nfree_dec(slab);
+ edata_nfree_dec(slab);
return ret;
}
static void
-arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
+arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
unsigned cnt, void** ptrs) {
- slab_data_t *slab_data = extent_slab_data_get(slab);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
- assert(extent_nfree_get(slab) >= cnt);
+ assert(edata_nfree_get(slab) >= cnt);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
for (unsigned i = 0; i < cnt; i++) {
size_t regind = bitmap_sfu(slab_data->bitmap,
&bin_info->bitmap_info);
- *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
+ *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
}
#else
@@ -315,7 +315,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
* Load from memory locations only once, outside the
* hot loop below.
*/
- uintptr_t base = (uintptr_t)extent_addr_get(slab);
+ uintptr_t base = (uintptr_t)edata_addr_get(slab);
uintptr_t regsize = (uintptr_t)bin_info->reg_size;
while (pop--) {
size_t bit = cfs_lu(&g);
@@ -327,24 +327,24 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
slab_data->bitmap[group] = g;
}
#endif
- extent_nfree_sub(slab, cnt);
+ edata_nfree_sub(slab, cnt);
}
#ifndef JEMALLOC_JET
static
#endif
size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
+arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr) {
size_t diff, regind;
/* Freeing a pointer outside the slab can cause assertion failure. */
- assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
- assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
+ assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
+ assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
(uintptr_t)bin_infos[binind].reg_size == 0);
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
/* Avoid doing division with a variable divisor. */
regind = div_compute(&arena_binind_div_info[binind], diff);
@@ -355,17 +355,17 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
}
static void
-arena_slab_reg_dalloc(extent_t *slab, slab_data_t *slab_data, void *ptr) {
- szind_t binind = extent_szind_get(slab);
+arena_slab_reg_dalloc(edata_t *slab, slab_data_t *slab_data, void *ptr) {
+ szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
size_t regind = arena_slab_regind(slab, binind, ptr);
- assert(extent_nfree_get(slab) < bin_info->nregs);
+ assert(edata_nfree_get(slab) < bin_info->nregs);
/* Freeing an unallocated pointer can cause assertion failure. */
assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
- extent_nfree_inc(slab);
+ edata_nfree_inc(slab);
}
static void
@@ -423,7 +423,7 @@ arena_may_have_muzzy(arena_t *arena) {
return arena_muzzy_decay_ms_get(arena) != 0;
}
-extent_t *
+edata_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero) {
ehooks_t *ehooks = arena_get_ehooks(arena);
@@ -434,23 +434,22 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
szind_t szind = sz_size2index(usize);
size_t mapped_add;
bool commit = true;
- extent_t *extent = extents_alloc(tsdn, arena, ehooks,
- &arena->eset_dirty, NULL, usize, sz_large_pad, alignment, false,
- szind, zero, &commit);
- if (extent == NULL && arena_may_have_muzzy(arena)) {
- extent = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
+ edata_t *edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
+ NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit);
+ if (edata == NULL && arena_may_have_muzzy(arena)) {
+ edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
NULL, usize, sz_large_pad, alignment, false, szind, zero,
&commit);
}
size_t size = usize + sz_large_pad;
- if (extent == NULL) {
- extent = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
+ if (edata == NULL) {
+ edata = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
sz_large_pad, alignment, false, szind, zero, &commit);
if (config_stats) {
/*
- * extent may be NULL on OOM, but in that case
- * mapped_add isn't used below, so there's no need to
- * conditionlly set it to 0 here.
+ * edata may be NULL on OOM, but in that case mapped_add
+			 * isn't used below, so there's no need to conditionally
+ * set it to 0 here.
*/
mapped_add = size;
}
@@ -458,7 +457,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
mapped_add = 0;
}
- if (extent != NULL) {
+ if (edata != NULL) {
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_large_malloc_stats_update(tsdn, arena, usize);
@@ -471,24 +470,24 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
arena_nactive_add(arena, size >> LG_PAGE);
}
- return extent;
+ return edata;
}
void
-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
if (config_stats) {
arena_stats_lock(tsdn, &arena->stats);
arena_large_dalloc_stats_update(tsdn, arena,
- extent_usize_get(extent));
+ edata_usize_get(edata));
arena_stats_unlock(tsdn, &arena->stats);
}
- arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
+ arena_nactive_sub(arena, edata_size_get(edata) >> LG_PAGE);
}
void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
- size_t usize = extent_usize_get(extent);
+ size_t usize = edata_usize_get(edata);
size_t udiff = oldusize - usize;
if (config_stats) {
@@ -500,9 +499,9 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
- size_t usize = extent_usize_get(extent);
+ size_t usize = edata_usize_get(edata);
size_t udiff = usize - oldusize;
if (config_stats) {
@@ -819,25 +818,25 @@ arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
ehooks_t *ehooks, eset_t *eset, size_t npages_limit,
- size_t npages_decay_max, extent_list_t *decay_extents) {
+ size_t npages_decay_max, edata_list_t *decay_extents) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Stash extents according to npages_limit. */
size_t nstashed = 0;
- extent_t *extent;
+ edata_t *edata;
while (nstashed < npages_decay_max &&
- (extent = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
+ (edata = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
!= NULL) {
- extent_list_append(decay_extents, extent);
- nstashed += extent_size_get(extent) >> LG_PAGE;
+ edata_list_append(decay_extents, edata);
+ nstashed += edata_size_get(edata) >> LG_PAGE;
}
return nstashed;
}
static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- arena_decay_t *decay, eset_t *eset, bool all, extent_list_t *decay_extents,
+ arena_decay_t *decay, eset_t *eset, bool all, edata_list_t *decay_extents,
bool is_background_thread) {
size_t nmadvise, nunmapped;
size_t npurged;
@@ -849,31 +848,30 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
npurged = 0;
ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
- for (extent_t *extent = extent_list_first(decay_extents); extent !=
- NULL; extent = extent_list_first(decay_extents)) {
+ for (edata_t *edata = edata_list_first(decay_extents); edata !=
+ NULL; edata = edata_list_first(decay_extents)) {
if (config_stats) {
nmadvise++;
}
- size_t npages = extent_size_get(extent) >> LG_PAGE;
+ size_t npages = edata_size_get(edata) >> LG_PAGE;
npurged += npages;
- extent_list_remove(decay_extents, extent);
+ edata_list_remove(decay_extents, edata);
switch (eset_state_get(eset)) {
case extent_state_active:
not_reached();
case extent_state_dirty:
if (!all && muzzy_decay_ms != 0 &&
!extent_purge_lazy_wrapper(tsdn, arena,
- ehooks, extent, 0,
- extent_size_get(extent))) {
+ ehooks, edata, 0, edata_size_get(edata))) {
extents_dalloc(tsdn, arena, ehooks,
- &arena->eset_muzzy, extent);
+ &arena->eset_muzzy, edata);
arena_background_thread_inactivity_check(tsdn,
arena, is_background_thread);
break;
}
JEMALLOC_FALLTHROUGH;
case extent_state_muzzy:
- extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
+ extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
if (config_stats) {
nunmapped += npages;
}
@@ -923,8 +921,8 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
ehooks_t *ehooks = arena_get_ehooks(arena);
- extent_list_t decay_extents;
- extent_list_init(&decay_extents);
+ edata_list_t decay_extents;
+ edata_list_init(&decay_extents);
size_t npurge = arena_stash_decayed(tsdn, arena, ehooks, eset,
npages_limit, npages_decay_max, &decay_extents);
@@ -1000,33 +998,33 @@ arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
}
static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
- arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
+ arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE);
ehooks_t *ehooks = arena_get_ehooks(arena);
arena_extents_dirty_dalloc(tsdn, arena, ehooks, slab);
}
static void
-arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) > 0);
- extent_heap_insert(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
+ assert(edata_nfree_get(slab) > 0);
+ edata_heap_insert(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs++;
}
}
static void
-arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
- extent_heap_remove(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
+ edata_heap_remove(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs--;
}
}
-static extent_t *
+static edata_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
- extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
if (slab == NULL) {
return NULL;
}
@@ -1038,30 +1036,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
}
static void
-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) == 0);
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
+ assert(edata_nfree_get(slab) == 0);
/*
* Tracking extents is required by arena_reset, which is not allowed
- * for auto arenas. Bypass this step to avoid touching the extent
+ * for auto arenas. Bypass this step to avoid touching the edata
* linkage (often results in cache misses) for auto arenas.
*/
if (arena_is_auto(arena)) {
return;
}
- extent_list_append(&bin->slabs_full, slab);
+ edata_list_append(&bin->slabs_full, slab);
}
static void
-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) {
return;
}
- extent_list_remove(&bin->slabs_full, slab);
+ edata_list_remove(&bin->slabs_full, slab);
}
static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
- extent_t *slab;
+ edata_t *slab;
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (bin->slabcur != NULL) {
@@ -1071,13 +1069,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
- while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+ while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
- for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
- slab = extent_list_first(&bin->slabs_full)) {
+ for (slab = edata_list_first(&bin->slabs_full); slab != NULL;
+ slab = edata_list_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(arena, bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@@ -1109,9 +1107,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
- for (extent_t *extent = extent_list_first(&arena->large); extent !=
- NULL; extent = extent_list_first(&arena->large)) {
- void *ptr = extent_base_get(extent);
+ for (edata_t *edata = edata_list_first(&arena->large); edata !=
+ NULL; edata = edata_list_first(&arena->large)) {
+ void *ptr = edata_base_get(edata);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1129,7 +1127,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
if (config_prof && opt_prof) {
prof_free(tsd, ptr, usize, &alloc_ctx);
}
- large_dalloc(tsd_tsdn(tsd), extent);
+ large_dalloc(tsd_tsdn(tsd), edata);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1157,10 +1155,10 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
* dss-based extents for later reuse.
*/
ehooks_t *ehooks = arena_get_ehooks(arena);
- extent_t *extent;
- while ((extent = extents_evict(tsdn, arena, ehooks,
+ edata_t *edata;
+ while ((edata = extents_evict(tsdn, arena, ehooks,
&arena->eset_retained, 0)) != NULL) {
- extent_destroy_wrapper(tsdn, arena, ehooks, extent);
+ extent_destroy_wrapper(tsdn, arena, ehooks, edata);
}
}
@@ -1200,10 +1198,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
base_delete(tsd_tsdn(tsd), arena->base);
}
-static extent_t *
+static edata_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
const bin_info_t *bin_info, szind_t szind) {
- extent_t *slab;
+ edata_t *slab;
bool zero, commit;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -1222,7 +1220,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
return slab;
}
-static extent_t *
+static edata_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
const bin_info_t *bin_info) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -1232,7 +1230,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
szind_t szind = sz_size2index(bin_info->reg_size);
bool zero = false;
bool commit = true;
- extent_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
+ edata_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit);
if (slab == NULL && arena_may_have_muzzy(arena)) {
slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
@@ -1246,22 +1244,22 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
return NULL;
}
}
- assert(extent_slab_get(slab));
+ assert(edata_slab_get(slab));
/* Initialize slab internals. */
- slab_data_t *slab_data = extent_slab_data_get(slab);
- extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
+ edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
- arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
+ arena_nactive_add(arena, edata_size_get(slab) >> LG_PAGE);
return slab;
}
-static extent_t *
+static edata_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, unsigned binshard) {
- extent_t *slab;
+ edata_t *slab;
const bin_info_t *bin_info;
/* Look for a usable slab. */
@@ -1307,14 +1305,14 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
if (bin->slabcur != NULL) {
/* Only attempted when current slab is full. */
- assert(extent_nfree_get(bin->slabcur) == 0);
+ assert(edata_nfree_get(bin->slabcur) == 0);
}
const bin_info_t *bin_info = &bin_infos[binind];
- extent_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
+ edata_t *slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind,
binshard);
if (bin->slabcur != NULL) {
- if (extent_nfree_get(bin->slabcur) > 0) {
+ if (edata_nfree_get(bin->slabcur) > 0) {
/*
* Another thread updated slabcur while this one ran
* without the bin lock in arena_bin_nonfull_slab_get().
@@ -1331,7 +1329,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
* arena_bin_lower_slab() must be called, as if
* a region were just deallocated from the slab.
*/
- if (extent_nfree_get(slab) == bin_info->nregs) {
+ if (edata_nfree_get(slab) == bin_info->nregs) {
arena_dalloc_bin_slab(tsdn, arena, slab,
bin);
} else {
@@ -1350,7 +1348,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
return NULL;
}
bin->slabcur = slab;
- assert(extent_nfree_get(bin->slabcur) > 0);
+ assert(edata_nfree_get(bin->slabcur) > 0);
return arena_slab_reg_alloc(slab, bin_info);
}
@@ -1386,12 +1384,12 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
void **empty_position = cache_bin_empty_position_get(tbin, binind);
for (i = 0, nfill = (cache_bin_ncached_max_get(binind) >>
tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
- extent_t *slab;
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
+ edata_t *slab;
+ if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) >
0) {
unsigned tofill = nfill - i;
- cnt = tofill < extent_nfree_get(slab) ?
- tofill : extent_nfree_get(slab);
+ cnt = tofill < edata_nfree_get(slab) ?
+ tofill : edata_nfree_get(slab);
arena_slab_reg_alloc_batch(
slab, &bin_infos[binind], cnt,
empty_position - nfill + i);
@@ -1454,14 +1452,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
bin_t *bin;
size_t usize;
- extent_t *slab;
+ edata_t *slab;
assert(binind < SC_NBINS);
usize = sz_index2size(binind);
unsigned binshard;
bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
+ if ((slab = bin->slabcur) != NULL && edata_nfree_get(slab) > 0) {
ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
} else {
ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
@@ -1554,11 +1552,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ edata_t *edata = rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
+ edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
szind, false);
@@ -1568,11 +1566,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
}
static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
+arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- extent_szind_set(extent, SC_NBINS);
+ edata_szind_set(edata, SC_NBINS);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
@@ -1589,9 +1587,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
cassert(config_prof);
assert(opt_prof);
- extent_t *extent = iealloc(tsdn, ptr);
- size_t usize = extent_usize_get(extent);
- size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+ edata_t *edata = iealloc(tsdn, ptr);
+ size_t usize = edata_usize_get(edata);
+ size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
/*
* Currently, we only do redzoning for small sampled
@@ -1604,17 +1602,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(bumped_usize), slow_path);
} else {
- large_dalloc(tsdn, extent);
+ large_dalloc(tsdn, edata);
}
}
static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
/* Dissociate slab from bin. */
if (slab == bin->slabcur) {
bin->slabcur = NULL;
} else {
- szind_t binind = extent_szind_get(slab);
+ szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
/*
@@ -1631,7 +1629,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
}
static void
-arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
assert(slab != bin->slabcur);
@@ -1646,9 +1644,9 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
}
static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
- assert(extent_nfree_get(slab) > 0);
+ assert(edata_nfree_get(slab) > 0);
/*
* Make sure that if bin->slabcur is non-NULL, it refers to the
@@ -1656,9 +1654,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
* than proactively keeping it pointing at the oldest/lowest non-full
* slab.
*/
- if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+ if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */
- if (extent_nfree_get(bin->slabcur) > 0) {
+ if (edata_nfree_get(bin->slabcur) > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
} else {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
@@ -1674,8 +1672,8 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *slab, void *ptr, bool junked) {
- slab_data_t *slab_data = extent_slab_data_get(slab);
+ szind_t binind, edata_t *slab, void *ptr, bool junked) {
+ slab_data_t *slab_data = edata_slab_data_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
if (!junked && config_fill && unlikely(opt_junk_free)) {
@@ -1683,7 +1681,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
}
arena_slab_reg_dalloc(slab, slab_data, ptr);
- unsigned nfree = extent_nfree_get(slab);
+ unsigned nfree = edata_nfree_get(slab);
if (nfree == bin_info->nregs) {
arena_dissociate_bin_slab(arena, slab, bin);
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
@@ -1700,29 +1698,29 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *extent, void *ptr) {
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+ szind_t binind, edata_t *edata, void *ptr) {
+ arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
true);
}
static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
- szind_t binind = extent_szind_get(extent);
- unsigned binshard = extent_binshard_get(extent);
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
+ szind_t binind = edata_szind_get(edata);
+ unsigned binshard = edata_binshard_get(edata);
bin_t *bin = &arena->bins[binind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+ arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata, ptr,
false);
malloc_mutex_unlock(tsdn, &bin->lock);
}
void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
- extent_t *extent = iealloc(tsdn, ptr);
- arena_t *arena = arena_get_from_extent(extent);
+ edata_t *edata = iealloc(tsdn, ptr);
+ arena_t *arena = arena_get_from_edata(edata);
- arena_dalloc_bin(tsdn, arena, extent, ptr);
+ arena_dalloc_bin(tsdn, arena, edata, ptr);
arena_decay_tick(tsdn, arena);
}
@@ -1733,7 +1731,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
- extent_t *extent = iealloc(tsdn, ptr);
+ edata_t *edata = iealloc(tsdn, ptr);
if (unlikely(size > SC_LARGE_MAXCLASS)) {
ret = true;
goto done;
@@ -1756,19 +1754,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
goto done;
}
- arena_t *arena = arena_get_from_extent(extent);
+ arena_t *arena = arena_get_from_edata(edata);
arena_decay_tick(tsdn, arena);
ret = false;
} else if (oldsize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS) {
- ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
zero);
} else {
ret = true;
}
done:
- assert(extent == iealloc(tsdn, ptr));
- *newsize = extent_usize_get(extent);
+ assert(edata == iealloc(tsdn, ptr));
+ *newsize = edata_usize_get(edata);
return ret;
}
@@ -2006,7 +2004,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
- extent_list_init(&arena->large);
+ edata_list_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error;
@@ -2055,9 +2053,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
- extent_avail_new(&arena->extent_avail);
- if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
- WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
+ edata_avail_new(&arena->edata_avail);
+ if (malloc_mutex_init(&arena->edata_avail_mtx, "edata_avail",
+ WITNESS_RANK_EDATA_AVAIL, malloc_mutex_rank_exclusive)) {
goto label_error;
}
@@ -2203,7 +2201,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+ malloc_mutex_prefork(tsdn, &arena->edata_avail_mtx);
}
void
@@ -2237,7 +2235,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
- malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->edata_avail_mtx);
eset_postfork_parent(tsdn, &arena->eset_dirty);
eset_postfork_parent(tsdn, &arena->eset_muzzy);
eset_postfork_parent(tsdn, &arena->eset_retained);
@@ -2283,7 +2281,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
- malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->edata_avail_mtx);
eset_postfork_child(tsdn, &arena->eset_dirty);
eset_postfork_child(tsdn, &arena->eset_muzzy);
eset_postfork_child(tsdn, &arena->eset_retained);
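Illustration (not part of the patch): arena_slab_regind() above recovers a region index from a pointer by dividing the pointer's offset within the slab by the region size, with div_compute() standing in for the variable-divisor division. A minimal sketch using plain division and a hypothetical toy_slab_regind name:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Given a slab base, a region size, and a pointer into the slab, the region
 * index is the offset divided by the region size.  The real code replaces
 * the division with a precomputed div_compute() step for speed.
 */
static size_t
toy_slab_regind(uintptr_t slab_base, size_t reg_size, const void *ptr) {
	uintptr_t addr = (uintptr_t)ptr;
	assert(addr >= slab_base);
	size_t diff = (size_t)(addr - slab_base);
	assert(diff % reg_size == 0);       /* interior pointers are invalid */
	return diff / reg_size;
}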
diff --git a/src/base.c b/src/base.c
index 79736cd..76d7655 100644
--- a/src/base.c
+++ b/src/base.c
@@ -105,14 +105,14 @@ label_done:
}
static void
-base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
- extent_binit(extent, addr, size, sn);
+ edata_binit(edata, addr, size, sn);
}
static size_t
@@ -158,7 +158,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
- extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
+ edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
@@ -166,34 +166,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
-base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
- *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
- alignment) - (uintptr_t)extent_addr_get(extent);
- ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
- assert(extent_bsize_get(extent) >= *gap_size + size);
- extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
- *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
- extent_sn_get(extent));
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
+ alignment) - (uintptr_t)edata_addr_get(edata);
+ ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
+ assert(edata_bsize_get(edata) >= *gap_size + size);
+ edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
+ *gap_size + size), edata_bsize_get(edata) - *gap_size - size,
+ edata_sn_get(edata));
return ret;
}
static void
-base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
+base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
- if (extent_bsize_get(extent) > 0) {
+ if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
- sz_size2index(extent_bsize_get(extent) + 1) - 1;
- extent_heap_insert(&base->avail[index_floor], extent);
+ sz_size2index(edata_bsize_get(edata) + 1) - 1;
+ edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
@@ -218,13 +218,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
static void *
-base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
+base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
- ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
- base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
+ ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
@@ -284,7 +284,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
- base_extent_init(extent_sn_next, &block->extent,
+ base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
@@ -293,7 +293,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
-static extent_t *
+static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
@@ -327,7 +327,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
- return &block->extent;
+ return &block->edata;
}
base_t *
@@ -357,7 +357,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
- base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
base->ind = ind;
ehooks_init(&base->ehooks, extent_hooks);
@@ -371,7 +371,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
- extent_heap_new(&base->avail[i]);
+ edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
@@ -384,7 +384,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
- base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
+ base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
@@ -422,28 +422,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
- extent_t *extent = NULL;
+ edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
- extent = extent_heap_remove_first(&base->avail[i]);
- if (extent != NULL) {
+ edata = edata_heap_remove_first(&base->avail[i]);
+ if (edata != NULL) {
/* Use existing space. */
break;
}
}
- if (extent == NULL) {
+ if (edata == NULL) {
/* Try to allocate more space. */
- extent = base_extent_alloc(tsdn, base, usize, alignment);
+ edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
- if (extent == NULL) {
+ if (edata == NULL) {
ret = NULL;
goto label_return;
}
- ret = base_extent_bump_alloc(base, extent, usize, alignment);
+ ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
- *esn = extent_sn_get(extent);
+ *esn = edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
@@ -463,16 +463,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
-extent_t *
-base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+edata_t *
+base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
- extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
+ edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
CACHELINE, &esn);
- if (extent == NULL) {
+ if (edata == NULL) {
return NULL;
}
- extent_esn_set(extent, esn);
- return extent;
+ edata_esn_set(edata, esn);
+ return edata;
}
void
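Illustration (not part of the patch): base_extent_bump_alloc_helper() above carves an aligned allocation off the front of an edata and shrinks what remains. A minimal sketch of the same bump-allocation idea over a hypothetical toy_span_t, independent of the real base_t/edata_t types:

#include <stddef.h>
#include <stdint.h>

/* Round up to a power-of-two alignment. */
#define TOY_ALIGN_UP(a, align)						\
	(((a) + ((uintptr_t)(align) - 1)) & ~((uintptr_t)(align) - 1))

typedef struct {
	uintptr_t cur;      /* first unused byte */
	uintptr_t end;      /* one past the span */
} toy_span_t;

/*
 * Align the cursor, hand out `size` bytes, and advance the cursor past the
 * alignment gap plus the allocation; the remainder stays available for the
 * next request.
 */
static void *
toy_bump_alloc(toy_span_t *span, size_t size, size_t alignment) {
	uintptr_t aligned = TOY_ALIGN_UP(span->cur, alignment);
	if (aligned + size > span->end) {
		return NULL;                /* not enough room left */
	}
	span->cur = aligned + size;
	return (void *)aligned;
}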
diff --git a/src/bin.c b/src/bin.c
index d7cbfb5..52de9ff 100644
--- a/src/bin.c
+++ b/src/bin.c
@@ -45,8 +45,8 @@ bin_init(bin_t *bin) {
return true;
}
bin->slabcur = NULL;
- extent_heap_new(&bin->slabs_nonfull);
- extent_list_init(&bin->slabs_full);
+ edata_heap_new(&bin->slabs_nonfull);
+ edata_list_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}
diff --git a/src/ctl.c b/src/ctl.c
index 4aa4af8..1e72bf4 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -855,8 +855,8 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
&astats->astats.mapped);
accum_atomic_zu(&sdstats->astats.retained,
&astats->astats.retained);
- accum_atomic_zu(&sdstats->astats.extent_avail,
- &astats->astats.extent_avail);
+ accum_atomic_zu(&sdstats->astats.edata_avail,
+ &astats->astats.edata_avail);
}
ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
@@ -2603,18 +2603,18 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
int ret;
unsigned arena_ind;
void *ptr;
- extent_t *extent;
+ edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
- extent = iealloc(tsd_tsdn(tsd), ptr);
- if (extent == NULL)
+ edata = iealloc(tsd_tsdn(tsd), ptr);
+ if (edata == NULL)
goto label_return;
- arena = arena_get_from_extent(extent);
+ arena = arena_get_from_edata(edata);
if (arena == NULL)
goto label_return;
@@ -2860,7 +2860,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.edata_avail,
ATOMIC_RELAXED),
size_t)
@@ -3010,7 +3010,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
- MUTEX_PROF_RESET(arena->extent_avail_mtx);
+ MUTEX_PROF_RESET(arena->edata_avail_mtx);
MUTEX_PROF_RESET(arena->eset_dirty.mtx);
MUTEX_PROF_RESET(arena->eset_muzzy.mtx);
MUTEX_PROF_RESET(arena->eset_retained.mtx);
diff --git a/src/edata.c b/src/edata.c
index 1a5a1fa..5e53e99 100644
--- a/src/edata.c
+++ b/src/edata.c
@@ -1,6 +1,6 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
-ph_gen(, extent_avail_, extent_tree_t, extent_t, ph_link,
- extent_esnead_comp)
-ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
+ph_gen(, edata_avail_, edata_tree_t, edata_t, ph_link,
+ edata_esnead_comp)
+ph_gen(, edata_heap_, edata_heap_t, edata_t, ph_link, edata_snad_comp)
diff --git a/src/ehooks.c b/src/ehooks.c
index 25aef1c..a62586b 100644
--- a/src/ehooks.c
+++ b/src/ehooks.c
@@ -200,8 +200,8 @@ ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
if (!maps_coalesce) {
tsdn_t *tsdn = tsdn_fetch();
- extent_t *a = iealloc(tsdn, addr_a);
- extent_t *b = iealloc(tsdn, addr_b);
+ edata_t *a = iealloc(tsdn, addr_a);
+ edata_t *b = iealloc(tsdn, addr_b);
if (extent_head_no_merge(a, b)) {
return true;
}
diff --git a/src/eset.c b/src/eset.c
index 9cc8cee..88b9c8c 100644
--- a/src/eset.c
+++ b/src/eset.c
@@ -16,10 +16,10 @@ eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
return true;
}
for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
- extent_heap_new(&eset->heaps[i]);
+ edata_heap_new(&eset->heaps[i]);
}
bitmap_init(eset->bitmap, &eset_bitmap_info, true);
- extent_list_init(&eset->lru);
+ edata_list_init(&eset->lru);
atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
eset->state = state;
eset->delay_coalesce = delay_coalesce;
@@ -63,24 +63,24 @@ eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
}
void
-eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
+eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
- assert(extent_state_get(extent) == eset->state);
+ assert(edata_state_get(edata) == eset->state);
- size_t size = extent_size_get(extent);
+ size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
- if (extent_heap_empty(&eset->heaps[pind])) {
+ if (edata_heap_empty(&eset->heaps[pind])) {
bitmap_unset(eset->bitmap, &eset_bitmap_info,
(size_t)pind);
}
- extent_heap_insert(&eset->heaps[pind], extent);
+ edata_heap_insert(&eset->heaps[pind], edata);
if (config_stats) {
eset_stats_add(eset, pind, size);
}
- extent_list_append(&eset->lru, extent);
+ edata_list_append(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
@@ -94,24 +94,24 @@ eset_insert_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
}
void
-eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
+eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
- assert(extent_state_get(extent) == eset->state);
+ assert(edata_state_get(edata) == eset->state);
- size_t size = extent_size_get(extent);
+ size_t size = edata_size_get(edata);
size_t psz = sz_psz_quantize_floor(size);
pszind_t pind = sz_psz2ind(psz);
- extent_heap_remove(&eset->heaps[pind], extent);
+ edata_heap_remove(&eset->heaps[pind], edata);
if (config_stats) {
eset_stats_sub(eset, pind, size);
}
- if (extent_heap_empty(&eset->heaps[pind])) {
+ if (edata_heap_empty(&eset->heaps[pind])) {
bitmap_set(eset->bitmap, &eset_bitmap_info,
(size_t)pind);
}
- extent_list_remove(&eset->lru, extent);
+ edata_list_remove(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* As in eset_insert_locked, we hold eset->mtx and so don't need atomic
@@ -128,7 +128,7 @@ eset_remove_locked(tsdn_t *tsdn, eset_t *eset, extent_t *extent) {
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
-static extent_t *
+static edata_t *
eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
@@ -139,10 +139,10 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
(pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
assert(i < SC_NPSIZES);
- assert(!extent_heap_empty(&eset->heaps[i]));
- extent_t *extent = extent_heap_first(&eset->heaps[i]);
- uintptr_t base = (uintptr_t)extent_base_get(extent);
- size_t candidate_size = extent_size_get(extent);
+ assert(!edata_heap_empty(&eset->heaps[i]));
+ edata_t *edata = edata_heap_first(&eset->heaps[i]);
+ uintptr_t base = (uintptr_t)edata_base_get(edata);
+ size_t candidate_size = edata_size_get(edata);
assert(candidate_size >= min_size);
uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
@@ -154,7 +154,7 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
- return extent;
+ return edata;
}
}
@@ -165,9 +165,9 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
* Do first-fit extent selection, i.e. select the oldest/lowest extent that is
* large enough.
*/
-static extent_t *
+static edata_t *
eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
- extent_t *ret = NULL;
+ edata_t *ret = NULL;
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
@@ -176,8 +176,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
* No split / merge allowed (Windows w/o retain). Try exact fit
* only.
*/
- return extent_heap_empty(&eset->heaps[pind]) ? NULL :
- extent_heap_first(&eset->heaps[pind]);
+ return edata_heap_empty(&eset->heaps[pind]) ? NULL :
+ edata_heap_first(&eset->heaps[pind]);
}
for (pszind_t i = (pszind_t)bitmap_ffu(eset->bitmap,
@@ -185,9 +185,9 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
i < SC_NPSIZES + 1;
i = (pszind_t)bitmap_ffu(eset->bitmap, &eset_bitmap_info,
(size_t)i+1)) {
- assert(!extent_heap_empty(&eset->heaps[i]));
- extent_t *extent = extent_heap_first(&eset->heaps[i]);
- assert(extent_size_get(extent) >= size);
+ assert(!edata_heap_empty(&eset->heaps[i]));
+ edata_t *edata = edata_heap_first(&eset->heaps[i]);
+ assert(edata_size_get(edata) >= size);
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large eset for much smaller sizes.
@@ -198,8 +198,8 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
(sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
break;
}
- if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
- ret = extent;
+ if (ret == NULL || edata_snad_comp(edata, ret) < 0) {
+ ret = edata;
}
if (i == SC_NPSIZES) {
break;
@@ -210,7 +210,7 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
return ret;
}
-extent_t *
+edata_t *
eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &eset->mtx);
@@ -220,18 +220,18 @@ eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
return NULL;
}
- extent_t *extent = eset_first_fit_locked(tsdn, eset, max_size);
+ edata_t *edata = eset_first_fit_locked(tsdn, eset, max_size);
- if (alignment > PAGE && extent == NULL) {
+ if (alignment > PAGE && edata == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
- extent = eset_fit_alignment(eset, esize, max_size, alignment);
+ edata = eset_fit_alignment(eset, esize, max_size, alignment);
}
- return extent;
+ return edata;
}
void
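Illustration (not part of the patch): eset_first_fit_locked() above scans per-size-class heaps no smaller than the request (skipping empty classes via a bitmap) and prefers the oldest candidate by serial number. A minimal sketch over hypothetical toy_* free lists:

#include <stddef.h>

#define TOY_NSIZES 8

typedef struct toy_extent_s {
	size_t size;
	size_t sn;                          /* serial number: lower = older */
	struct toy_extent_s *next;
} toy_extent_t;

/*
 * First fit across per-size-class lists: look only at the head of each
 * class that can satisfy the request, and among those heads pick the one
 * with the lowest serial number.  The real code skips empty classes with a
 * bitmap instead of testing each head for NULL.
 */
static toy_extent_t *
toy_first_fit(toy_extent_t *heads[TOY_NSIZES], size_t start_class) {
	toy_extent_t *ret = NULL;
	for (size_t i = start_class; i < TOY_NSIZES; i++) {
		toy_extent_t *e = heads[i];
		if (e == NULL) {
			continue;
		}
		if (ret == NULL || e->sn < ret->sn) {
			ret = e;
		}
	}
	return ret;
}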
diff --git a/src/extent2.c b/src/extent2.c
index 4001d17..5bacb8f 100644
--- a/src/extent2.c
+++ b/src/extent2.c
@@ -13,25 +13,25 @@
/* Data. */
rtree_t extents_rtree;
-/* Keyed by the address of the extent_t being protected. */
+/* Keyed by the address of the edata_t being protected. */
mutex_pool_t extent_mutex_pool;
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length, bool growing_retained);
+ edata_t *edata, size_t offset, size_t length, bool growing_retained);
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
- ehooks_t *ehooks, extent_t *extent, size_t offset, size_t length,
+ ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length,
bool growing_retained);
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
- ehooks_t *ehooks, extent_t *extent, size_t offset, size_t length,
+ ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length,
bool growing_retained);
-static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
- ehooks_t *ehooks, extent_t *extent, size_t size_a, szind_t szind_a,
+static edata_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
+ ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
bool growing_retained);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *a, extent_t *b, bool growing_retained);
+ edata_t *a, edata_t *b, bool growing_retained);
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
@@ -43,15 +43,15 @@ static atomic_zu_t highpages;
* definition.
*/
-static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
-static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+static void extent_deregister(tsdn_t *tsdn, edata_t *edata);
+static edata_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, void *new_addr, size_t usize, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained);
-static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
- ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, eset_t *eset, extent_t *extent,
+static edata_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+ ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata,
bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- eset_t *eset, extent_t *extent, bool growing_retained);
+ eset_t *eset, edata_t *edata, bool growing_retained);
/******************************************************************************/
@@ -62,68 +62,68 @@ typedef enum {
} lock_result_t;
static inline void
-extent_lock(tsdn_t *tsdn, extent_t *extent) {
- assert(extent != NULL);
- mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+extent_lock_edata(tsdn_t *tsdn, edata_t *edata) {
+ assert(edata != NULL);
+ mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)edata);
}
static inline void
-extent_unlock(tsdn_t *tsdn, extent_t *extent) {
- assert(extent != NULL);
- mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+extent_unlock_edata(tsdn_t *tsdn, edata_t *edata) {
+ assert(edata != NULL);
+ mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)edata);
}
static inline void
-extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
- assert(extent1 != NULL && extent2 != NULL);
- mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
- (uintptr_t)extent2);
+extent_lock_edata2(tsdn_t *tsdn, edata_t *edata1, edata_t *edata2) {
+ assert(edata1 != NULL && edata2 != NULL);
+ mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)edata1,
+ (uintptr_t)edata2);
}
static inline void
-extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
- assert(extent1 != NULL && extent2 != NULL);
- mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
- (uintptr_t)extent2);
+extent_unlock_edata2(tsdn_t *tsdn, edata_t *edata1, edata_t *edata2) {
+ assert(edata1 != NULL && edata2 != NULL);
+ mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)edata1,
+ (uintptr_t)edata2);
}
static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
- extent_t **result, bool inactive_only) {
- extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
+ edata_t **result, bool inactive_only) {
+ edata_t *edata1 = rtree_leaf_elm_edata_read(tsdn, &extents_rtree,
elm, true);
/* Slab implies active extents and should be skipped. */
- if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
+ if (edata1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
&extents_rtree, elm, true))) {
return lock_result_no_extent;
}
/*
* It's possible that the extent changed out from under us, and with it
- * the leaf->extent mapping. We have to recheck while holding the lock.
+ * the leaf->edata mapping. We have to recheck while holding the lock.
*/
- extent_lock(tsdn, extent1);
- extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
- &extents_rtree, elm, true);
+ extent_lock_edata(tsdn, edata1);
+ edata_t *edata2 = rtree_leaf_elm_edata_read(tsdn, &extents_rtree, elm,
+ true);
- if (extent1 == extent2) {
- *result = extent1;
+ if (edata1 == edata2) {
+ *result = edata1;
return lock_result_success;
} else {
- extent_unlock(tsdn, extent1);
+ extent_unlock_edata(tsdn, edata1);
return lock_result_failure;
}
}
/*
- * Returns a pool-locked extent_t * if there's one associated with the given
+ * Returns a pool-locked edata_t * if there's one associated with the given
* address, and NULL otherwise.
*/
-static extent_t *
-extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
+static edata_t *
+extent_lock_edata_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
bool inactive_only) {
- extent_t *ret = NULL;
+ edata_t *ret = NULL;
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)addr, false, false);
if (elm == NULL) {
@@ -138,9 +138,9 @@ extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
}
static void
-extent_addr_randomize(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+extent_addr_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t alignment) {
- assert(extent_base_get(extent) == extent_addr_get(extent));
+ assert(edata_base_get(edata) == edata_addr_get(edata));
if (alignment < PAGE) {
unsigned lg_range = LG_PAGE -
@@ -156,52 +156,52 @@ extent_addr_randomize(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
- extent->e_addr = (void *)((uintptr_t)extent->e_addr +
+ edata->e_addr = (void *)((uintptr_t)edata->e_addr +
random_offset);
- assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
- extent->e_addr);
+ assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
+ edata->e_addr);
}
}
-extent_t *
+edata_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_t *extent = extent_avail_first(&arena->extent_avail);
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return base_alloc_extent(tsdn, arena->base);
- }
- extent_avail_remove(&arena->extent_avail, extent);
- atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return extent;
+ malloc_mutex_lock(tsdn, &arena->edata_avail_mtx);
+ edata_t *edata = edata_avail_first(&arena->edata_avail);
+ if (edata == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->edata_avail_mtx);
+ return base_alloc_edata(tsdn, arena->base);
+ }
+ edata_avail_remove(&arena->edata_avail, edata);
+ atomic_fetch_sub_zu(&arena->edata_avail_cnt, 1, ATOMIC_RELAXED);
+ malloc_mutex_unlock(tsdn, &arena->edata_avail_mtx);
+ return edata;
}
void
-extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_avail_insert(&arena->extent_avail, extent);
- atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+extent_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
+ malloc_mutex_lock(tsdn, &arena->edata_avail_mtx);
+ edata_avail_insert(&arena->edata_avail, edata);
+ atomic_fetch_add_zu(&arena->edata_avail_cnt, 1, ATOMIC_RELAXED);
+ malloc_mutex_unlock(tsdn, &arena->edata_avail_mtx);
}
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- rtree_ctx_t *rtree_ctx, eset_t *eset, extent_t *extent) {
- extent_state_set(extent, extent_state_active);
+ rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata) {
+ edata_state_set(edata, extent_state_active);
bool coalesced;
- extent = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx, eset,
- extent, &coalesced, false);
- extent_state_set(extent, eset_state_get(eset));
+ edata = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx, eset,
+ edata, &coalesced, false);
+ edata_state_set(edata, eset_state_get(eset));
if (!coalesced) {
return true;
}
- eset_insert_locked(tsdn, eset, extent);
+ eset_insert_locked(tsdn, eset, edata);
return false;
}
-extent_t *
+edata_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit) {
@@ -210,28 +210,28 @@ extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_t *extent = extent_recycle(tsdn, arena, ehooks, eset, new_addr,
+ edata_t *edata = extent_recycle(tsdn, arena, ehooks, eset, new_addr,
size, pad, alignment, slab, szind, zero, commit, false);
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
+ assert(edata == NULL || edata_dumpable_get(edata));
+ return edata;
}
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
- extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
- assert(extent_dumpable_get(extent));
+ edata_t *edata) {
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
+ assert(edata_dumpable_get(edata));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_addr_set(extent, extent_base_get(extent));
- extent_zeroed_set(extent, false);
+ edata_addr_set(edata, edata_base_get(edata));
+ edata_zeroed_set(edata, false);
- extent_record(tsdn, arena, ehooks, eset, extent, false);
+ extent_record(tsdn, arena, ehooks, eset, edata, false);
}
-extent_t *
+edata_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
size_t npages_min) {
rtree_ctx_t rtree_ctx_fallback;
@@ -243,27 +243,27 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
* Get the LRU coalesced extent, if any. If coalescing was delayed,
* the loop will iterate until the LRU extent is fully coalesced.
*/
- extent_t *extent;
+ edata_t *edata;
while (true) {
/* Get the LRU extent, if any. */
- extent = extent_list_first(&eset->lru);
- if (extent == NULL) {
+ edata = edata_list_first(&eset->lru);
+ if (edata == NULL) {
goto label_return;
}
/* Check the eviction limit. */
size_t extents_npages = atomic_load_zu(&eset->npages,
ATOMIC_RELAXED);
if (extents_npages <= npages_min) {
- extent = NULL;
+ edata = NULL;
goto label_return;
}
- eset_remove_locked(tsdn, eset, extent);
+ eset_remove_locked(tsdn, eset, edata);
if (!eset->delay_coalesce) {
break;
}
/* Try to coalesce. */
if (extent_try_delayed_coalesce(tsdn, arena, ehooks, rtree_ctx,
- eset, extent)) {
+ eset, edata)) {
break;
}
/*
@@ -281,10 +281,10 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
not_reached();
case extent_state_dirty:
case extent_state_muzzy:
- extent_state_set(extent, extent_state_active);
+ edata_state_set(edata, extent_state_active);
break;
case extent_state_retained:
- extent_deregister(tsdn, extent);
+ extent_deregister(tsdn, edata);
break;
default:
not_reached();
@@ -292,7 +292,7 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
label_return:
malloc_mutex_unlock(tsdn, &eset->mtx);
- return extent;
+ return edata;
}
/*
@@ -301,8 +301,8 @@ label_return:
*/
static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
- extent_t *extent, bool growing_retained) {
- size_t sz = extent_size_get(extent);
+ edata_t *edata, bool growing_retained) {
+ size_t sz = edata_size_get(edata);
if (config_stats) {
arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
}
@@ -311,56 +311,56 @@ extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
* that this is only a virtual memory leak.
*/
if (eset_state_get(eset) == extent_state_dirty) {
- if (extent_purge_lazy_impl(tsdn, arena, ehooks, extent, 0, sz,
+ if (extent_purge_lazy_impl(tsdn, arena, ehooks, edata, 0, sz,
growing_retained)) {
- extent_purge_forced_impl(tsdn, arena, ehooks, extent, 0,
- extent_size_get(extent), growing_retained);
+ extent_purge_forced_impl(tsdn, arena, ehooks, edata, 0,
+ edata_size_get(edata), growing_retained);
}
}
- extent_dalloc(tsdn, arena, extent);
+ extent_dalloc(tsdn, arena, edata);
}
static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
- extent_t *extent) {
- assert(extent_arena_ind_get(extent) == arena_ind_get(arena));
- assert(extent_state_get(extent) == extent_state_active);
+ edata_t *edata) {
+ assert(edata_arena_ind_get(edata) == arena_ind_get(arena));
+ assert(edata_state_get(edata) == extent_state_active);
- extent_state_set(extent, eset_state_get(eset));
- eset_insert_locked(tsdn, eset, extent);
+ edata_state_set(edata, eset_state_get(eset));
+ eset_insert_locked(tsdn, eset, edata);
}
static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
- extent_t *extent) {
+ edata_t *edata) {
malloc_mutex_lock(tsdn, &eset->mtx);
- extent_deactivate_locked(tsdn, arena, eset, extent);
+ extent_deactivate_locked(tsdn, arena, eset, edata);
malloc_mutex_unlock(tsdn, &eset->mtx);
}
static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
- extent_t *extent) {
- assert(extent_arena_ind_get(extent) == arena_ind_get(arena));
- assert(extent_state_get(extent) == eset_state_get(eset));
+ edata_t *edata) {
+ assert(edata_arena_ind_get(edata) == arena_ind_get(arena));
+ assert(edata_state_get(edata) == eset_state_get(eset));
- eset_remove_locked(tsdn, eset, extent);
- extent_state_set(extent, extent_state_active);
+ eset_remove_locked(tsdn, eset, edata);
+ edata_state_set(edata, extent_state_active);
}
static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- const extent_t *extent, bool dependent, bool init_missing,
+ const edata_t *edata, bool dependent, bool init_missing,
rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), dependent, init_missing);
+ (uintptr_t)edata_base_get(edata), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return true;
}
assert(*r_elm_a != NULL);
*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_last_get(extent), dependent, init_missing);
+ (uintptr_t)edata_last_get(edata), dependent, init_missing);
if (!dependent && *r_elm_b == NULL) {
return true;
}
@@ -371,36 +371,36 @@ extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
- rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
+ rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, edata, szind, slab);
if (elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, edata, szind,
slab);
}
}
static void
-extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
+extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, edata_t *edata,
szind_t szind) {
- assert(extent_slab_get(extent));
+ assert(edata_slab_get(edata));
/* Register interior. */
- for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ for (size_t i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
rtree_write(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE), extent, szind, true);
+ (uintptr_t)edata_base_get(edata) + (uintptr_t)(i <<
+ LG_PAGE), edata, szind, true);
}
}
static void
-extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
+extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nadd = extent_size_get(extent) >> LG_PAGE;
+ if (opt_prof && edata_state_get(edata) == extent_state_active) {
+ size_t nadd = edata_size_get(edata) >> LG_PAGE;
size_t cur = atomic_fetch_add_zu(&curpages, nadd,
ATOMIC_RELAXED) + nadd;
size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
@@ -419,18 +419,18 @@ extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
}
static void
-extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
+extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nsub = extent_size_get(extent) >> LG_PAGE;
+ if (opt_prof && edata_state_get(edata) == extent_state_active) {
+ size_t nsub = edata_size_get(edata) >> LG_PAGE;
assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
}
}
static bool
-extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
+extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
@@ -439,43 +439,43 @@ extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
* We need to hold the lock to protect against a concurrent coalesce
* operation that sees us in a partial state.
*/
- extent_lock(tsdn, extent);
+ extent_lock_edata(tsdn, edata);
- if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
+ if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, edata, false, true,
&elm_a, &elm_b)) {
- extent_unlock(tsdn, extent);
+ extent_unlock_edata(tsdn, edata);
return true;
}
- szind_t szind = extent_szind_get_maybe_invalid(extent);
- bool slab = extent_slab_get(extent);
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
+ szind_t szind = edata_szind_get_maybe_invalid(edata);
+ bool slab = edata_slab_get(edata);
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, edata, szind, slab);
if (slab) {
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ extent_interior_register(tsdn, rtree_ctx, edata, szind);
}
- extent_unlock(tsdn, extent);
+ extent_unlock_edata(tsdn, edata);
if (config_prof && gdump_add) {
- extent_gdump_add(tsdn, extent);
+ extent_gdump_add(tsdn, edata);
}
return false;
}
static bool
-extent_register(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, true);
+extent_register(tsdn_t *tsdn, edata_t *edata) {
+ return extent_register_impl(tsdn, edata, true);
}
static bool
-extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, false);
+extent_register_no_gdump_add(tsdn_t *tsdn, edata_t *edata) {
+ return extent_register_impl(tsdn, edata, false);
}
static void
-extent_reregister(tsdn_t *tsdn, extent_t *extent) {
- bool err = extent_register(tsdn, extent);
+extent_reregister(tsdn_t *tsdn, edata_t *edata) {
+ bool err = extent_register(tsdn, edata);
assert(!err);
}
@@ -488,14 +488,14 @@ extent_reregister(tsdn_t *tsdn, extent_t *extent) {
*/
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- extent_t *extent) {
+ edata_t *edata) {
size_t i;
- assert(extent_slab_get(extent));
+ assert(edata_slab_get(edata));
- for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ for (i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
rtree_clear(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ (uintptr_t)edata_base_get(edata) + (uintptr_t)(i <<
LG_PAGE));
}
}
@@ -504,43 +504,43 @@ extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
* Removes all pointers to the given extent from the global rtree.
*/
static void
-extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
+extent_deregister_impl(tsdn_t *tsdn, edata_t *edata, bool gdump) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, edata, true, false,
&elm_a, &elm_b);
- extent_lock(tsdn, extent);
+ extent_lock_edata(tsdn, edata);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
+ if (edata_slab_get(edata)) {
+ extent_interior_deregister(tsdn, rtree_ctx, edata);
+ edata_slab_set(edata, false);
}
- extent_unlock(tsdn, extent);
+ extent_unlock_edata(tsdn, edata);
if (config_prof && gdump) {
- extent_gdump_sub(tsdn, extent);
+ extent_gdump_sub(tsdn, edata);
}
}
static void
-extent_deregister(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, true);
+extent_deregister(tsdn_t *tsdn, edata_t *edata) {
+ extent_deregister_impl(tsdn, edata, true);
}
static void
-extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, false);
+extent_deregister_no_gdump_sub(tsdn_t *tsdn, edata_t *edata) {
+ extent_deregister_impl(tsdn, edata, false);
}
/*
* Tries to find and remove an extent from eset that can be used for the
* given allocation request.
*/
-static extent_t *
+static edata_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
rtree_ctx_t *rtree_ctx, eset_t *eset, void *new_addr, size_t size,
size_t pad, size_t alignment, bool slab, bool growing_retained) {
@@ -566,62 +566,60 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
size_t esize = size + pad;
malloc_mutex_lock(tsdn, &eset->mtx);
- extent_t *extent;
+ edata_t *edata;
if (new_addr != NULL) {
- extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
+ edata = extent_lock_edata_from_addr(tsdn, rtree_ctx, new_addr,
false);
- if (extent != NULL) {
+ if (edata != NULL) {
/*
- * We might null-out extent to report an error, but we
+ * We might null-out edata to report an error, but we
* still need to unlock the associated mutex after.
*/
- extent_t *unlock_extent = extent;
- assert(extent_base_get(extent) == new_addr);
- if (extent_arena_ind_get(extent)
- != arena_ind_get(arena) ||
- extent_size_get(extent) < esize ||
- extent_state_get(extent) !=
- eset_state_get(eset)) {
- extent = NULL;
+ edata_t *unlock_edata = edata;
+ assert(edata_base_get(edata) == new_addr);
+ if (edata_arena_ind_get(edata) != arena_ind_get(arena)
+ || edata_size_get(edata) < esize
+ || edata_state_get(edata) != eset_state_get(eset)) {
+ edata = NULL;
}
- extent_unlock(tsdn, unlock_extent);
+ extent_unlock_edata(tsdn, unlock_edata);
}
} else {
- extent = eset_fit_locked(tsdn, eset, esize, alignment);
+ edata = eset_fit_locked(tsdn, eset, esize, alignment);
}
- if (extent == NULL) {
+ if (edata == NULL) {
malloc_mutex_unlock(tsdn, &eset->mtx);
return NULL;
}
- extent_activate_locked(tsdn, arena, eset, extent);
+ extent_activate_locked(tsdn, arena, eset, edata);
malloc_mutex_unlock(tsdn, &eset->mtx);
- return extent;
+ return edata;
}
/*
* Given an allocation request and an extent guaranteed to be able to satisfy
- * it, this splits off lead and trail extents, leaving extent pointing to an
+ * it, this splits off lead and trail extents, leaving edata pointing to an
* extent satisfying the allocation.
* This function doesn't put lead or trail into any eset_t; it's the caller's
* job to ensure that they can be reused.
*/
typedef enum {
/*
- * Split successfully. lead, extent, and trail, are modified to extents
+ * Split successfully. lead, edata, and trail, are modified to extents
* describing the ranges before, in, and after the given allocation.
*/
extent_split_interior_ok,
/*
* The extent can't satisfy the given allocation request. None of the
- * input extent_t *s are touched.
+ * input edata_t *s are touched.
*/
extent_split_interior_cant_alloc,
/*
* In a potentially invalid state. Must leak (if *to_leak is non-NULL),
* and salvage what's still salvageable (if *to_salvage is non-NULL).
- * None of lead, extent, or trail are valid.
+ * None of lead, edata, or trail are valid.
*/
extent_split_interior_error
} extent_split_interior_result_t;
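A hedged orientation sketch (not part of the patch; every identifier below appears elsewhere in this diff): a caller of extent_split_interior() dispatches on this result type roughly the way extent_recycle_split() does further down, keeping the successful split and cleaning up the error cases:

    edata_t *lead, *trail, *to_leak, *to_salvage;
    extent_split_interior_result_t result = extent_split_interior(tsdn,
        arena, ehooks, rtree_ctx, &edata, &lead, &trail, &to_leak,
        &to_salvage, new_addr, size, pad, alignment, slab, szind,
        growing_retained);
    if (result == extent_split_interior_ok) {
        /* lead/trail, when non-NULL, go back into the eset; edata is used. */
    } else {
        /* to_salvage is deregistered; to_leak is abandoned as virtual memory. */
    }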
@@ -630,19 +628,19 @@ static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
rtree_ctx_t *rtree_ctx,
/* The result of splitting, in case of success. */
- extent_t **extent, extent_t **lead, extent_t **trail,
+ edata_t **edata, edata_t **lead, edata_t **trail,
/* The mess to clean up, in case of error. */
- extent_t **to_leak, extent_t **to_salvage,
+ edata_t **to_leak, edata_t **to_salvage,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool growing_retained) {
size_t esize = size + pad;
- size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
- PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
+ PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
assert(new_addr == NULL || leadsize == 0);
- if (extent_size_get(*extent) < leadsize + esize) {
+ if (edata_size_get(*edata) < leadsize + esize) {
return extent_split_interior_cant_alloc;
}
- size_t trailsize = extent_size_get(*extent) - leadsize - esize;
+ size_t trailsize = edata_size_get(*edata) - leadsize - esize;
*lead = NULL;
*trail = NULL;
@@ -651,11 +649,11 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
/* Split the lead. */
if (leadsize != 0) {
- *lead = *extent;
- *extent = extent_split_impl(tsdn, arena, ehooks, *lead,
+ *lead = *edata;
+ *edata = extent_split_impl(tsdn, arena, ehooks, *lead,
leadsize, SC_NSIZES, false, esize + trailsize, szind, slab,
growing_retained);
- if (*extent == NULL) {
+ if (*edata == NULL) {
*to_leak = *lead;
*lead = NULL;
return extent_split_interior_error;
@@ -664,13 +662,13 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
/* Split the trail. */
if (trailsize != 0) {
- *trail = extent_split_impl(tsdn, arena, ehooks, *extent, esize,
+ *trail = extent_split_impl(tsdn, arena, ehooks, *edata, esize,
szind, slab, trailsize, SC_NSIZES, false, growing_retained);
if (*trail == NULL) {
- *to_leak = *extent;
+ *to_leak = *edata;
*to_salvage = *lead;
*lead = NULL;
- *extent = NULL;
+ *edata = NULL;
return extent_split_interior_error;
}
}
@@ -680,14 +678,14 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
- extent_szind_set(*extent, szind);
+ edata_szind_set(*edata, szind);
if (szind != SC_NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(*extent), szind, slab);
- if (slab && extent_size_get(*extent) > PAGE) {
+ (uintptr_t)edata_addr_get(*edata), szind, slab);
+ if (slab && edata_size_get(*edata) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
- (uintptr_t)extent_past_get(*extent) -
+ (uintptr_t)edata_past_get(*edata) -
(uintptr_t)PAGE, szind, slab);
}
}
@@ -702,18 +700,18 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
* before or after the resulting allocation, that space is given its own extent
* and put back into eset.
*/
-static extent_t *
+static edata_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
rtree_ctx_t *rtree_ctx, eset_t *eset, void *new_addr, size_t size,
- size_t pad, size_t alignment, bool slab, szind_t szind, extent_t *extent,
+ size_t pad, size_t alignment, bool slab, szind_t szind, edata_t *edata,
bool growing_retained) {
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
+ edata_t *lead;
+ edata_t *trail;
+ edata_t *to_leak;
+ edata_t *to_salvage;
extent_split_interior_result_t result = extent_split_interior(
- tsdn, arena, ehooks, rtree_ctx, &extent, &lead, &trail, &to_leak,
+ tsdn, arena, ehooks, rtree_ctx, &edata, &lead, &trail, &to_leak,
&to_salvage, new_addr, size, pad, alignment, slab, szind,
growing_retained);
@@ -735,7 +733,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
if (trail != NULL) {
extent_deactivate(tsdn, arena, eset, trail);
}
- return extent;
+ return edata;
} else {
/*
* We should have picked an extent that was large enough to
@@ -746,11 +744,11 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
extent_deregister(tsdn, to_salvage);
}
if (to_leak != NULL) {
- void *leak = extent_base_get(to_leak);
+ void *leak = edata_base_get(to_leak);
extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_abandon_vm(tsdn, arena, ehooks, eset, to_leak,
growing_retained);
- assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
+ assert(extent_lock_edata_from_addr(tsdn, rtree_ctx, leak,
false) == NULL);
}
return NULL;
@@ -762,7 +760,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
* Tries to satisfy the given allocation request by reusing one of the extents
* in the given eset_t.
*/
-static extent_t *
+static edata_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit, bool growing_retained) {
@@ -775,54 +773,54 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- extent_t *extent = extent_recycle_extract(tsdn, arena, ehooks,
+ edata_t *edata = extent_recycle_extract(tsdn, arena, ehooks,
rtree_ctx, eset, new_addr, size, pad, alignment, slab,
growing_retained);
- if (extent == NULL) {
+ if (edata == NULL) {
return NULL;
}
- extent = extent_recycle_split(tsdn, arena, ehooks, rtree_ctx, eset,
- new_addr, size, pad, alignment, slab, szind, extent,
+ edata = extent_recycle_split(tsdn, arena, ehooks, rtree_ctx, eset,
+ new_addr, size, pad, alignment, slab, szind, edata,
growing_retained);
- if (extent == NULL) {
+ if (edata == NULL) {
return NULL;
}
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, ehooks, extent, 0,
- extent_size_get(extent), growing_retained)) {
- extent_record(tsdn, arena, ehooks, eset, extent,
+ if (*commit && !edata_committed_get(edata)) {
+ if (extent_commit_impl(tsdn, arena, ehooks, edata, 0,
+ edata_size_get(edata), growing_retained)) {
+ extent_record(tsdn, arena, ehooks, eset, edata,
growing_retained);
return NULL;
}
}
- if (extent_committed_get(extent)) {
+ if (edata_committed_get(edata)) {
*commit = true;
}
- if (extent_zeroed_get(extent)) {
+ if (edata_zeroed_get(edata)) {
*zero = true;
}
if (pad != 0) {
- extent_addr_randomize(tsdn, arena, extent, alignment);
+ extent_addr_randomize(tsdn, arena, edata, alignment);
}
- assert(extent_state_get(extent) == extent_state_active);
+ assert(edata_state_get(edata) == extent_state_active);
if (slab) {
- extent_slab_set(extent, slab);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ edata_slab_set(edata, slab);
+ extent_interior_register(tsdn, rtree_ctx, edata, szind);
}
if (*zero) {
- void *addr = extent_base_get(extent);
- if (!extent_zeroed_get(extent)) {
- size_t size = extent_size_get(extent);
+ void *addr = edata_base_get(edata);
+ if (!edata_zeroed_get(edata)) {
+ size_t size = edata_size_get(edata);
ehooks_zero(tsdn, ehooks, addr, size,
arena_ind_get(arena));
}
}
- return extent;
+ return edata;
}
/*
@@ -830,7 +828,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
* to split requested extents in order to limit the total number of disjoint
* virtual memory ranges retained by each arena.
*/
-static extent_t *
+static edata_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
bool *zero, bool *commit) {
@@ -860,8 +858,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
}
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
+ edata_t *edata = extent_alloc(tsdn, arena);
+ if (edata == NULL) {
goto label_err;
}
bool zeroed = false;
@@ -870,35 +868,35 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
&committed, arena_ind_get(arena));
- extent_init(extent, arena_ind_get(arena), ptr, alloc_size, false,
+ edata_init(edata, arena_ind_get(arena), ptr, alloc_size, false,
SC_NSIZES, arena_extent_sn_next(arena), extent_state_active, zeroed,
committed, true, EXTENT_IS_HEAD);
if (ptr == NULL) {
- extent_dalloc(tsdn, arena, extent);
+ extent_dalloc(tsdn, arena, edata);
goto label_err;
}
- if (extent_register_no_gdump_add(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
+ if (extent_register_no_gdump_add(tsdn, edata)) {
+ extent_dalloc(tsdn, arena, edata);
goto label_err;
}
- if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
+ if (edata_zeroed_get(edata) && edata_committed_get(edata)) {
*zero = true;
}
- if (extent_committed_get(extent)) {
+ if (edata_committed_get(edata)) {
*commit = true;
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
+ edata_t *lead;
+ edata_t *trail;
+ edata_t *to_leak;
+ edata_t *to_salvage;
extent_split_interior_result_t result = extent_split_interior(tsdn,
- arena, ehooks, rtree_ctx, &extent, &lead, &trail, &to_leak,
+ arena, ehooks, rtree_ctx, &edata, &lead, &trail, &to_leak,
&to_salvage, NULL, size, pad, alignment, slab, szind, true);
if (result == extent_split_interior_ok) {
@@ -931,16 +929,16 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
goto label_err;
}
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, ehooks, extent, 0,
- extent_size_get(extent), true)) {
+ if (*commit && !edata_committed_get(edata)) {
+ if (extent_commit_impl(tsdn, arena, ehooks, edata, 0,
+ edata_size_get(edata), true)) {
extent_record(tsdn, arena, ehooks,
- &arena->eset_retained, extent, true);
+ &arena->eset_retained, edata, true);
goto label_err;
}
/* A successful commit should return zeroed memory. */
if (config_debug) {
- void *addr = extent_addr_get(extent);
+ void *addr = edata_addr_get(edata);
size_t *p = (size_t *)(uintptr_t)addr;
/* Check the first page only. */
for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
@@ -964,32 +962,32 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
if (config_prof) {
/* Adjust gdump stats now that extent is final size. */
- extent_gdump_add(tsdn, extent);
+ extent_gdump_add(tsdn, edata);
}
if (pad != 0) {
- extent_addr_randomize(tsdn, arena, extent, alignment);
+ extent_addr_randomize(tsdn, arena, edata, alignment);
}
if (slab) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
- extent_slab_set(extent, true);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ edata_slab_set(edata, true);
+ extent_interior_register(tsdn, rtree_ctx, edata, szind);
}
- if (*zero && !extent_zeroed_get(extent)) {
- void *addr = extent_base_get(extent);
- size_t size = extent_size_get(extent);
+ if (*zero && !edata_zeroed_get(edata)) {
+ void *addr = edata_base_get(edata);
+ size_t size = edata_size_get(edata);
ehooks_zero(tsdn, ehooks, addr, size, arena_ind_get(arena));
}
- return extent;
+ return edata;
label_err:
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
return NULL;
}
-static extent_t *
+static edata_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit) {
@@ -998,16 +996,16 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
- extent_t *extent = extent_recycle(tsdn, arena, ehooks,
+ edata_t *edata = extent_recycle(tsdn, arena, ehooks,
&arena->eset_retained, new_addr, size, pad, alignment, slab,
szind, zero, commit, true);
- if (extent != NULL) {
+ if (edata != NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
if (config_prof) {
- extent_gdump_add(tsdn, extent);
+ extent_gdump_add(tsdn, edata);
}
} else if (opt_retain && new_addr == NULL) {
- extent = extent_grow_retained(tsdn, arena, ehooks, size, pad,
+ edata = extent_grow_retained(tsdn, arena, ehooks, size, pad,
alignment, slab, szind, zero, commit);
/* extent_grow_retained() always releases extent_grow_mtx. */
} else {
@@ -1015,49 +1013,49 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
}
malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
- return extent;
+ return edata;
}
-static extent_t *
+static edata_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit) {
size_t esize = size + pad;
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
+ edata_t *edata = extent_alloc(tsdn, arena);
+ if (edata == NULL) {
return NULL;
}
size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
void *addr = ehooks_alloc(tsdn, ehooks, new_addr, esize, palignment,
zero, commit, arena_ind_get(arena));
if (addr == NULL) {
- extent_dalloc(tsdn, arena, extent);
+ extent_dalloc(tsdn, arena, edata);
return NULL;
}
- extent_init(extent, arena_ind_get(arena), addr, esize, slab, szind,
+ edata_init(edata, arena_ind_get(arena), addr, esize, slab, szind,
arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
true, EXTENT_NOT_HEAD);
if (pad != 0) {
- extent_addr_randomize(tsdn, arena, extent, alignment);
+ extent_addr_randomize(tsdn, arena, edata, alignment);
}
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
+ if (extent_register(tsdn, edata)) {
+ extent_dalloc(tsdn, arena, edata);
return NULL;
}
- return extent;
+ return edata;
}
-extent_t *
+edata_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_t *extent = extent_alloc_retained(tsdn, arena, ehooks, new_addr,
+ edata_t *edata = extent_alloc_retained(tsdn, arena, ehooks, new_addr,
size, pad, alignment, slab, szind, zero, commit);
- if (extent == NULL) {
+ if (edata == NULL) {
if (opt_retain && new_addr != NULL) {
/*
* When retain is enabled and new_addr is set, we do not
@@ -1067,28 +1065,28 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
*/
return NULL;
}
- extent = extent_alloc_wrapper_hard(tsdn, arena, ehooks,
+ edata = extent_alloc_wrapper_hard(tsdn, arena, ehooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
}
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
+ assert(edata == NULL || edata_dumpable_get(edata));
+ return edata;
}
static bool
-extent_can_coalesce(arena_t *arena, eset_t *eset, const extent_t *inner,
- const extent_t *outer) {
- assert(extent_arena_ind_get(inner) == arena_ind_get(arena));
- if (extent_arena_ind_get(outer) != arena_ind_get(arena)) {
+extent_can_coalesce(arena_t *arena, eset_t *eset, const edata_t *inner,
+ const edata_t *outer) {
+ assert(edata_arena_ind_get(inner) == arena_ind_get(arena));
+ if (edata_arena_ind_get(outer) != arena_ind_get(arena)) {
return false;
}
- assert(extent_state_get(inner) == extent_state_active);
- if (extent_state_get(outer) != eset->state) {
+ assert(edata_state_get(inner) == extent_state_active);
+ if (edata_state_get(outer) != eset->state) {
return false;
}
- if (extent_committed_get(inner) != extent_committed_get(outer)) {
+ if (edata_committed_get(inner) != edata_committed_get(outer)) {
return false;
}
@@ -1097,7 +1095,7 @@ extent_can_coalesce(arena_t *arena, eset_t *eset, const extent_t *inner,
static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
- extent_t *inner, extent_t *outer, bool forward, bool growing_retained) {
+ edata_t *inner, edata_t *outer, bool forward, bool growing_retained) {
assert(extent_can_coalesce(arena, eset, inner, outer));
extent_activate_locked(tsdn, arena, eset, outer);
@@ -1114,9 +1112,9 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
return err;
}
-static extent_t *
+static edata_t *
extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- rtree_ctx_t *rtree_ctx, eset_t *eset, extent_t *extent, bool *coalesced,
+ rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata, bool *coalesced,
bool growing_retained, bool inactive_only) {
/*
* We avoid checking / locking inactive neighbors for large size
@@ -1132,8 +1130,8 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
again = false;
/* Try to coalesce forward. */
- extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_past_get(extent), inactive_only);
+ edata_t *next = extent_lock_edata_from_addr(tsdn, rtree_ctx,
+ edata_past_get(edata), inactive_only);
if (next != NULL) {
/*
* eset->mtx only protects against races for
@@ -1141,38 +1139,38 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
* before releasing next's pool lock.
*/
bool can_coalesce = extent_can_coalesce(arena, eset,
- extent, next);
+ edata, next);
- extent_unlock(tsdn, next);
+ extent_unlock_edata(tsdn, next);
if (can_coalesce && !extent_coalesce(tsdn, arena,
- ehooks, eset, extent, next, true,
+ ehooks, eset, edata, next, true,
growing_retained)) {
if (eset->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
- return extent;
+ return edata;
}
again = true;
}
}
/* Try to coalesce backward. */
- extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_before_get(extent), inactive_only);
+ edata_t *prev = extent_lock_edata_from_addr(tsdn, rtree_ctx,
+ edata_before_get(edata), inactive_only);
if (prev != NULL) {
bool can_coalesce = extent_can_coalesce(arena, eset,
- extent, prev);
- extent_unlock(tsdn, prev);
+ edata, prev);
+ extent_unlock_edata(tsdn, prev);
if (can_coalesce && !extent_coalesce(tsdn, arena,
- ehooks, eset, extent, prev, false,
+ ehooks, eset, edata, prev, false,
growing_retained)) {
- extent = prev;
+ edata = prev;
if (eset->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
- return extent;
+ return edata;
}
again = true;
}
@@ -1182,23 +1180,23 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
if (eset->delay_coalesce) {
*coalesced = false;
}
- return extent;
+ return edata;
}
-static extent_t *
+static edata_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- rtree_ctx_t *rtree_ctx, eset_t *eset, extent_t *extent, bool *coalesced,
+ rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata, bool *coalesced,
bool growing_retained) {
return extent_try_coalesce_impl(tsdn, arena, ehooks, rtree_ctx, eset,
- extent, coalesced, growing_retained, false);
+ edata, coalesced, growing_retained, false);
}
-static extent_t *
+static edata_t *
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- rtree_ctx_t *rtree_ctx, eset_t *eset, extent_t *extent, bool *coalesced,
+ rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata, bool *coalesced,
bool growing_retained) {
return extent_try_coalesce_impl(tsdn, arena, ehooks, rtree_ctx, eset,
- extent, coalesced, growing_retained, true);
+ edata, coalesced, growing_retained, true);
}
/*
@@ -1207,62 +1205,62 @@ extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
*/
static void
extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
- extent_t *extent, bool growing_retained) {
+ edata_t *edata, bool growing_retained) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
assert((eset_state_get(eset) != extent_state_dirty &&
eset_state_get(eset) != extent_state_muzzy) ||
- !extent_zeroed_get(extent));
+ !edata_zeroed_get(edata));
malloc_mutex_lock(tsdn, &eset->mtx);
- extent_szind_set(extent, SC_NSIZES);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
+ edata_szind_set(edata, SC_NSIZES);
+ if (edata_slab_get(edata)) {
+ extent_interior_deregister(tsdn, rtree_ctx, edata);
+ edata_slab_set(edata, false);
}
- assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), true) == extent);
+ assert(rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata), true) == edata);
if (!eset->delay_coalesce) {
- extent = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx,
- eset, extent, NULL, growing_retained);
- } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
+ edata = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx,
+ eset, edata, NULL, growing_retained);
+ } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
assert(eset == &arena->eset_dirty);
/* Always coalesce large eset eagerly. */
bool coalesced;
do {
- assert(extent_state_get(extent) == extent_state_active);
- extent = extent_try_coalesce_large(tsdn, arena, ehooks,
- rtree_ctx, eset, extent, &coalesced,
+ assert(edata_state_get(edata) == extent_state_active);
+ edata = extent_try_coalesce_large(tsdn, arena, ehooks,
+ rtree_ctx, eset, edata, &coalesced,
growing_retained);
} while (coalesced);
- if (extent_size_get(extent) >= oversize_threshold) {
+ if (edata_size_get(edata) >= oversize_threshold) {
/* Shortcut to purge the oversize extent eagerly. */
malloc_mutex_unlock(tsdn, &eset->mtx);
- arena_decay_extent(tsdn, arena, ehooks, extent);
+ arena_decay_extent(tsdn, arena, ehooks, edata);
return;
}
}
- extent_deactivate_locked(tsdn, arena, eset, extent);
+ extent_deactivate_locked(tsdn, arena, eset, edata);
malloc_mutex_unlock(tsdn, &eset->mtx);
}
void
-extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
ehooks_t *ehooks = arena_get_ehooks(arena);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
+ if (extent_register(tsdn, edata)) {
+ extent_dalloc(tsdn, arena, edata);
return;
}
- extent_dalloc_wrapper(tsdn, arena, ehooks, extent);
+ extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
}
static bool
@@ -1273,23 +1271,23 @@ extent_may_dalloc(void) {
static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent) {
+ edata_t *edata) {
bool err;
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_addr_set(extent, extent_base_get(extent));
+ edata_addr_set(edata, edata_base_get(edata));
/* Try to deallocate. */
- err = ehooks_dalloc(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), extent_committed_get(extent),
+ err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), edata_committed_get(edata),
arena_ind_get(arena));
if (!err) {
- extent_dalloc(tsdn, arena, extent);
+ extent_dalloc(tsdn, arena, edata);
}
return err;
@@ -1297,8 +1295,8 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent) {
- assert(extent_dumpable_get(extent));
+ edata_t *edata) {
+ assert(edata_dumpable_get(edata));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
@@ -1308,124 +1306,123 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
* Deregister first to avoid a race with other allocating
* threads, and reregister if deallocation fails.
*/
- extent_deregister(tsdn, extent);
- if (!extent_dalloc_wrapper_try(tsdn, arena, ehooks, extent)) {
+ extent_deregister(tsdn, edata);
+ if (!extent_dalloc_wrapper_try(tsdn, arena, ehooks, edata)) {
return;
}
- extent_reregister(tsdn, extent);
+ extent_reregister(tsdn, edata);
}
/* Try to decommit; purge if that fails. */
bool zeroed;
- if (!extent_committed_get(extent)) {
+ if (!edata_committed_get(edata)) {
zeroed = true;
- } else if (!extent_decommit_wrapper(tsdn, arena, ehooks, extent, 0,
- extent_size_get(extent))) {
+ } else if (!extent_decommit_wrapper(tsdn, arena, ehooks, edata, 0,
+ edata_size_get(edata))) {
zeroed = true;
- } else if (!ehooks_purge_forced(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), 0, extent_size_get(extent),
+ } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), 0, edata_size_get(edata),
arena_ind_get(arena))) {
zeroed = true;
- } else if (extent_state_get(extent) == extent_state_muzzy ||
- !ehooks_purge_lazy(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), 0, extent_size_get(extent),
+ } else if (edata_state_get(edata) == extent_state_muzzy ||
+ !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), 0, edata_size_get(edata),
arena_ind_get(arena))) {
zeroed = false;
} else {
zeroed = false;
}
- extent_zeroed_set(extent, zeroed);
+ edata_zeroed_set(edata, zeroed);
if (config_prof) {
- extent_gdump_sub(tsdn, extent);
+ extent_gdump_sub(tsdn, edata);
}
- extent_record(tsdn, arena, ehooks, &arena->eset_retained, extent,
- false);
+ extent_record(tsdn, arena, ehooks, &arena->eset_retained, edata, false);
}
void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
+ edata_t *edata) {
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Deregister first to avoid a race with other allocating threads. */
- extent_deregister(tsdn, extent);
+ extent_deregister(tsdn, edata);
- extent_addr_set(extent, extent_base_get(extent));
+ edata_addr_set(edata, edata_base_get(edata));
/* Try to destroy; silently fail otherwise. */
- ehooks_destroy(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), extent_committed_get(extent),
+ ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), edata_committed_get(edata),
arena_ind_get(arena));
- extent_dalloc(tsdn, arena, extent);
+ extent_dalloc(tsdn, arena, edata);
}
static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length, bool growing_retained) {
+ edata_t *edata, size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- bool err = ehooks_commit(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena_ind_get(arena));
- extent_committed_set(extent, extent_committed_get(extent) || !err);
+ bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length, arena_ind_get(arena));
+ edata_committed_set(edata, edata_committed_get(edata) || !err);
return err;
}
bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset,
+ edata_t *edata, size_t offset,
size_t length) {
- return extent_commit_impl(tsdn, arena, ehooks, extent, offset, length,
+ return extent_commit_impl(tsdn, arena, ehooks, edata, offset, length,
false);
}
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length) {
+ edata_t *edata, size_t offset, size_t length) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- bool err = ehooks_decommit(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena_ind_get(arena));
- extent_committed_set(extent, extent_committed_get(extent) && err);
+ bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length, arena_ind_get(arena));
+ edata_committed_set(edata, edata_committed_get(edata) && err);
return err;
}
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length, bool growing_retained) {
+ edata_t *edata, size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- bool err = ehooks_purge_lazy(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena_ind_get(arena));
+ bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length, arena_ind_get(arena));
return err;
}
bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length) {
- return extent_purge_lazy_impl(tsdn, arena, ehooks, extent, offset,
+ edata_t *edata, size_t offset, size_t length) {
+ return extent_purge_lazy_impl(tsdn, arena, ehooks, edata, offset,
length, false);
}
static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length, bool growing_retained) {
+ edata_t *edata, size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- bool err = ehooks_purge_forced(tsdn, ehooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena_ind_get(arena));
+ bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length, arena_ind_get(arena));
return err;
}
bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t offset, size_t length) {
- return extent_purge_forced_impl(tsdn, arena, ehooks, extent,
+ edata_t *edata, size_t offset, size_t length) {
+ return extent_purge_forced_impl(tsdn, arena, ehooks, edata,
offset, length, false);
}
@@ -1436,11 +1433,11 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
* with the trail (the higher addressed portion). This makes 'extent' the lead,
* and returns the trail (except in case of error).
*/
-static extent_t *
+static edata_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a,
+ edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a,
size_t size_b, szind_t szind_b, bool slab_b, bool growing_retained) {
- assert(extent_size_get(extent) == size_a + size_b);
+ assert(edata_size_get(edata) == size_a + size_b);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
@@ -1448,28 +1445,28 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
return NULL;
}
- extent_t *trail = extent_alloc(tsdn, arena);
+ edata_t *trail = extent_alloc(tsdn, arena);
if (trail == NULL) {
goto label_error_a;
}
- extent_init(trail, arena_ind_get(arena),
- (void *)((uintptr_t)extent_base_get(extent) + size_a), size_b,
- slab_b, szind_b, extent_sn_get(extent), extent_state_get(extent),
- extent_zeroed_get(extent), extent_committed_get(extent),
- extent_dumpable_get(extent), EXTENT_NOT_HEAD);
+ edata_init(trail, arena_ind_get(arena),
+ (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
+ slab_b, szind_b, edata_sn_get(edata), edata_state_get(edata),
+ edata_zeroed_get(edata), edata_committed_get(edata),
+ edata_dumpable_get(edata), EXTENT_NOT_HEAD);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
{
- extent_t lead;
+ edata_t lead;
- extent_init(&lead, arena_ind_get(arena),
- extent_addr_get(extent), size_a,
- slab_a, szind_a, extent_sn_get(extent),
- extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent), extent_dumpable_get(extent),
+ edata_init(&lead, arena_ind_get(arena),
+ edata_addr_get(edata), size_a,
+ slab_a, szind_a, edata_sn_get(edata),
+ edata_state_get(edata), edata_zeroed_get(edata),
+ edata_committed_get(edata), edata_dumpable_get(edata),
EXTENT_NOT_HEAD);
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
@@ -1484,40 +1481,40 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
goto label_error_b;
}
- extent_lock2(tsdn, extent, trail);
+ extent_lock_edata2(tsdn, edata, trail);
- bool err = ehooks_split(tsdn, ehooks, extent_base_get(extent),
- size_a + size_b, size_a, size_b, extent_committed_get(extent),
+ bool err = ehooks_split(tsdn, ehooks, edata_base_get(edata),
+ size_a + size_b, size_a, size_b, edata_committed_get(edata),
arena_ind_get(arena));
if (err) {
goto label_error_c;
}
- extent_size_set(extent, size_a);
- extent_szind_set(extent, szind_a);
+ edata_size_set(edata, size_a);
+ edata_szind_set(edata, szind_a);
- extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
+ extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, edata,
szind_a, slab_a);
extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
szind_b, slab_b);
- extent_unlock2(tsdn, extent, trail);
+ extent_unlock_edata2(tsdn, edata, trail);
return trail;
label_error_c:
- extent_unlock2(tsdn, extent, trail);
+ extent_unlock_edata2(tsdn, edata, trail);
label_error_b:
extent_dalloc(tsdn, arena, trail);
label_error_a:
return NULL;
}
-extent_t *
+edata_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a,
+ edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a,
size_t size_b, szind_t szind_b, bool slab_b) {
- return extent_split_impl(tsdn, arena, ehooks, extent, size_a, szind_a,
+ return extent_split_impl(tsdn, arena, ehooks, edata, size_a, szind_a,
slab_a, size_b, szind_b, slab_b, false);
}
@@ -1526,8 +1523,8 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
* settings. Assumes the second extent has the higher address.
*/
bool
-extent_head_no_merge(extent_t *a, extent_t *b) {
- assert(extent_base_get(a) < extent_base_get(b));
+extent_head_no_merge(edata_t *a, edata_t *b) {
+ assert(edata_base_get(a) < edata_base_get(b));
/*
* When coalesce is not always allowed (Windows), only merge extents
* from the same VirtualAlloc region under opt.retain (in which case
@@ -1540,33 +1537,33 @@ extent_head_no_merge(extent_t *a, extent_t *b) {
return true;
}
/* If b is a head extent, disallow the cross-region merge. */
- if (extent_is_head_get(b)) {
+ if (edata_is_head_get(b)) {
/*
* Additionally, sn should not overflow with retain; sanity
* check that different regions have unique sn.
*/
- assert(extent_sn_comp(a, b) != 0);
+ assert(edata_sn_comp(a, b) != 0);
return true;
}
- assert(extent_sn_comp(a, b) == 0);
+ assert(edata_sn_comp(a, b) == 0);
return false;
}
static bool
-extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, extent_t *a,
- extent_t *b, bool growing_retained) {
+extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, edata_t *a,
+ edata_t *b, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(extent_base_get(a) < extent_base_get(b));
+ assert(edata_base_get(a) < edata_base_get(b));
if (ehooks_merge_will_fail(ehooks) || extent_head_no_merge(a, b)) {
return true;
}
- bool err = ehooks_merge(tsdn, ehooks, extent_base_get(a),
- extent_size_get(a), extent_base_get(b), extent_size_get(b),
- extent_committed_get(a), arena_ind_get(arena));
+ bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
+ edata_size_get(a), edata_base_get(b), edata_size_get(b),
+ edata_committed_get(a), arena_ind_get(arena));
if (err) {
return true;
@@ -1585,7 +1582,7 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, extent_t *a,
extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
&b_elm_b);
- extent_lock2(tsdn, a, b);
+ extent_lock_edata2(tsdn, a, b);
if (a_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
@@ -1598,22 +1595,22 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, extent_t *a,
b_elm_b = b_elm_a;
}
- extent_size_set(a, extent_size_get(a) + extent_size_get(b));
- extent_szind_set(a, SC_NSIZES);
- extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
- extent_sn_get(a) : extent_sn_get(b));
- extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+ edata_size_set(a, edata_size_get(a) + edata_size_get(b));
+ edata_szind_set(a, SC_NSIZES);
+ edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
+ edata_sn_get(a) : edata_sn_get(b));
+ edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
false);
- extent_unlock2(tsdn, a, b);
+ extent_unlock_edata2(tsdn, a, b);
/*
* If we got here, we merged the extents; so they must be from the same
* arena (i.e. this one).
*/
- assert(extent_arena_ind_get(b) == arena_ind_get(arena));
+ assert(edata_arena_ind_get(b) == arena_ind_get(arena));
extent_dalloc(tsdn, arena, b);
return false;
@@ -1621,7 +1618,7 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, extent_t *a,
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
- extent_t *a, extent_t *b) {
+ edata_t *a, edata_t *b) {
return extent_merge_impl(tsdn, arena, ehooks, a, b, false);
}
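Beyond the lock helpers gaining an _edata suffix (extent_lock/extent_unlock become extent_lock_edata/extent_unlock_edata, and extent_lock2/extent_unlock2 become extent_lock_edata2/extent_unlock_edata2), the extent2.c churn above is the mechanical accessor rename. A minimal before/after sketch of a call site, using only names that appear in this diff and assuming the usual jemalloc internal headers:

    /* Before this patch: */
    extent_t *extent = extent_alloc(tsdn, arena);
    extent_lock(tsdn, extent);
    size_t sz = extent_size_get(extent);
    extent_unlock(tsdn, extent);
    extent_dalloc(tsdn, arena, extent);

    /* After this patch: */
    edata_t *edata = extent_alloc(tsdn, arena);
    extent_lock_edata(tsdn, edata);
    size_t sz = edata_size_get(edata);
    extent_unlock_edata(tsdn, edata);
    extent_dalloc(tsdn, arena, edata);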
diff --git a/src/extent_dss.c b/src/extent_dss.c
index 59e7e7d..a66afb6 100644
--- a/src/extent_dss.c
+++ b/src/extent_dss.c
@@ -109,7 +109,7 @@ extent_dss_max_update(void *new_addr) {
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
- extent_t *gap;
+ edata_t *gap;
cassert(have_dss);
assert(size > 0);
@@ -153,7 +153,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
- extent_init(gap, arena_ind_get(arena),
+ edata_init(gap, arena_ind_get(arena),
gap_addr_page, gap_size_page, false,
SC_NSIZES, arena_extent_sn_next(arena),
extent_state_active, false, true, true,
@@ -194,17 +194,17 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
- extent_t extent;
+ edata_t edata;
ehooks_t *ehooks = arena_get_ehooks(
arena);
- extent_init(&extent,
+ edata_init(&edata,
arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
extent_state_active, false, true,
true, EXTENT_NOT_HEAD);
if (extent_purge_forced_wrapper(tsdn,
- arena, ehooks, &extent, 0, size)) {
+ arena, ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}
diff --git a/src/inspect.c b/src/inspect.c
index 435016e..5ad23a0 100644
--- a/src/inspect.c
+++ b/src/inspect.c
@@ -6,21 +6,21 @@ inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
size_t *nregs, size_t *size) {
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
+ const edata_t *edata = iealloc(tsdn, ptr);
+ if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = 0;
return;
}
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
+ *size = edata_size_get(edata);
+ if (!edata_slab_get(edata)) {
*nfree = 0;
*nregs = 1;
} else {
- *nfree = extent_nfree_get(extent);
- *nregs = bin_infos[extent_szind_get(extent)].nregs;
+ *nfree = edata_nfree_get(edata);
+ *nregs = bin_infos[edata_szind_get(edata)].nregs;
assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
+ assert(*nfree * edata_usize_get(edata) <= *size);
}
}
@@ -31,31 +31,31 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
&& bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
+ const edata_t *edata = iealloc(tsdn, ptr);
+ if (unlikely(edata == NULL)) {
*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
*slabcur_addr = NULL;
return;
}
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
+ *size = edata_size_get(edata);
+ if (!edata_slab_get(edata)) {
*nfree = *bin_nfree = *bin_nregs = 0;
*nregs = 1;
*slabcur_addr = NULL;
return;
}
- *nfree = extent_nfree_get(extent);
- const szind_t szind = extent_szind_get(extent);
+ *nfree = edata_nfree_get(edata);
+ const szind_t szind = edata_szind_get(edata);
*nregs = bin_infos[szind].nregs;
assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
+ assert(*nfree * edata_usize_get(edata) <= *size);
const arena_t *arena = (arena_t *)atomic_load_p(
- &arenas[extent_arena_ind_get(extent)], ATOMIC_RELAXED);
+ &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
assert(arena != NULL);
- const unsigned binshard = extent_binshard_get(extent);
+ const unsigned binshard = edata_binshard_get(edata);
bin_t *bin = &arena->bins[szind].bin_shards[binshard];
malloc_mutex_lock(tsdn, &bin->lock);
@@ -66,12 +66,12 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
} else {
*bin_nfree = *bin_nregs = 0;
}
- extent_t *slab;
+ edata_t *slab;
if (bin->slabcur != NULL) {
slab = bin->slabcur;
} else {
- slab = extent_heap_first(&bin->slabs_nonfull);
+ slab = edata_heap_first(&bin->slabs_nonfull);
}
- *slabcur_addr = slab != NULL ? extent_addr_get(slab) : NULL;
+ *slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
malloc_mutex_unlock(tsdn, &bin->lock);
}
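
Note on the inspect.c hunks above: the rename leaves the utilization logic itself untouched. For a non-slab (large) edata the region counts degenerate to nregs = 1 and nfree = 0, while for a slab the free count is read from the edata and the capacity comes from the bin_infos table indexed by the size class. Below is a minimal standalone sketch of that derivation; edata_view_t, bin_nregs_table and util_stats are hypothetical stand-ins for the real edata_t, bin_infos[] and inspect_extent_util_stats_get(), not jemalloc APIs.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical, simplified stand-in for edata_t. */
typedef struct {
	size_t size;   /* mapped size of the extent */
	bool   slab;   /* true if the extent backs a small-size-class slab */
	size_t szind;  /* size-class index (meaningful only for slabs) */
	size_t nfree;  /* free regions (meaningful only for slabs) */
} edata_view_t;

/* Hypothetical stand-in for bin_infos[szind].nregs. */
static const size_t bin_nregs_table[] = { 512, 256, 128 };

/* Mirrors the nfree/nregs/size derivation shown above. */
static void
util_stats(const edata_view_t *e, size_t *nfree, size_t *nregs, size_t *size) {
	*size = e->size;
	if (!e->slab) {
		/* Large allocation: a single region, always in use. */
		*nfree = 0;
		*nregs = 1;
	} else {
		*nfree = e->nfree;
		*nregs = bin_nregs_table[e->szind];
		assert(*nfree <= *nregs);
	}
}

int
main(void) {
	edata_view_t slab = { .size = 4 * 4096, .slab = true, .szind = 1, .nfree = 200 };
	size_t nfree, nregs, size;
	util_stats(&slab, &nfree, &nregs, &size);
	assert(nregs == 256 && nfree == 200 && size == 4 * 4096);
	return 0;
}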
diff --git a/src/large.c b/src/large.c
index 6fd21be..67b4745 100644
--- a/src/large.c
+++ b/src/large.c
@@ -21,7 +21,7 @@ void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) {
size_t ausize;
- extent_t *extent;
+ edata_t *edata;
bool is_zeroed;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
@@ -44,28 +44,28 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
- if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
+ if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
return NULL;
}
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
- /* Insert extent into large. */
+ /* Insert edata into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_append(&arena->large, extent);
+ edata_list_append(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
if (zero) {
assert(is_zeroed);
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
- extent_usize_get(extent));
+ memset(edata_addr_get(edata), JEMALLOC_ALLOC_JUNK,
+ edata_usize_get(edata));
}
arena_decay_tick(tsdn, arena);
- return extent_addr_get(extent);
+ return edata_addr_get(edata);
}
static void
@@ -90,11 +90,11 @@ large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
large_dalloc_maybe_junk_impl;
static bool
-large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
- arena_t *arena = arena_get_from_extent(extent);
- size_t oldusize = extent_usize_get(extent);
+large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
+ arena_t *arena = arena_get_from_edata(edata);
+ size_t oldusize = edata_usize_get(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
- size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
+ size_t diff = edata_size_get(edata) - (usize + sz_large_pad);
assert(oldusize > usize);
@@ -104,31 +104,31 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
/* Split excess pages. */
if (diff != 0) {
- extent_t *trail = extent_split_wrapper(tsdn, arena,
- ehooks, extent, usize + sz_large_pad, sz_size2index(usize),
+ edata_t *trail = extent_split_wrapper(tsdn, arena,
+ ehooks, edata, usize + sz_large_pad, sz_size2index(usize),
false, diff, SC_NSIZES, false);
if (trail == NULL) {
return true;
}
if (config_fill && unlikely(opt_junk_free)) {
- large_dalloc_maybe_junk(extent_addr_get(trail),
- extent_size_get(trail));
+ large_dalloc_maybe_junk(edata_addr_get(trail),
+ edata_size_get(trail));
}
arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail);
}
- arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
+ arena_extent_ralloc_large_shrink(tsdn, arena, edata, oldusize);
return false;
}
static bool
-large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
bool zero) {
- arena_t *arena = arena_get_from_extent(extent);
- size_t oldusize = extent_usize_get(extent);
+ arena_t *arena = arena_get_from_edata(edata);
+ size_t oldusize = edata_usize_get(edata);
ehooks_t *ehooks = arena_get_ehooks(arena);
size_t trailsize = usize - oldusize;
@@ -147,20 +147,20 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
*/
bool is_zeroed_trail = zero;
bool commit = true;
- extent_t *trail;
+ edata_t *trail;
bool new_mapping;
if ((trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
- extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
+ edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
&is_zeroed_trail, &commit)) != NULL
|| (trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
- extent_past_get(extent), trailsize, 0, CACHELINE, false, SC_NSIZES,
+ edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
&is_zeroed_trail, &commit)) != NULL) {
if (config_stats) {
new_mapping = false;
}
} else {
if ((trail = extent_alloc_wrapper(tsdn, arena, ehooks,
- extent_past_get(extent), trailsize, 0, CACHELINE, false,
+ edata_past_get(edata), trailsize, 0, CACHELINE, false,
SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
return true;
}
@@ -169,16 +169,16 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
}
- if (extent_merge_wrapper(tsdn, arena, ehooks, extent, trail)) {
+ if (extent_merge_wrapper(tsdn, arena, ehooks, edata, trail)) {
extent_dalloc_wrapper(tsdn, arena, ehooks, trail);
return true;
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
+ edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(extent), szind, false);
+ (uintptr_t)edata_addr_get(edata), szind, false);
if (config_stats && new_mapping) {
arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
@@ -194,7 +194,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
- ((uintptr_t)extent_addr_get(extent) + oldusize);
+ ((uintptr_t)edata_addr_get(edata) + oldusize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
@@ -203,19 +203,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
assert(is_zeroed_trail);
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
+ memset((void *)((uintptr_t)edata_addr_get(edata) + oldusize),
JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
- arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
+ arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);
return false;
}
bool
-large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero) {
- size_t oldusize = extent_usize_get(extent);
+ size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
@@ -225,16 +225,15 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
- if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
+ if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
zero)) {
- arena_decay_tick(tsdn, arena_get_from_extent(extent));
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize &&
- large_ralloc_no_move_expand(tsdn, extent, usize_min,
- zero)) {
- arena_decay_tick(tsdn, arena_get_from_extent(extent));
+ large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@@ -244,14 +243,14 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
* the new size.
*/
if (oldusize >= usize_min && oldusize <= usize_max) {
- arena_decay_tick(tsdn, arena_get_from_extent(extent));
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Attempt to shrink the allocation in-place. */
if (oldusize > usize_max) {
- if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
- arena_decay_tick(tsdn, arena_get_from_extent(extent));
+ if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@@ -271,9 +270,9 @@ void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
- extent_t *extent = iealloc(tsdn, ptr);
+ edata_t *edata = iealloc(tsdn, ptr);
- size_t oldusize = extent_usize_get(extent);
+ size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
@@ -281,11 +280,11 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
&& usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
- if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
+ if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
- return extent_addr_get(extent);
+ return edata_addr_get(edata);
}
/*
@@ -306,8 +305,8 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
- memcpy(ret, extent_addr_get(extent), copysize);
- isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
+ memcpy(ret, edata_addr_get(edata), copysize);
+ isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
return ret;
}
@@ -316,76 +315,75 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
* whether the arena's large_mtx is currently held.
*/
static void
-large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
bool junked_locked) {
if (!junked_locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
+ edata_list_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
- large_dalloc_maybe_junk(extent_addr_get(extent),
- extent_usize_get(extent));
+ large_dalloc_maybe_junk(edata_addr_get(edata),
+ edata_usize_get(edata));
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
+ edata_list_remove(&arena->large, edata);
}
}
- arena_extent_dalloc_large_prep(tsdn, arena, extent);
+ arena_extent_dalloc_large_prep(tsdn, arena, edata);
}
static void
-large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
ehooks_t *ehooks = arena_get_ehooks(arena);
- arena_extents_dirty_dalloc(tsdn, arena, ehooks, extent);
+ arena_extents_dirty_dalloc(tsdn, arena, ehooks, edata);
}
void
-large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_prep_impl(tsdn, arena_get_from_extent(extent), extent,
- true);
+large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata) {
+ large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
}
void
-large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_finish_impl(tsdn, arena_get_from_extent(extent), extent);
+large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
+ large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
}
void
-large_dalloc(tsdn_t *tsdn, extent_t *extent) {
- arena_t *arena = arena_get_from_extent(extent);
- large_dalloc_prep_impl(tsdn, arena, extent, false);
- large_dalloc_finish_impl(tsdn, arena, extent);
+large_dalloc(tsdn_t *tsdn, edata_t *edata) {
+ arena_t *arena = arena_get_from_edata(edata);
+ large_dalloc_prep_impl(tsdn, arena, edata, false);
+ large_dalloc_finish_impl(tsdn, arena, edata);
arena_decay_tick(tsdn, arena);
}
size_t
-large_salloc(tsdn_t *tsdn, const extent_t *extent) {
- return extent_usize_get(extent);
+large_salloc(tsdn_t *tsdn, const edata_t *edata) {
+ return edata_usize_get(edata);
}
void
-large_prof_info_get(const extent_t *extent, prof_info_t *prof_info) {
- extent_prof_info_get(extent, prof_info);
+large_prof_info_get(const edata_t *edata, prof_info_t *prof_info) {
+ edata_prof_info_get(edata, prof_info);
}
static void
-large_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
- extent_prof_tctx_set(extent, tctx);
+large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
+ edata_prof_tctx_set(edata, tctx);
}
void
-large_prof_tctx_reset(extent_t *extent) {
- large_prof_tctx_set(extent, (prof_tctx_t *)(uintptr_t)1U);
+large_prof_tctx_reset(edata_t *edata) {
+ large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
}
void
-large_prof_info_set(extent_t *extent, prof_tctx_t *tctx) {
- large_prof_tctx_set(extent, tctx);
+large_prof_info_set(edata_t *edata, prof_tctx_t *tctx) {
+ large_prof_tctx_set(edata, tctx);
nstime_t t;
nstime_init_update(&t);
- extent_prof_alloc_time_set(extent, &t);
+ edata_prof_alloc_time_set(edata, &t);
}
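
The control flow of large_ralloc_no_move() above is also unchanged by the rename: grow in place if possible, accept the current size if it already falls in [usize_min, usize_max], otherwise shrink in place, and only report failure (forcing the caller into allocate-copy-free) when none of those apply. The following is a condensed standalone model of that policy under stated simplifications: the usize_min fallback and the decay ticks are omitted, and try_expand/try_shrink are hypothetical callbacks standing in for large_ralloc_no_move_{expand,shrink}(), which in jemalloc's convention return false on success.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical resize hooks; false means the in-place resize succeeded. */
typedef bool (*resize_hook_t)(size_t new_usize);

/*
 * Returns false if the allocation could be resized in place to some size in
 * [usize_min, usize_max]; true if the caller must move it instead.
 */
static bool
ralloc_no_move_policy(size_t oldusize, size_t usize_min, size_t usize_max,
    resize_hook_t try_expand, resize_hook_t try_shrink) {
	/* Attempt to expand the allocation in place. */
	if (usize_max > oldusize && !try_expand(usize_max)) {
		return false;
	}
	/* The current size may already satisfy the request. */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		return false;
	}
	/* Attempt to shrink the allocation in place. */
	if (oldusize > usize_max && !try_shrink(usize_max)) {
		return false;
	}
	return true;
}

static bool expand_fails(size_t usize) { (void)usize; return true; }
static bool shrink_succeeds(size_t usize) { (void)usize; return false; }

int
main(void) {
	/* Shrinking 64 KiB into [16 KiB, 32 KiB] succeeds in place. */
	assert(!ralloc_no_move_policy(65536, 16384, 32768,
	    expand_fails, shrink_succeeds));
	/* Growing 16 KiB to at least 32 KiB fails when expansion fails. */
	assert(ralloc_no_move_policy(16384, 32768, 65536,
	    expand_fails, shrink_succeeds));
	return 0;
}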
diff --git a/src/tcache.c b/src/tcache.c
index 7922e59..0a511e2 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -114,8 +114,8 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
/* Enabled with --enable-extra-size-check. */
static void
-tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
- size_t nflush, extent_t **extents){
+tbin_edatas_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
+ size_t nflush, edata_t **edatas){
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -129,9 +129,9 @@ tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
size_t sz_sum = binind * nflush;
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
for (unsigned i = 0 ; i < nflush; i++) {
- rtree_extent_szind_read(tsdn, &extents_rtree,
+ rtree_edata_szind_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)*(bottom_item - i), true,
- &extents[i], &szind);
+ &edatas[i], &szind);
sz_sum -= szind;
}
if (sz_sum != 0) {
@@ -154,26 +154,26 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
arena_t *arena = tcache->arena;
assert(arena != NULL);
unsigned nflush = ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ VARIABLE_ARRAY(edata_t *, item_edata, nflush);
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
- /* Look up extent once per item. */
+ /* Look up edata once per item. */
if (config_opt_safety_checks) {
- tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
- nflush, item_extent);
+ tbin_edatas_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
+ nflush, item_edata);
} else {
for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd),
+ item_edata[i] = iealloc(tsd_tsdn(tsd),
*(bottom_item - i));
}
}
while (nflush > 0) {
/* Lock the arena bin associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned bin_arena_ind = extent_arena_ind_get(extent);
+ edata_t *edata = item_edata[0];
+ unsigned bin_arena_ind = edata_arena_ind_get(edata);
arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
false);
- unsigned binshard = extent_binshard_get(extent);
+ unsigned binshard = edata_binshard_get(edata);
assert(binshard < bin_infos[binind].n_shards);
bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
@@ -187,13 +187,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
unsigned ndeferred = 0;
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(bottom_item - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
+ edata = item_edata[i];
+ assert(ptr != NULL && edata != NULL);
- if (extent_arena_ind_get(extent) == bin_arena_ind
- && extent_binshard_get(extent) == binshard) {
+ if (edata_arena_ind_get(edata) == bin_arena_ind
+ && edata_binshard_get(edata) == binshard) {
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
- bin_arena, bin, binind, extent, ptr);
+ bin_arena, bin, binind, edata, ptr);
} else {
/*
* This object was allocated via a different
@@ -202,7 +202,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
* handled in a future pass.
*/
*(bottom_item - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
+ item_edata[ndeferred] = edata;
ndeferred++;
}
}
@@ -244,22 +244,22 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
arena_t *tcache_arena = tcache->arena;
assert(tcache_arena != NULL);
unsigned nflush = ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ VARIABLE_ARRAY(edata_t *, item_edata, nflush);
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
#ifndef JEMALLOC_EXTRA_SIZE_CHECK
- /* Look up extent once per item. */
+ /* Look up edata once per item. */
for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
+ item_edata[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
}
#else
tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
- item_extent);
+ item_edata);
#endif
while (nflush > 0) {
/* Lock the arena associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned locked_arena_ind = extent_arena_ind_get(extent);
+ edata_t *edata = item_edata[0];
+ unsigned locked_arena_ind = edata_arena_ind_get(edata);
arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
locked_arena_ind, false);
@@ -270,10 +270,10 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(bottom_item - i);
assert(ptr != NULL);
- extent = item_extent[i];
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
+ edata = item_edata[i];
+ if (edata_arena_ind_get(edata) == locked_arena_ind) {
large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
- extent);
+ edata);
}
}
if ((config_prof || config_stats) &&
@@ -293,11 +293,11 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
unsigned ndeferred = 0;
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(bottom_item - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
+ edata = item_edata[i];
+ assert(ptr != NULL && edata != NULL);
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
- large_dalloc_finish(tsd_tsdn(tsd), extent);
+ if (edata_arena_ind_get(edata) == locked_arena_ind) {
+ large_dalloc_finish(tsd_tsdn(tsd), edata);
} else {
/*
* This object was allocated via a different
@@ -306,7 +306,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
* in a future pass.
*/
*(bottom_item - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
+ item_edata[ndeferred] = edata;
ndeferred++;
}
}
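
Both flush paths in tcache.c keep the same batching structure after the rename: each pass locks the arena (or bin shard) that owns the first cached object, frees every object in the batch belonging to that owner, and compacts the remaining pointers to the front of the array for a later pass. Below is a minimal standalone model of that deferral loop; cached_obj_t, free_for_owner and flush_all are illustrative names only, and a plain integer owner id replaces the arena/bin-shard key derived from each object's edata.

#include <stddef.h>

typedef struct {
	void *ptr;   /* cached object */
	int   owner; /* stand-in for the arena/bin shard derived from its edata */
} cached_obj_t;

/* Hypothetical per-owner free; in the real code this is the locked
 * arena_dalloc_bin_junked_locked() / large_dalloc_finish() call. */
static void free_for_owner(int owner, void *ptr) { (void)owner; (void)ptr; }

/* Flush all objects, one owner per pass, mirroring tcache_bin_flush_*(). */
static void
flush_all(cached_obj_t *items, size_t nflush) {
	while (nflush > 0) {
		int owner = items[0].owner; /* "lock" the first object's owner */
		size_t ndeferred = 0;
		for (size_t i = 0; i < nflush; i++) {
			if (items[i].owner == owner) {
				free_for_owner(owner, items[i].ptr);
			} else {
				/* Different owner: defer to a future pass. */
				items[ndeferred++] = items[i];
			}
		}
		nflush = ndeferred;
	}
}

int
main(void) {
	int a, b, c;
	cached_obj_t items[] = { { &a, 1 }, { &b, 2 }, { &c, 1 } };
	flush_all(items, 3); /* pass 1 frees owner 1's objects, pass 2 owner 2's */
	return 0;
}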
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index b182f31..854799d 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -63,17 +63,17 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- extent_t *extent;
+ edata_t *edata;
szind_t szind;
- if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, false, &extent, &szind)) {
+ if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, false, &edata, &szind)) {
return 0;
}
- if (extent == NULL) {
+ if (edata == NULL) {
return 0;
}
- if (extent_state_get(extent) != extent_state_active) {
+ if (edata_state_get(edata) != extent_state_active) {
return 0;
}
diff --git a/test/unit/base.c b/test/unit/base.c
index 7ced15f..3b848ca 100644
--- a/test/unit/base.c
+++ b/test/unit/base.c
@@ -168,14 +168,14 @@ TEST_BEGIN(test_base_hooks_not_null) {
* that the first block's remaining space is considered for subsequent
* allocation.
*/
- assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM,
+ assert_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
"Remainder insufficient for test");
/* Use up all but one quantum of block. */
- while (extent_bsize_get(&base->blocks->extent) > QUANTUM) {
+ while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
assert_ptr_not_null(p, "Unexpected base_alloc() failure");
}
- r_exp = extent_addr_get(&base->blocks->extent);
+ r_exp = edata_addr_get(&base->blocks->edata);
assert_zu_eq(base->extent_sn_next, 1, "One extant block expected");
q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
assert_ptr_not_null(q, "Unexpected base_alloc() failure");
diff --git a/test/unit/binshard.c b/test/unit/binshard.c
index d7a8df8..d9a0d59 100644
--- a/test/unit/binshard.c
+++ b/test/unit/binshard.c
@@ -53,7 +53,7 @@ TEST_END
static void *
thd_start(void *varg) {
void *ptr, *ptr2;
- extent_t *extent;
+ edata_t *edata;
unsigned shard1, shard2;
tsdn_t *tsdn = tsdn_fetch();
@@ -62,13 +62,13 @@ thd_start(void *varg) {
ptr = mallocx(1, MALLOCX_TCACHE_NONE);
ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
- extent = iealloc(tsdn, ptr);
- shard1 = extent_binshard_get(extent);
+ edata = iealloc(tsdn, ptr);
+ shard1 = edata_binshard_get(edata);
dallocx(ptr, 0);
assert_u_lt(shard1, 16, "Unexpected bin shard used");
- extent = iealloc(tsdn, ptr2);
- shard2 = extent_binshard_get(extent);
+ edata = iealloc(tsdn, ptr2);
+ shard2 = edata_binshard_get(edata);
dallocx(ptr2, 0);
assert_u_lt(shard2, 4, "Unexpected bin shard used");
diff --git a/test/unit/rtree.c b/test/unit/rtree.c
index 9105e3e..2477db0 100644
--- a/test/unit/rtree.c
+++ b/test/unit/rtree.c
@@ -75,8 +75,8 @@ TEST_BEGIN(test_rtree_read_empty) {
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE,
- false), "rtree_extent_read() should return NULL for empty tree");
+ assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE,
+ false), "rtree_edata_read() should return NULL for empty tree");
rtree_delete(tsdn, rtree);
}
TEST_END
@@ -86,11 +86,11 @@ TEST_END
#undef SEED
TEST_BEGIN(test_rtree_extrema) {
- extent_t extent_a, extent_b;
- extent_init(&extent_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
+ edata_t edata_a, edata_b;
+ edata_init(&edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
false, sz_size2index(SC_LARGE_MINCLASS), 0,
extent_state_active, false, false, true, EXTENT_NOT_HEAD);
- extent_init(&extent_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ edata_init(&edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
extent_state_active, false, false, true, EXTENT_NOT_HEAD);
tsdn_t *tsdn = tsdn_fetch();
@@ -100,21 +100,21 @@ TEST_BEGIN(test_rtree_extrema) {
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
- assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a,
- extent_szind_get(&extent_a), extent_slab_get(&extent_a)),
+ assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &edata_a,
+ edata_szind_get(&edata_a), edata_slab_get(&edata_a)),
"Unexpected rtree_write() failure");
rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE,
- extent_szind_get(&extent_a), extent_slab_get(&extent_a));
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true),
- &extent_a,
- "rtree_extent_read() should return previously set value");
+ edata_szind_get(&edata_a), edata_slab_get(&edata_a));
+ assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx, PAGE, true),
+ &edata_a,
+ "rtree_edata_read() should return previously set value");
assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
- &extent_b, extent_szind_get_maybe_invalid(&extent_b),
- extent_slab_get(&extent_b)), "Unexpected rtree_write() failure");
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- ~((uintptr_t)0), true), &extent_b,
- "rtree_extent_read() should return previously set value");
+ &edata_b, edata_szind_get_maybe_invalid(&edata_b),
+ edata_slab_get(&edata_b)), "Unexpected rtree_write() failure");
+ assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+ ~((uintptr_t)0), true), &edata_b,
+ "rtree_edata_read() should return previously set value");
rtree_delete(tsdn, rtree);
}
@@ -126,8 +126,8 @@ TEST_BEGIN(test_rtree_bits) {
uintptr_t keys[] = {PAGE, PAGE + 1,
PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
- extent_t extent;
- extent_init(&extent, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ edata_t edata;
+ edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
extent_state_active, false, false, true, EXTENT_NOT_HEAD);
rtree_t *rtree = &test_rtree;
@@ -137,17 +137,17 @@ TEST_BEGIN(test_rtree_bits) {
for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
- &extent, SC_NSIZES, false),
+ &edata, SC_NSIZES, false),
"Unexpected rtree_write() failure");
for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[j], true), &extent,
- "rtree_extent_read() should return previously set "
+ assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+ keys[j], true), &edata,
+ "rtree_edata_read() should return previously set "
"value and ignore insignificant key bits; i=%u, "
"j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
j, keys[i], keys[j]);
}
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
(((uintptr_t)2) << LG_PAGE), false),
"Only leftmost rtree leaf should be set; i=%u", i);
rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
@@ -167,8 +167,8 @@ TEST_BEGIN(test_rtree_random) {
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
- extent_t extent;
- extent_init(&extent, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ edata_t edata;
+ edata_init(&edata, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
extent_state_active, false, false, true, EXTENT_NOT_HEAD);
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
@@ -179,29 +179,29 @@ TEST_BEGIN(test_rtree_random) {
&rtree_ctx, keys[i], false, true);
assert_ptr_not_null(elm,
"Unexpected rtree_leaf_elm_lookup() failure");
- rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
+ rtree_leaf_elm_write(tsdn, rtree, elm, &edata, SC_NSIZES,
false);
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[i], true), &extent,
- "rtree_extent_read() should return previously set value");
+ assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+ keys[i], true), &edata,
+ "rtree_edata_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[i], true), &extent,
- "rtree_extent_read() should return previously set value, "
+ assert_ptr_eq(rtree_edata_read(tsdn, rtree, &rtree_ctx,
+ keys[i], true), &edata,
+ "rtree_edata_read() should return previously set value, "
"i=%u", i);
}
for (unsigned i = 0; i < NSET; i++) {
rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
keys[i], true),
- "rtree_extent_read() should return previously set value");
+ "rtree_edata_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
+ assert_ptr_null(rtree_edata_read(tsdn, rtree, &rtree_ctx,
keys[i], true),
- "rtree_extent_read() should return previously set value");
+ "rtree_edata_read() should return previously set value");
}
rtree_delete(tsdn, rtree);
diff --git a/test/unit/slab.c b/test/unit/slab.c
index bcc752e..5d2b35f 100644
--- a/test/unit/slab.c
+++ b/test/unit/slab.c
@@ -7,24 +7,24 @@ TEST_BEGIN(test_arena_slab_regind) {
for (binind = 0; binind < SC_NBINS; binind++) {
size_t regind;
- extent_t slab;
+ edata_t slab;
const bin_info_t *bin_info = &bin_infos[binind];
- extent_init(&slab, INVALID_ARENA_IND,
+ edata_init(&slab, INVALID_ARENA_IND,
mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
bin_info->slab_size, true,
binind, 0, extent_state_active, false, true, true,
EXTENT_NOT_HEAD);
- assert_ptr_not_null(extent_addr_get(&slab),
+ assert_ptr_not_null(edata_addr_get(&slab),
"Unexpected malloc() failure");
for (regind = 0; regind < bin_info->nregs; regind++) {
- void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +
+ void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
(bin_info->reg_size * regind));
assert_zu_eq(arena_slab_regind(&slab, binind, reg),
regind,
"Incorrect region index computed for size %zu",
bin_info->reg_size);
}
- free(extent_addr_get(&slab));
+ free(edata_addr_get(&slab));
}
}
TEST_END
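
The slab test above exercises the inverse mapping from a region pointer back to its index within the slab. Conceptually that index is just the pointer's offset from the slab base divided by the region size; the reference sketch below states only that relationship (the name slab_regind_ref is hypothetical, and the production arena_slab_regind() computes the same value by other means).

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Reference computation of a region index: offset from the slab base divided
 * by the region size.  This models what test_arena_slab_regind verifies. */
static size_t
slab_regind_ref(const void *slab_addr, size_t reg_size, const void *ptr) {
	uintptr_t diff = (uintptr_t)ptr - (uintptr_t)slab_addr;
	assert(diff % reg_size == 0);
	return diff / reg_size;
}

int
main(void) {
	char slab[4096];
	const size_t reg_size = 64;
	for (size_t regind = 0; regind < sizeof(slab) / reg_size; regind++) {
		const void *reg = slab + reg_size * regind;
		assert(slab_regind_ref(slab, reg_size, reg) == regind);
	}
	return 0;
}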