author    Jason Evans <jasone@canonware.com>    2017-03-24 22:22:26 (GMT)
committer Jason Evans <jasone@canonware.com>    2017-03-26 06:30:13 (GMT)
commit    735ad8210c93185b36a36ec4740985681004ce25 (patch)
tree      bed94ec607e245dca0419353b7db37db89d74bfc /include/jemalloc
parent    0591c204b468e7b273c2f3f94f488cffbe8d7a74 (diff)
Pack various extent_t fields into a bitfield.
This reduces sizeof(extent_t) from 160 to 136 on x64.
Diffstat (limited to 'include/jemalloc')
-rw-r--r--  include/jemalloc/internal/extent_inlines.h  144
-rw-r--r--  include/jemalloc/internal/extent_structs.h  115
2 files changed, 155 insertions, 104 deletions
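For context on the size reduction claimed in the commit message: the change collapses several word-sized and bool fields into one packed 64-bit e_bits word. The standalone C sketch below only illustrates the before/after struct shape; the unpacked_extent_demo_t and packed_extent_demo_t types and their field list are simplified stand-ins, not the actual extent_t layout, and the printed sizes are whatever the compiler produces rather than the exact 160/136 figures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Before: each logical field occupies its own (padded) slot. */
typedef struct {
	unsigned	e_arena_ind;
	unsigned	e_szind;
	size_t		e_sn;
	int		e_state;
	bool		e_zeroed;
	bool		e_committed;
	bool		e_slab;
	void		*e_addr;
	size_t		e_size;
} unpacked_extent_demo_t;

/* After: the small fields share one 64-bit word of shifted bit ranges. */
typedef struct {
	uint64_t	e_bits;
	void		*e_addr;
	size_t		e_size;
} packed_extent_demo_t;

int
main(void) {
	/* On LP64 the packed form is noticeably smaller. */
	printf("unpacked: %zu bytes, packed: %zu bytes\n",
	    sizeof(unpacked_extent_demo_t), sizeof(packed_extent_demo_t));
	return 0;
}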
diff --git a/include/jemalloc/internal/extent_inlines.h b/include/jemalloc/internal/extent_inlines.h
index f86822d..22229b5 100644
--- a/include/jemalloc/internal/extent_inlines.h
+++ b/include/jemalloc/internal/extent_inlines.h
@@ -3,20 +3,20 @@
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *extent_arena_get(const extent_t *extent);
-void *extent_base_get(const extent_t *extent);
-void *extent_addr_get(const extent_t *extent);
-size_t extent_size_get(const extent_t *extent);
szind_t extent_szind_get_maybe_invalid(const extent_t *extent);
szind_t extent_szind_get(const extent_t *extent);
size_t extent_usize_get(const extent_t *extent);
-void *extent_before_get(const extent_t *extent);
-void *extent_last_get(const extent_t *extent);
-void *extent_past_get(const extent_t *extent);
size_t extent_sn_get(const extent_t *extent);
extent_state_t extent_state_get(const extent_t *extent);
bool extent_zeroed_get(const extent_t *extent);
bool extent_committed_get(const extent_t *extent);
bool extent_slab_get(const extent_t *extent);
+void *extent_base_get(const extent_t *extent);
+void *extent_addr_get(const extent_t *extent);
+size_t extent_size_get(const extent_t *extent);
+void *extent_before_get(const extent_t *extent);
+void *extent_last_get(const extent_t *extent);
+void *extent_past_get(const extent_t *extent);
arena_slab_data_t *extent_slab_data_get(extent_t *extent);
const arena_slab_data_t *extent_slab_data_get_const(const extent_t *extent);
prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
@@ -49,32 +49,25 @@ int extent_snad_comp(const extent_t *a, const extent_t *b);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
JEMALLOC_INLINE arena_t *
extent_arena_get(const extent_t *extent) {
- return arenas[extent->e_arena_ind];
-}
-
-JEMALLOC_INLINE void *
-extent_base_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent->e_slab);
- return PAGE_ADDR2BASE(extent->e_addr);
-}
-
-JEMALLOC_INLINE void *
-extent_addr_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent->e_slab);
- return extent->e_addr;
-}
-
-JEMALLOC_INLINE size_t
-extent_size_get(const extent_t *extent) {
- return extent->e_size;
+ unsigned arena_ind = (unsigned)((extent->e_bits &
+ EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
+ /*
+ * The following check is omitted because we should never actually read
+ * a NULL arena pointer.
+ */
+ if (false && arena_ind > MALLOCX_ARENA_MAX) {
+ return NULL;
+ }
+ assert(arena_ind <= MALLOCX_ARENA_MAX);
+ return arenas[arena_ind];
}
JEMALLOC_INLINE szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
- assert(extent->e_szind <= NSIZES);
- return extent->e_szind;
+ szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
+ EXTENT_BITS_SZIND_SHIFT);
+ assert(szind <= NSIZES);
+ return szind;
}
JEMALLOC_INLINE szind_t
@@ -89,57 +82,81 @@ extent_usize_get(const extent_t *extent) {
return index2size(extent_szind_get(extent));
}
-JEMALLOC_INLINE void *
-extent_before_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
-}
-
-JEMALLOC_INLINE void *
-extent_last_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent) - PAGE);
-}
-
-JEMALLOC_INLINE void *
-extent_past_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent));
-}
-
JEMALLOC_INLINE size_t
extent_sn_get(const extent_t *extent) {
- return extent->e_sn;
+ return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
+ EXTENT_BITS_SN_SHIFT);
}
JEMALLOC_INLINE extent_state_t
extent_state_get(const extent_t *extent) {
- return extent->e_state;
+ return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
+ EXTENT_BITS_STATE_SHIFT);
}
JEMALLOC_INLINE bool
extent_zeroed_get(const extent_t *extent) {
- return extent->e_zeroed;
+ return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
+ EXTENT_BITS_ZEROED_SHIFT);
}
JEMALLOC_INLINE bool
extent_committed_get(const extent_t *extent) {
- return extent->e_committed;
+ return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
+ EXTENT_BITS_COMMITTED_SHIFT);
}
JEMALLOC_INLINE bool
extent_slab_get(const extent_t *extent) {
- return extent->e_slab;
+ return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
+ EXTENT_BITS_SLAB_SHIFT);
+}
+
+JEMALLOC_INLINE void *
+extent_base_get(const extent_t *extent) {
+ assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+ !extent_slab_get(extent));
+ return PAGE_ADDR2BASE(extent->e_addr);
+}
+
+JEMALLOC_INLINE void *
+extent_addr_get(const extent_t *extent) {
+ assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+ !extent_slab_get(extent));
+ return extent->e_addr;
+}
+
+JEMALLOC_INLINE size_t
+extent_size_get(const extent_t *extent) {
+ return extent->e_size;
+}
+
+JEMALLOC_INLINE void *
+extent_before_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
+}
+
+JEMALLOC_INLINE void *
+extent_last_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) +
+ extent_size_get(extent) - PAGE);
+}
+
+JEMALLOC_INLINE void *
+extent_past_get(const extent_t *extent) {
+ return (void *)((uintptr_t)extent_base_get(extent) +
+ extent_size_get(extent));
}
JEMALLOC_INLINE arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
- assert(extent->e_slab);
+ assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
JEMALLOC_INLINE const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
- assert(extent->e_slab);
+ assert(extent_slab_get(extent));
return &extent->e_slab_data;
}
@@ -151,7 +168,10 @@ extent_prof_tctx_get(const extent_t *extent) {
JEMALLOC_INLINE void
extent_arena_set(extent_t *extent, arena_t *arena) {
- extent->e_arena_ind = (arena != NULL) ? arena_ind_get(arena) : UINT_MAX;
+ unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
+ MALLOCX_ARENA_BITS) - 1);
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
+ ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
}
JEMALLOC_INLINE void
@@ -186,32 +206,38 @@ extent_size_set(extent_t *extent, size_t size) {
JEMALLOC_INLINE void
extent_szind_set(extent_t *extent, szind_t szind) {
assert(szind <= NSIZES); /* NSIZES means "invalid". */
- extent->e_szind = szind;
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
+ ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}
JEMALLOC_INLINE void
extent_sn_set(extent_t *extent, size_t sn) {
- extent->e_sn = sn;
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
+ ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
}
JEMALLOC_INLINE void
extent_state_set(extent_t *extent, extent_state_t state) {
- extent->e_state = state;
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
+ ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
}
JEMALLOC_INLINE void
extent_zeroed_set(extent_t *extent, bool zeroed) {
- extent->e_zeroed = zeroed;
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
+ ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
}
JEMALLOC_INLINE void
extent_committed_set(extent_t *extent, bool committed) {
- extent->e_committed = committed;
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
+ ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
}
JEMALLOC_INLINE void
extent_slab_set(extent_t *extent, bool slab) {
- extent->e_slab = slab;
+ extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
+ ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
}
JEMALLOC_INLINE void
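All of the getters and setters in the hunks above follow the same pattern: mask then shift down for reads, clear then OR in a shifted value for writes. A minimal standalone sketch of that pattern, using hypothetical DEMO_FIELD_* names rather than the real EXTENT_BITS_* macros, looks like this:

#include <assert.h>
#include <stdint.h>

/* Hypothetical layout: an 8-bit field starting at bit 16 of a 64-bit word. */
#define DEMO_FIELD_SHIFT	16
#define DEMO_FIELD_MASK		((uint64_t)0xff << DEMO_FIELD_SHIFT)

/* Get: isolate the field's bits, then shift them down to bit 0. */
static inline unsigned
demo_field_get(uint64_t bits) {
	return (unsigned)((bits & DEMO_FIELD_MASK) >> DEMO_FIELD_SHIFT);
}

/* Set: clear the field's old bits, then OR in the shifted new value. */
static inline uint64_t
demo_field_set(uint64_t bits, unsigned value) {
	assert(value <= 0xff);
	return (bits & ~DEMO_FIELD_MASK) |
	    ((uint64_t)value << DEMO_FIELD_SHIFT);
}

The clear-before-OR step in the setter is what keeps neighboring fields in the packed word untouched when one field is rewritten.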
diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h
index 1b2b4bc..ddc0408 100644
--- a/include/jemalloc/internal/extent_structs.h
+++ b/include/jemalloc/internal/extent_structs.h
@@ -10,59 +10,84 @@ typedef enum {
/* Extent (span of pages). Use accessor functions for e_* fields. */
struct extent_s {
- /* Arena from which this extent came, or UINT_MAX if unassociated. */
- unsigned e_arena_ind;
-
- /* Pointer to the extent that this structure is responsible for. */
- void *e_addr;
-
- /* Extent size. */
- size_t e_size;
-
/*
- * Usable size class index for allocations residing in this extent,
- * regardless of whether the extent is a slab. Extent size and usable
- * size often differ even for non-slabs, either due to large_pad or
- * promotion of sampled small regions.
- */
- szind_t e_szind;
-
- /*
- * Serial number (potentially non-unique).
+ * Bitfield containing several fields:
+ *
+ * a: arena_ind
+ * b: slab
+ * c: committed
+ * z: zeroed
+ * t: state
+ * i: szind
+ * n: sn
+ *
+ * nnnnnnnn ... nnnnnnni iiiiiiit tzcbaaaa aaaaaaaa
+ *
+ * arena_ind: Arena from which this extent came, or all 1 bits if
+ * unassociated.
+ *
+ * slab: The slab flag indicates whether the extent is used for a slab
+ * of small regions. This helps differentiate small size classes,
+ * and it indicates whether interior pointers can be looked up via
+ * iealloc().
+ *
+ * committed: The committed flag indicates whether physical memory is
+ * committed to the extent, whether explicitly or implicitly
+ * as on a system that overcommits and satisfies physical
+ * memory needs on demand via soft page faults.
+ *
+ * zeroed: The zeroed flag is used by extent recycling code to track
+ * whether memory is zero-filled.
+ *
+ * state: The state flag is an extent_state_t.
*
- * In principle serial numbers can wrap around on 32-bit systems if
- * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
- * back on address comparison for equal serial numbers, stable (if
- * imperfect) ordering is maintained.
+ * szind: The szind flag indicates usable size class index for
+ * allocations residing in this extent, regardless of whether the
+ * extent is a slab. Extent size and usable size often differ
+ * even for non-slabs, either due to large_pad or promotion of
+ * sampled small regions.
*
- * Serial numbers may not be unique even in the absence of wrap-around,
- * e.g. when splitting an extent and assigning the same serial number to
- * both resulting adjacent extents.
+ * sn: Serial number (potentially non-unique).
+ *
+ * Serial numbers may wrap around if JEMALLOC_MUNMAP is defined, but
+ * as long as comparison functions fall back on address comparison
+ * for equal serial numbers, stable (if imperfect) ordering is
+ * maintained.
+ *
+ * Serial numbers may not be unique even in the absence of
+ * wrap-around, e.g. when splitting an extent and assigning the same
+ * serial number to both resulting adjacent extents.
*/
- size_t e_sn;
+ uint64_t e_bits;
+#define EXTENT_BITS_ARENA_SHIFT 0
+#define EXTENT_BITS_ARENA_MASK \
+ (((1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)
- /* Extent state. */
- extent_state_t e_state;
+#define EXTENT_BITS_SLAB_SHIFT MALLOCX_ARENA_BITS
+#define EXTENT_BITS_SLAB_MASK (0x1U << EXTENT_BITS_SLAB_SHIFT)
- /*
- * The zeroed flag is used by extent recycling code to track whether
- * memory is zero-filled.
- */
- bool e_zeroed;
+#define EXTENT_BITS_COMMITTED_SHIFT (MALLOCX_ARENA_BITS + 1)
+#define EXTENT_BITS_COMMITTED_MASK (0x1U << EXTENT_BITS_COMMITTED_SHIFT)
- /*
- * True if physical memory is committed to the extent, whether
- * explicitly or implicitly as on a system that overcommits and
- * satisfies physical memory needs on demand via soft page faults.
- */
- bool e_committed;
+#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 2)
+#define EXTENT_BITS_ZEROED_MASK (0x1U << EXTENT_BITS_ZEROED_SHIFT)
- /*
- * The slab flag indicates whether the extent is used for a slab of
- * small regions. This helps differentiate small size classes, and it
- * indicates whether interior pointers can be looked up via iealloc().
- */
- bool e_slab;
+#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 3)
+#define EXTENT_BITS_STATE_MASK (0x3U << EXTENT_BITS_STATE_SHIFT)
+
+#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 5)
+#define EXTENT_BITS_SZIND_MASK \
+ (((1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
+
+#define EXTENT_BITS_SN_SHIFT \
+ (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
+#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
+
+ /* Pointer to the extent that this structure is responsible for. */
+ void *e_addr;
+
+ /* Extent size. */
+ size_t e_size;
union {
/* Small region slab metadata. */