author		Jason Evans <jasone@canonware.com>	2012-05-12 00:48:33 (GMT)
committer	Jason Evans <jasone@canonware.com>	2012-05-12 00:48:33 (GMT)
commit		fc9b1dbf69f59d7ecfc4ac68da9847e017e1d046 (patch)
tree		7f843c7c51cd5df5d3be1ca48f504325f0536c0d /include/jemalloc/internal/arena.h
parent		fc1bb70e5f0d9a58b39efa39cc549b5af5104760 (diff)
parent		cbb71caceb1e53d0fd21284ce298885327c211b4 (diff)
Merge branch 'dev' (tag: 3.0.0)

Conflicts:
	ChangeLog
	include/jemalloc/internal/chunk.h
	src/chunk.c
	src/huge.c
	src/jemalloc.c
	test/rallocm.c
Diffstat (limited to 'include/jemalloc/internal/arena.h')
-rw-r--r--	include/jemalloc/internal/arena.h	744
1 file changed, 488 insertions(+), 256 deletions(-)
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index b80c118..0b0f640 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -2,41 +2,6 @@
#ifdef JEMALLOC_H_TYPES
/*
- * Subpages are an artificially designated partitioning of pages. Their only
- * purpose is to support subpage-spaced size classes.
- *
- * There must be at least 4 subpages per page, due to the way size classes are
- * handled.
- */
-#define LG_SUBPAGE 8
-#define SUBPAGE ((size_t)(1U << LG_SUBPAGE))
-#define SUBPAGE_MASK (SUBPAGE - 1)
-
-/* Return the smallest subpage multiple that is >= s. */
-#define SUBPAGE_CEILING(s) \
- (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)
-
-#ifdef JEMALLOC_TINY
- /* Smallest size class to support. */
-# define LG_TINY_MIN LG_SIZEOF_PTR
-# define TINY_MIN (1U << LG_TINY_MIN)
-#endif
-
-/*
- * Maximum size class that is a multiple of the quantum, but not (necessarily)
- * a power of 2. Above this size, allocations are rounded up to the nearest
- * power of 2.
- */
-#define LG_QSPACE_MAX_DEFAULT 7
-
-/*
- * Maximum size class that is a multiple of the cacheline, but not (necessarily)
- * a power of 2. Above this size, allocations are rounded up to the nearest
- * power of 2.
- */
-#define LG_CSPACE_MAX_DEFAULT 9
-
-/*
* RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
* as small as possible such that this setting is still honored, without
* violating other constraints. The goal is to make runs as small as possible
@@ -51,7 +16,7 @@
* constraint is relaxed (ignored) for runs that are so small that the
* per-region overhead is greater than:
*
- * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
+ * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)))
*/
#define RUN_BFP 12
/* \/ Implicit binary fixed point. */
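For intuition, a minimal standalone sketch of reading a value against this implicit binary fixed point; the 0x3d sample is an assumption for illustration, not quoted from this hunk:

#include <stdio.h>

#define EX_RUN_BFP 12

int
main(void)
{
	unsigned run_max_ovrhd = 0x3dU;	/* hypothetical sample value */

	/* RUN_BFP fractional bits: 0x3d / 2^12 ~= 0.0149, i.e. ~1.5%. */
	printf("max run header overhead: %.4f\n",
	    (double)run_max_ovrhd / (double)(1U << EX_RUN_BFP));
	return (0);
}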
@@ -63,6 +28,12 @@
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
/*
+ * Minimum redzone size. Redzones may be larger than this if necessary to
+ * preserve region alignment.
+ */
+#define REDZONE_MINSIZE 16
+
+/*
* The minimum ratio of active:dirty pages per arena is computed as:
*
* (nactive >> opt_lg_dirty_mult) >= ndirty
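A hedged sketch of that predicate as a standalone helper; the sample numbers in the trailing comment are illustrative only:

#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>

static bool
dirty_ratio_ok(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{

	if (lg_dirty_mult < 0)
		return (true);	/* Ratio checking disabled. */
	/* Purging keeps (nactive >> lg_dirty_mult) >= ndirty. */
	return ((nactive >> lg_dirty_mult) >= ndirty);
}
/* e.g. nactive == 1024, lg_dirty_mult == 3: up to 128 dirty pages pass. */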
@@ -85,6 +56,15 @@ typedef struct arena_s arena_t;
/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
+#ifndef JEMALLOC_PROF
+ /*
+ * Overlay prof_ctx in order to allow it to be referenced by dead code.
+ * Such antics aren't warranted for per arena data structures, but
+ * chunk map overhead accounts for a percentage of memory, rather than
+ * being just a fixed cost.
+ */
+ union {
+#endif
union {
/*
* Linkage for run trees. There are two disjoint uses:
@@ -103,22 +83,23 @@ struct arena_chunk_map_s {
ql_elm(arena_chunk_map_t) ql_link;
} u;
-#ifdef JEMALLOC_PROF
/* Profile counters, used for large object runs. */
prof_ctx_t *prof_ctx;
+#ifndef JEMALLOC_PROF
+ }; /* union { ... }; */
#endif
/*
* Run address (or size) and various flags are stored together. The bit
* layout looks like (assuming 32-bit system):
*
- * ???????? ???????? ????---- ----dula
+ * ???????? ???????? ????nnnn nnnndula
*
* ? : Unallocated: Run address for first/last pages, unset for internal
* pages.
* Small: Run page offset.
* Large: Run size for first page, unset for trailing pages.
- * - : Unused.
+ * n : binind for small size class, BININD_INVALID for large size class.
* d : dirty?
* u : unzeroed?
* l : large?
@@ -128,7 +109,8 @@ struct arena_chunk_map_s {
*
* p : run page offset
* s : run size
- * c : (binind+1) for size class (used only if prof_promote is true)
+ * n : binind for size class; large objects set these to BININD_INVALID
+ * except for promoted allocations (see prof_promote)
* x : don't care
* - : 0
* + : 1
@@ -136,37 +118,38 @@ struct arena_chunk_map_s {
* [dula] : bit unset
*
* Unallocated (clean):
- * ssssssss ssssssss ssss---- ----du-a
- * xxxxxxxx xxxxxxxx xxxx---- -----Uxx
- * ssssssss ssssssss ssss---- ----dU-a
+ * ssssssss ssssssss ssss++++ ++++du-a
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
+ * ssssssss ssssssss ssss++++ ++++dU-a
*
* Unallocated (dirty):
- * ssssssss ssssssss ssss---- ----D--a
- * xxxxxxxx xxxxxxxx xxxx---- ----xxxx
- * ssssssss ssssssss ssss---- ----D--a
+ * ssssssss ssssssss ssss++++ ++++D--a
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * ssssssss ssssssss ssss++++ ++++D--a
*
* Small:
- * pppppppp pppppppp pppp---- ----d--A
- * pppppppp pppppppp pppp---- -------A
- * pppppppp pppppppp pppp---- ----d--A
+ * pppppppp pppppppp ppppnnnn nnnnd--A
+ * pppppppp pppppppp ppppnnnn nnnn---A
+ * pppppppp pppppppp ppppnnnn nnnnd--A
*
* Large:
- * ssssssss ssssssss ssss---- ----D-LA
- * xxxxxxxx xxxxxxxx xxxx---- ----xxxx
- * -------- -------- -------- ----D-LA
+ * ssssssss ssssssss ssss++++ ++++D-LA
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * -------- -------- ----++++ ++++D-LA
*
- * Large (sampled, size <= PAGE_SIZE):
- * ssssssss ssssssss sssscccc ccccD-LA
+ * Large (sampled, size <= PAGE):
+ * ssssssss ssssssss ssssnnnn nnnnD-LA
*
- * Large (not sampled, size == PAGE_SIZE):
- * ssssssss ssssssss ssss---- ----D-LA
+ * Large (not sampled, size == PAGE):
+ * ssssssss ssssssss ssss++++ ++++D-LA
*/
size_t bits;
-#ifdef JEMALLOC_PROF
-#define CHUNK_MAP_CLASS_SHIFT 4
-#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U)
-#endif
-#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU)
+#define CHUNK_MAP_BININD_SHIFT 4
+#define BININD_INVALID ((size_t)0xffU)
+/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
+#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
+#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
+#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
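To make the encoding concrete, a standalone sketch of composing and decoding a small-run mapbits word; the EX_* constants mirror the masks above plus assumed values (LG_PAGE == 12, CHUNK_MAP_ALLOCATED == 0x1) that are not quoted in this hunk:

#include <stdbool.h>
#include <stddef.h>

#define EX_LG_PAGE	12			/* assumed */
#define EX_BININD_SHIFT	4			/* CHUNK_MAP_BININD_SHIFT */
#define EX_BININD_MASK	((size_t)0xff0U)	/* CHUNK_MAP_BININD_MASK */
#define EX_DIRTY	((size_t)0x8U)		/* CHUNK_MAP_DIRTY */
#define EX_ALLOCATED	((size_t)0x1U)		/* assumed */

int
main(void)
{
	/* Small page at run offset 3, bin 5, dirty, allocated. */
	size_t bits = ((size_t)3 << EX_LG_PAGE) |
	    ((size_t)5 << EX_BININD_SHIFT) | EX_DIRTY | EX_ALLOCATED;
	size_t binind = (bits & EX_BININD_MASK) >> EX_BININD_SHIFT;	/* 5 */
	size_t runind = bits >> EX_LG_PAGE;				/* 3 */
	bool dirty = (bits & EX_DIRTY) != 0;				/* true */

	return ((binind == 5 && runind == 3 && dirty) ? 0 : 1);
}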
@@ -205,11 +188,6 @@ struct arena_chunk_s {
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
struct arena_run_s {
-#ifdef JEMALLOC_DEBUG
- uint32_t magic;
-# define ARENA_RUN_MAGIC 0x384adf93
-#endif
-
/* Bin this run is associated with. */
arena_bin_t *bin;
@@ -224,11 +202,50 @@ struct arena_run_s {
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each run has the following layout:
+ *
+ * /--------------------\
+ * | arena_run_t header |
+ * | ... |
+ * bitmap_offset | bitmap |
+ * | ... |
+ * ctx0_offset | ctx map |
+ * | ... |
+ * |--------------------|
+ * | redzone |
+ * reg0_offset | region 0 |
+ * | redzone |
+ * |--------------------| \
+ * | redzone | |
+ * | region 1 | > reg_interval
+ * | redzone | /
+ * |--------------------|
+ * | ... |
+ * | ... |
+ * | ... |
+ * |--------------------|
+ * | redzone |
+ * | region nregs-1 |
+ * | redzone |
+ * |--------------------|
+ * | alignment pad? |
+ * \--------------------/
+ *
+ * reg_interval has at least the same minimum alignment as reg_size; this
+ * preserves the alignment constraint that sa2u() depends on. Alignment pad is
+ * either 0 or redzone_size; it is present only if needed to align reg0_offset.
*/
struct arena_bin_info_s {
/* Size of regions in a run for this bin's size class. */
size_t reg_size;
+ /* Redzone size. */
+ size_t redzone_size;
+
+ /* Interval between regions (reg_size + (redzone_size << 1)). */
+ size_t reg_interval;
+
/* Total size of a run for this bin's size class. */
size_t run_size;
@@ -247,13 +264,11 @@ struct arena_bin_info_s {
*/
bitmap_info_t bitmap_info;
-#ifdef JEMALLOC_PROF
/*
* Offset of first (prof_ctx_t *) in a run header for this bin's size
- * class, or 0 if (opt_prof == false).
+ * class, or 0 if (config_prof == false || opt_prof == false).
*/
uint32_t ctx0_offset;
-#endif
/* Offset of first region in a run for this bin's size class. */
uint32_t reg0_offset;
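Given these fields, region addressing reduces to one multiply-add; a sketch assuming run, bin_info, and regind come from the surrounding structures (this helper is illustrative, not part of the header):

static inline void *
ex_region_addr(arena_run_t *run, arena_bin_info_t *bin_info, unsigned regind)
{

	/*
	 * Region regind begins reg0_offset + regind * reg_interval bytes
	 * past the run header; each interval carries its own redzones.
	 */
	return ((void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)regind * bin_info->reg_interval));
}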
@@ -283,18 +298,11 @@ struct arena_bin_s {
*/
arena_run_tree_t runs;
-#ifdef JEMALLOC_STATS
/* Bin statistics. */
malloc_bin_stats_t stats;
-#endif
};
struct arena_s {
-#ifdef JEMALLOC_DEBUG
- uint32_t magic;
-# define ARENA_MAGIC 0x947d3d24
-#endif
-
/* This arena's index within the arenas array. */
unsigned ind;
@@ -314,20 +322,14 @@ struct arena_s {
*/
malloc_mutex_t lock;
-#ifdef JEMALLOC_STATS
arena_stats_t stats;
-# ifdef JEMALLOC_TCACHE
/*
* List of tcaches for extant threads associated with this arena.
* Stats from these are merged incrementally, and at exit.
*/
ql_head(tcache_t) tcache_ql;
-# endif
-#endif
-#ifdef JEMALLOC_PROF
uint64_t prof_accumbytes;
-#endif
/* List of dirty-page-containing chunks this arena manages. */
ql_head(arena_chunk_t) chunks_dirty;
@@ -378,140 +380,334 @@ struct arena_s {
arena_avail_tree_t runs_avail_clean;
arena_avail_tree_t runs_avail_dirty;
- /*
- * bins is used to store trees of free regions of the following sizes,
- * assuming a 64-bit system with 16-byte quantum, 4 KiB page size, and
- * default MALLOC_CONF.
- *
- * bins[i] | size |
- * --------+--------+
- * 0 | 8 |
- * --------+--------+
- * 1 | 16 |
- * 2 | 32 |
- * 3 | 48 |
- * : :
- * 6 | 96 |
- * 7 | 112 |
- * 8 | 128 |
- * --------+--------+
- * 9 | 192 |
- * 10 | 256 |
- * 11 | 320 |
- * 12 | 384 |
- * 13 | 448 |
- * 14 | 512 |
- * --------+--------+
- * 15 | 768 |
- * 16 | 1024 |
- * 17 | 1280 |
- * : :
- * 25 | 3328 |
- * 26 | 3584 |
- * 27 | 3840 |
- * --------+--------+
- */
- arena_bin_t bins[1]; /* Dynamically sized. */
+ /* bins is used to store trees of free regions. */
+ arena_bin_t bins[NBINS];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-extern size_t opt_lg_qspace_max;
-extern size_t opt_lg_cspace_max;
extern ssize_t opt_lg_dirty_mult;
/*
* small_size2bin is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via the SMALL_SIZE2BIN macro.
*/
-extern uint8_t const *small_size2bin;
+extern uint8_t const small_size2bin[];
#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
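A worked lookup, assuming LG_TINY_MIN == 3 (not quoted in this hunk):

size_t s = 13;
size_t index = (s - 1) >> 3;	/* (s-1) >> LG_TINY_MIN == 1 */
/* small_size2bin[index] names the bin with the smallest reg_size >= 13. */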
-extern arena_bin_info_t *arena_bin_info;
-
-/* Various bin-related settings. */
-#ifdef JEMALLOC_TINY /* Number of (2^n)-spaced tiny bins. */
-# define ntbins ((unsigned)(LG_QUANTUM - LG_TINY_MIN))
-#else
-# define ntbins 0
-#endif
-extern unsigned nqbins; /* Number of quantum-spaced bins. */
-extern unsigned ncbins; /* Number of cacheline-spaced bins. */
-extern unsigned nsbins; /* Number of subpage-spaced bins. */
-extern unsigned nbins;
-#ifdef JEMALLOC_TINY
-# define tspace_max ((size_t)(QUANTUM >> 1))
-#endif
-#define qspace_min QUANTUM
-extern size_t qspace_max;
-extern size_t cspace_min;
-extern size_t cspace_max;
-extern size_t sspace_min;
-extern size_t sspace_max;
-#define small_maxclass sspace_max
+extern arena_bin_info_t arena_bin_info[NBINS];
+/* Number of large size classes. */
#define nlclasses (chunk_npages - map_bias)
void arena_purge_all(arena_t *arena);
-#ifdef JEMALLOC_PROF
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-#endif
-#ifdef JEMALLOC_TCACHE
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
- size_t binind
-# ifdef JEMALLOC_PROF
- , uint64_t prof_accumbytes
-# endif
- );
-#endif
+ size_t binind, uint64_t prof_accumbytes);
+void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
+ bool zero);
+void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
-void *arena_malloc(size_t size, bool zero);
-void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
- size_t alignment, bool zero);
-size_t arena_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
+void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void arena_prof_promoted(const void *ptr, size_t size);
-size_t arena_salloc_demote(const void *ptr);
-#endif
-void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm);
+void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_t *mapelm);
+void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind);
+void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-#ifdef JEMALLOC_STATS
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats);
-#endif
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero);
+ size_t alignment, bool zero, bool try_tcache);
bool arena_new(arena_t *arena, unsigned ind);
-bool arena_boot(void);
+void arena_boot(void);
+void arena_prefork(arena_t *arena);
+void arena_postfork_parent(arena_t *arena);
+void arena_postfork_child(arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
+arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size, size_t flags);
+void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size);
+void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size, size_t flags);
+void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
+ size_t binind);
+void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
+ size_t runind, size_t binind, size_t flags);
+void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
+ size_t unzeroed);
+size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
-# ifdef JEMALLOC_PROF
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-# endif
-void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
+void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
+size_t arena_salloc(const void *ptr, bool demote);
+void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ bool try_tcache);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
+# ifdef JEMALLOC_ARENA_INLINE_A
+JEMALLOC_INLINE arena_chunk_map_t *
+arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
+{
+
+ assert(pageind >= map_bias);
+ assert(pageind < chunk_npages);
+
+ return (&chunk->map[pageind-map_bias]);
+}
+
+JEMALLOC_INLINE size_t *
+arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (&arena_mapp_get(chunk, pageind)->bits);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (*arena_mapbitsp_get(chunk, pageind));
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ return (mapbits & ~PAGE_MASK);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+ (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
+ return (mapbits & ~PAGE_MASK);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
+ CHUNK_MAP_ALLOCATED);
+ return (mapbits >> LG_PAGE);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+ size_t binind;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+ assert(binind < NBINS || binind == BININD_INVALID);
+ return (binind);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_DIRTY);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_UNZEROED);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_LARGE);
+}
+
+JEMALLOC_INLINE size_t
+arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (mapbits & CHUNK_MAP_ALLOCATED);
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
+ size_t flags)
+{
+ size_t *mapbitsp;
+
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert((size & PAGE_MASK) == 0);
+ assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+ assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
+ *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
+ size_t size)
+{
+ size_t *mapbitsp;
+
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert((size & PAGE_MASK) == 0);
+ assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ *mapbitsp = size | (*mapbitsp & PAGE_MASK);
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
+ size_t flags)
+{
+ size_t *mapbitsp;
+ size_t unzeroed;
+
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert((size & PAGE_MASK) == 0);
+ assert((flags & CHUNK_MAP_DIRTY) == flags);
+ unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
+ CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
+ size_t binind)
+{
+ size_t *mapbitsp;
+
+ assert(binind <= BININD_INVALID);
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
+ *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
+ CHUNK_MAP_BININD_SHIFT);
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
+ size_t binind, size_t flags)
+{
+ size_t *mapbitsp;
+ size_t unzeroed;
+
+ assert(binind < BININD_INVALID);
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ assert(pageind - runind >= map_bias);
+ assert((flags & CHUNK_MAP_DIRTY) == flags);
+ unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
+ flags | unzeroed | CHUNK_MAP_ALLOCATED;
+}
+
+JEMALLOC_INLINE void
+arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
+ size_t unzeroed)
+{
+ size_t *mapbitsp;
+
+ mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
+}
+
+JEMALLOC_INLINE size_t
+arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
+{
+ size_t binind;
+
+ binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
+
+ if (config_debug) {
+ arena_chunk_t *chunk;
+ arena_t *arena;
+ size_t pageind;
+ size_t actual_mapbits;
+ arena_run_t *run;
+ arena_bin_t *bin;
+ size_t actual_binind;
+ arena_bin_info_t *bin_info;
+
+ assert(binind != BININD_INVALID);
+ assert(binind < NBINS);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = chunk->arena;
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ actual_mapbits = arena_mapbits_get(chunk, pageind);
+ assert(mapbits == actual_mapbits);
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+ (actual_mapbits >> LG_PAGE)) << LG_PAGE));
+ bin = run->bin;
+ actual_binind = bin - arena->bins;
+ assert(binind == actual_binind);
+ bin_info = &arena_bin_info[actual_binind];
+ assert(((uintptr_t)ptr - ((uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+ == 0);
+ }
+
+ return (binind);
+}
+# endif /* JEMALLOC_ARENA_INLINE_A */
+
+# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
size_t binind = bin - arena->bins;
- assert(binind < nbins);
+ assert(binind < NBINS);
return (binind);
}
@@ -519,9 +715,8 @@ JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
unsigned shift, diff, regind;
- size_t size;
+ size_t interval;
- dassert(run->magic == ARENA_RUN_MAGIC);
/*
* Freeing a pointer lower than region zero can cause assertion
* failure.
@@ -537,12 +732,12 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
bin_info->reg0_offset);
/* Rescale (factor powers of 2 out of the numerator and denominator). */
- size = bin_info->reg_size;
- shift = ffs(size) - 1;
+ interval = bin_info->reg_interval;
+ shift = ffs(interval) - 1;
diff >>= shift;
- size >>= shift;
+ interval >>= shift;
- if (size == 1) {
+ if (interval == 1) {
/* The divisor was a power of 2. */
regind = diff;
} else {
@@ -554,7 +749,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
*
* becomes
*
- * (X * size_invs[D - 3]) >> SIZE_INV_SHIFT
+ * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
*
* We can omit the first three elements, because we never
* divide by 0, and 1 and 2 are both powers of two, which are
@@ -562,7 +757,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
*/
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
- static const unsigned size_invs[] = {
+ static const unsigned interval_invs[] = {
SIZE_INV(3),
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
@@ -573,20 +768,21 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
};
- if (size <= ((sizeof(size_invs) / sizeof(unsigned)) + 2))
- regind = (diff * size_invs[size - 3]) >> SIZE_INV_SHIFT;
- else
- regind = diff / size;
+ if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
+ 2)) {
+ regind = (diff * interval_invs[interval - 3]) >>
+ SIZE_INV_SHIFT;
+ } else
+ regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
}
- assert(diff == regind * size);
+ assert(diff == regind * interval);
assert(regind < bin_info->nregs);
return (regind);
}
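A worked instance of the multiply-shift above, assuming SIZE_INV_SHIFT == 21 (32-bit unsigned with LG_RUN_MAXREGS == 11; neither value is quoted in this hunk):

#include <stdio.h>

int
main(void)
{
	unsigned interval = 48;				/* non-power-of-two divisor */
	unsigned inv = ((1U << 21) / interval) + 1;	/* SIZE_INV(48) == 43691 */
	unsigned diff = 96;				/* byte offset of region 2 */
	unsigned regind = (diff * inv) >> 21;		/* == 96/48 == 2, no division */

	printf("regind == %u\n", regind);
	return (0);
}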
-#ifdef JEMALLOC_PROF
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
@@ -594,32 +790,33 @@ arena_prof_ctx_get(const void *ptr)
arena_chunk_t *chunk;
size_t pageind, mapbits;
+ cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
- mapbits = chunk->map[pageind-map_bias].bits;
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
if (prof_promote)
ret = (prof_ctx_t *)(uintptr_t)1U;
else {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
- PAGE_SHIFT));
- size_t binind = arena_bin_index(chunk->arena, run->bin);
+ (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+ LG_PAGE));
+ size_t binind = arena_ptr_small_binind_get(ptr,
+ mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind;
- dassert(run->magic == ARENA_RUN_MAGIC);
regind = arena_run_regind(run, bin_info, ptr);
ret = *(prof_ctx_t **)((uintptr_t)run +
bin_info->ctx0_offset + (regind *
sizeof(prof_ctx_t *)));
}
} else
- ret = chunk->map[pageind-map_bias].prof_ctx;
+ ret = arena_mapp_get(chunk, pageind)->prof_ctx;
return (ret);
}
@@ -630,25 +827,24 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
arena_chunk_t *chunk;
size_t pageind, mapbits;
+ cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
- mapbits = chunk->map[pageind-map_bias].bits;
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
if (prof_promote == false) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
- PAGE_SHIFT));
- arena_bin_t *bin = run->bin;
+ (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+ LG_PAGE));
size_t binind;
arena_bin_info_t *bin_info;
unsigned regind;
- dassert(run->magic == ARENA_RUN_MAGIC);
- binind = arena_bin_index(chunk->arena, bin);
+ binind = arena_ptr_small_binind_get(ptr, mapbits);
bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr);
@@ -657,86 +853,122 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
} else
assert((uintptr_t)ctx == (uintptr_t)1U);
} else
- chunk->map[pageind-map_bias].prof_ctx = ctx;
+ arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+}
+
+JEMALLOC_INLINE void *
+arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
+{
+ tcache_t *tcache;
+
+ assert(size != 0);
+ assert(size <= arena_maxclass);
+
+ if (size <= SMALL_MAXCLASS) {
+ if (try_tcache && (tcache = tcache_get(true)) != NULL)
+ return (tcache_alloc_small(tcache, size, zero));
+ else {
+ return (arena_malloc_small(choose_arena(arena), size,
+ zero));
+ }
+ } else {
+ /*
+ * Initialize tcache after checking size in order to avoid
+ * infinite recursion during tcache initialization.
+ */
+ if (try_tcache && size <= tcache_maxclass && (tcache =
+ tcache_get(true)) != NULL)
+ return (tcache_alloc_large(tcache, size, zero));
+ else {
+ return (arena_malloc_large(choose_arena(arena), size,
+ zero));
+ }
+ }
+}
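An illustrative call: passing NULL for arena lets choose_arena() pick, per the calls above. Note the design choice of consulting tcache before the arena, so the common path never takes arena->lock:

void *p = arena_malloc(NULL, 64, false, true);	/* small, tcache permitted */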
+
+/* Return the size of the allocation pointed to by ptr. */
+JEMALLOC_INLINE size_t
+arena_salloc(const void *ptr, bool demote)
+{
+ size_t ret;
+ arena_chunk_t *chunk;
+ size_t pageind, binind;
+
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ binind = arena_mapbits_binind_get(chunk, pageind);
+ if (binind == BININD_INVALID || (config_prof && demote == false &&
+ prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
+ /*
+ * Large allocation. In the common case (demote == true), and
+ * as this is an inline function, most callers will only end up
+ * looking at binind to determine that ptr is a small
+ * allocation.
+ */
+ assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+ ret = arena_mapbits_large_size_get(chunk, pageind);
+ assert(ret != 0);
+ assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
+ assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
+ pageind+(ret>>LG_PAGE)-1) == 0);
+ assert(binind == arena_mapbits_binind_get(chunk,
+ pageind+(ret>>LG_PAGE)-1));
+ assert(arena_mapbits_dirty_get(chunk, pageind) ==
+ arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
+ } else {
+ /*
+ * Small allocation (possibly promoted to a large object due to
+ * prof_promote).
+ */
+ assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+ arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
+ pageind)) == binind);
+ ret = arena_bin_info[binind].reg_size;
+ }
+
+ return (ret);
}
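A usage sketch of the demote flag: true reports the size class the application sees (demoting prof-promoted samples back to their small class), false reports the promoted large size:

size_t usize = arena_salloc(ptr, true);		/* user-visible size class */
size_t psize = arena_salloc(ptr, false);	/* counts prof promotion */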
-#endif
JEMALLOC_INLINE void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
- size_t pageind;
- arena_chunk_map_t *mapelm;
+ size_t pageind, mapbits;
+ tcache_t *tcache;
assert(arena != NULL);
- dassert(arena->magic == ARENA_MAGIC);
assert(chunk->arena == arena);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
- mapelm = &chunk->map[pageind-map_bias];
- assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ if ((mapbits & CHUNK_MAP_LARGE) == 0) {
/* Small allocation. */
-#ifdef JEMALLOC_TCACHE
- tcache_t *tcache;
+ if (try_tcache && (tcache = tcache_get(false)) != NULL) {
+ size_t binind;
- if ((tcache = tcache_get()) != NULL)
- tcache_dalloc_small(tcache, ptr);
- else {
-#endif
- arena_run_t *run;
- arena_bin_t *bin;
-
- run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapelm->bits >>
- PAGE_SHIFT)) << PAGE_SHIFT));
- dassert(run->magic == ARENA_RUN_MAGIC);
- bin = run->bin;
-#ifdef JEMALLOC_DEBUG
- {
- size_t binind = arena_bin_index(arena, bin);
- arena_bin_info_t *bin_info =
- &arena_bin_info[binind];
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) %
- bin_info->reg_size == 0);
- }
-#endif
- malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin(arena, chunk, ptr, mapelm);
- malloc_mutex_unlock(&bin->lock);
-#ifdef JEMALLOC_TCACHE
- }
-#endif
+ binind = arena_ptr_small_binind_get(ptr, mapbits);
+ tcache_dalloc_small(tcache, ptr, binind);
+ } else
+ arena_dalloc_small(arena, chunk, ptr, pageind);
} else {
-#ifdef JEMALLOC_TCACHE
- size_t size = mapelm->bits & ~PAGE_MASK;
+ size_t size = arena_mapbits_large_size_get(chunk, pageind);
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
- if (size <= tcache_maxclass) {
- tcache_t *tcache;
-
- if ((tcache = tcache_get()) != NULL)
- tcache_dalloc_large(tcache, ptr, size);
- else {
- malloc_mutex_lock(&arena->lock);
- arena_dalloc_large(arena, chunk, ptr);
- malloc_mutex_unlock(&arena->lock);
- }
- } else {
- malloc_mutex_lock(&arena->lock);
+
+ if (try_tcache && size <= tcache_maxclass && (tcache =
+ tcache_get(false)) != NULL) {
+ tcache_dalloc_large(tcache, ptr, size);
+ } else
arena_dalloc_large(arena, chunk, ptr);
- malloc_mutex_unlock(&arena->lock);
- }
-#else
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
- malloc_mutex_lock(&arena->lock);
- arena_dalloc_large(arena, chunk, ptr);
- malloc_mutex_unlock(&arena->lock);
-#endif
}
}
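An illustrative teardown path (a sketch, assuming ptr is known to be arena-backed), recovering the chunk before dispatching here:

arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena_dalloc(chunk->arena, chunk, ptr, true);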
+# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
#endif /* JEMALLOC_H_INLINES */