| author | Jason Evans <jasone@canonware.com> | 2016-05-12 21:51:07 (GMT) |
|---|---|---|
| committer | Jason Evans <jasone@canonware.com> | 2016-05-12 21:53:25 (GMT) |
| commit | f70a254d44c8d30af2cd5d30531fb18fdabaae6d (patch) | |
| tree | 4e226a1bd0d56d742ca0950f69829ed7f0216e62 /include/jemalloc/internal/arena.h | |
| parent | e02b83cc5e3c4d30f93dba945162e3aa58d962d6 (diff) | |
| parent | 09f8585ce8a57baa387cc0327e51c0baffbdce6f (diff) | |
Merge branch 'dev' (tag: 4.2.0)
Diffstat (limited to 'include/jemalloc/internal/arena.h')
| -rw-r--r-- | include/jemalloc/internal/arena.h | 390 |
1 file changed, 226 insertions(+), 164 deletions(-)
```diff
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 42a7896..b1de2b6 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -36,6 +36,7 @@ typedef enum {
 #define DECAY_NTICKS_PER_UPDATE 1000
 
 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
+typedef struct arena_avail_links_s arena_avail_links_t;
 typedef struct arena_run_s arena_run_t;
 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
@@ -153,13 +154,13 @@ struct arena_runs_dirty_link_s {
  */
 struct arena_chunk_map_misc_s {
     /*
-     * Linkage for run trees.  There are two disjoint uses:
+     * Linkage for run heaps.  There are two disjoint uses:
      *
-     * 1) arena_t's runs_avail tree.
+     * 1) arena_t's runs_avail heaps.
      * 2) arena_run_t conceptually uses this linkage for in-use non-full
      *    runs, rather than directly embedding linkage.
      */
-    rb_node(arena_chunk_map_misc_t)  rb_link;
+    phn(arena_chunk_map_misc_t)      ph_link;
 
     union {
         /* Linkage for list of dirty runs. */
@@ -175,7 +176,7 @@ struct arena_chunk_map_misc_s {
         arena_run_t  run;
     };
 };
-typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
+typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
 #endif /* JEMALLOC_ARENA_STRUCTS_A */
 
 #ifdef JEMALLOC_ARENA_STRUCTS_B
@@ -272,13 +273,13 @@ struct arena_bin_s {
     arena_run_t  *runcur;
 
     /*
-     * Tree of non-full runs.  This tree is used when looking for an
+     * Heap of non-full runs.  This heap is used when looking for an
      * existing run when runcur is no longer usable.  We choose the
      * non-full run that is lowest in memory; this policy tends to keep
      * objects packed well, and it can also help reduce the number of
      * almost-empty chunks.
      */
-    arena_run_tree_t  runs;
+    arena_run_heap_t  runs;
 
     /* Bin statistics. */
     malloc_bin_stats_t  stats;
@@ -289,10 +290,18 @@ struct arena_s {
     unsigned  ind;
 
     /*
-     * Number of threads currently assigned to this arena.  This field is
-     * synchronized via atomic operations.
+     * Number of threads currently assigned to this arena, synchronized via
+     * atomic operations.  Each thread has two distinct assignments, one for
+     * application-serving allocation, and the other for internal metadata
+     * allocation.  Internal metadata must not be allocated from arenas
+     * created via the arenas.extend mallctl, because the arena.<i>.reset
+     * mallctl indiscriminately discards all allocations for the affected
+     * arena.
+     *
+     *   0: Application allocation.
+     *   1: Internal metadata allocation.
      */
-    unsigned  nthreads;
+    unsigned  nthreads[2];
 
     /*
      * There are three classes of arena operations from a locking
@@ -321,6 +330,10 @@ struct arena_s {
 
     dss_prec_t  dss_prec;
 
+    /* Extant arena chunks. */
+    ql_head(extent_node_t)  achunks;
+
     /*
      * In order to avoid rapid chunk allocation/deallocation when an arena
      * oscillates right on the cusp of needing a new chunk, cache the most
@@ -457,10 +470,10 @@ struct arena_s {
     arena_bin_t  bins[NBINS];
 
     /*
-     * Quantized address-ordered trees of this arena's available runs.  The
-     * trees are used for first-best-fit run allocation.
+     * Quantized address-ordered heaps of this arena's available runs.  The
+     * heaps are used for first-best-fit run allocation.
      */
-    arena_run_tree_t  runs_avail[1]; /* Dynamically sized. */
+    arena_run_heap_t  runs_avail[1]; /* Dynamically sized. */
 };
 
 /* Used in conjunction with tsd for fast arena-related context lookup. */
@@ -505,25 +518,28 @@ void arena_chunk_cache_maybe_insert(arena_t *arena,
     extent_node_t *node, bool cache);
 void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
     bool cache);
-extent_node_t *arena_node_alloc(arena_t *arena);
-void arena_node_dalloc(arena_t *arena, extent_node_t *node);
-void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
-    bool *zero);
-void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
-void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize);
-void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize);
-bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize, bool *zero);
-ssize_t arena_lg_dirty_mult_get(arena_t *arena);
-bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_get(arena_t *arena);
-bool arena_decay_time_set(arena_t *arena, ssize_t decay_time);
-void arena_maybe_purge(arena_t *arena);
-void arena_purge(arena_t *arena, bool all);
-void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
-    szind_t binind, uint64_t prof_accumbytes);
+extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
+void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool *zero);
+void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
+    size_t usize);
+void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
+    void *chunk, size_t oldsize, size_t usize);
+void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
+    void *chunk, size_t oldsize, size_t usize);
+bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+    void *chunk, size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
+    ssize_t lg_dirty_mult);
+ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
+void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
+void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
+void arena_reset(tsd_t *tsd, arena_t *arena);
+void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
+    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
 void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero);
 #ifdef JEMALLOC_JET
@@ -536,17 +552,18 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
 void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 #endif
 void arena_quarantine_junk_small(void *ptr, size_t usize);
-void *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
-void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
-    bool zero, tcache_t *tcache);
-void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
+    bool zero);
+void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+    szind_t ind, bool zero);
+void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     void *ptr, size_t pageind);
 #ifdef JEMALLOC_JET
 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
@@ -554,70 +571,80 @@ extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
 #else
 void arena_dalloc_junk_large(void *ptr, size_t usize);
 #endif
-void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr);
-void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+    arena_chunk_t *chunk, void *ptr);
+void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 #ifdef JEMALLOC_JET
 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
 #endif
-bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
-    size_t extra, bool zero);
+bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t size, size_t extra, bool zero);
 void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t arena_dss_prec_get(arena_t *arena);
-bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
 ssize_t arena_lg_dirty_mult_default_get(void);
 bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
 ssize_t arena_decay_time_default_get(void);
 bool arena_decay_time_default_set(ssize_t decay_time);
-void arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
+void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+    unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
+    ssize_t *decay_time, size_t *nactive, size_t *ndirty);
+void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
-    size_t *nactive, size_t *ndirty);
-void arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
-    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
-unsigned arena_nthreads_get(arena_t *arena);
-void arena_nthreads_inc(arena_t *arena);
-void arena_nthreads_dec(arena_t *arena);
-arena_t *arena_new(unsigned ind);
+    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+    malloc_huge_stats_t *hstats);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
 bool arena_boot(void);
-void arena_prefork0(arena_t *arena);
-void arena_prefork1(arena_t *arena);
-void arena_prefork2(arena_t *arena);
-void arena_prefork3(arena_t *arena);
-void arena_postfork_parent(arena_t *arena);
-void arena_postfork_child(arena_t *arena);
+void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
+arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
     size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
+const arena_chunk_map_bits_t *arena_bitselm_get_const(
+    const arena_chunk_t *chunk, size_t pageind);
+arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
     size_t pageind);
+const arena_chunk_map_misc_t *arena_miscelm_get_const(
+    const arena_chunk_t *chunk, size_t pageind);
 size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
 arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
 arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
-size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbitsp_read(size_t *mapbitsp);
-size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
+    size_t pageind);
+size_t arena_mapbitsp_read(const size_t *mapbitsp);
+size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
+size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
     size_t pageind);
-size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
+    size_t pageind);
+size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+    size_t pageind);
+szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
+    size_t pageind);
+size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
 void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
 size_t arena_mapbits_size_encode(size_t size);
 void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@@ -637,29 +664,31 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
 size_t arena_metadata_allocated_get(arena_t *arena);
 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
 szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
 size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
-prof_tctx_t *arena_prof_tctx_get(const void *ptr);
-void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void arena_prof_tctx_reset(const void *ptr, size_t usize,
+prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+    prof_tctx_t *tctx);
+void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx);
-void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
-void arena_decay_tick(tsd_t *tsd, arena_t *arena);
-void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
+void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
+void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
+void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache, bool slow_path);
 arena_t *arena_aalloc(const void *ptr);
-size_t arena_salloc(const void *ptr, bool demote);
-void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
+void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+    bool slow_path);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 # ifdef JEMALLOC_ARENA_INLINE_A
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
 {
 
     assert(pageind >= map_bias);
@@ -668,8 +697,15 @@ arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
     return (&chunk->map_bits[pageind-map_bias]);
 }
 
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
+arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+    return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
 {
 
     assert(pageind >= map_bias);
@@ -679,6 +715,13 @@ arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
         (uintptr_t)map_misc_offset) + pageind-map_bias);
 }
 
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
+arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+    return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
 {
@@ -693,7 +736,7 @@ arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
 {
     arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
     size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -726,24 +769,31 @@ arena_run_to_miscelm(arena_run_t *run)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
 {
 
-    return (&arena_bitselm_get(chunk, pageind)->bits);
+    return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
+}
+
+JEMALLOC_ALWAYS_INLINE const size_t *
+arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+    return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(size_t *mapbitsp)
+arena_mapbitsp_read(const size_t *mapbitsp)
 {
 
     return (*mapbitsp);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
 {
 
-    return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
+    return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -763,7 +813,7 @@ arena_mapbits_size_decode(size_t mapbits)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -773,7 +823,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -784,7 +834,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -795,7 +845,7 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
     szind_t binind;
@@ -807,7 +857,7 @@ arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -818,7 +868,7 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -829,7 +879,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -840,7 +890,7 @@ arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -849,7 +899,7 @@ arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
 
@@ -885,7 +935,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
-    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
     assert((size & PAGE_MASK) == 0);
     assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -899,7 +949,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
     size_t size)
 {
-    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
     size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
     assert((size & PAGE_MASK) == 0);
@@ -911,7 +961,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
 {
-    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
     assert((flags & CHUNK_MAP_UNZEROED) == flags);
     arena_mapbitsp_write(mapbitsp, flags);
@@ -921,7 +971,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
-    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
     assert((size & PAGE_MASK) == 0);
     assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -936,7 +986,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
     szind_t binind)
 {
-    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
     size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
     assert(binind <= BININD_INVALID);
@@ -950,7 +1000,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
     szind_t binind, size_t flags)
 {
-    size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
 
     assert(binind < BININD_INVALID);
     assert(pageind - runind >= map_bias);
@@ -1007,7 +1057,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
 }
 
 JEMALLOC_INLINE bool
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
 {
 
     cassert(config_prof);
@@ -1018,9 +1068,9 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
     {
         bool ret;
 
-        malloc_mutex_lock(&arena->lock);
+        malloc_mutex_lock(tsdn, &arena->lock);
         ret = arena_prof_accum_impl(arena, accumbytes);
-        malloc_mutex_unlock(&arena->lock);
+        malloc_mutex_unlock(tsdn, &arena->lock);
         return (ret);
     }
 }
@@ -1038,12 +1088,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
         size_t pageind;
         size_t actual_mapbits;
         size_t rpages_ind;
-        arena_run_t *run;
+        const arena_run_t *run;
         arena_bin_t *bin;
         szind_t run_binind, actual_binind;
         arena_bin_info_t *bin_info;
-        arena_chunk_map_misc_t *miscelm;
-        void *rpages;
+        const arena_chunk_map_misc_t *miscelm;
+        const void *rpages;
 
         assert(binind != BININD_INVALID);
         assert(binind < NBINS);
@@ -1056,7 +1106,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
         assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
         rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
             pageind);
-        miscelm = arena_miscelm_get(chunk, rpages_ind);
+        miscelm = arena_miscelm_get_const(chunk, rpages_ind);
         run = &miscelm->run;
         run_binind = run->binind;
         bin = &arena->bins[run_binind];
@@ -1156,7 +1206,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 }
 
 JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(const void *ptr)
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
 {
     prof_tctx_t *ret;
     arena_chunk_t *chunk;
@@ -1172,18 +1222,19 @@ arena_prof_tctx_get(const void *ptr)
         if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
             ret = (prof_tctx_t *)(uintptr_t)1U;
         else {
-            arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
-                pageind);
+            arena_chunk_map_misc_t *elm =
+                arena_miscelm_get_mutable(chunk, pageind);
             ret = atomic_read_p(&elm->prof_tctx_pun);
         }
     } else
-        ret = huge_prof_tctx_get(ptr);
+        ret = huge_prof_tctx_get(tsdn, ptr);
 
     return (ret);
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+    prof_tctx_t *tctx)
 {
     arena_chunk_t *chunk;
@@ -1202,7 +1253,7 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
 
             assert(arena_mapbits_large_get(chunk, pageind) != 0);
 
-            elm = arena_miscelm_get(chunk, pageind);
+            elm = arena_miscelm_get_mutable(chunk, pageind);
             atomic_write_p(&elm->prof_tctx_pun, tctx);
         } else {
             /*
@@ -1214,12 +1265,12 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
             assert(arena_mapbits_large_get(chunk, pageind) == 0);
         }
     } else
-        huge_prof_tctx_set(ptr, tctx);
+        huge_prof_tctx_set(tsdn, ptr, tctx);
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
-    prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+    const void *old_ptr, prof_tctx_t *old_tctx)
 {
 
     cassert(config_prof);
@@ -1238,56 +1289,59 @@ arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
                 0);
             assert(arena_mapbits_large_get(chunk, pageind) != 0);
 
-            elm = arena_miscelm_get(chunk, pageind);
+            elm = arena_miscelm_get_mutable(chunk, pageind);
             atomic_write_p(&elm->prof_tctx_pun,
                 (prof_tctx_t *)(uintptr_t)1U);
         } else
-            huge_prof_tctx_reset(ptr);
+            huge_prof_tctx_reset(tsdn, ptr);
     }
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
 {
+    tsd_t *tsd;
     ticker_t *decay_ticker;
 
-    if (unlikely(tsd == NULL))
+    if (unlikely(tsdn_null(tsdn)))
         return;
+    tsd = tsdn_tsd(tsdn);
     decay_ticker = decay_ticker_get(tsd, arena->ind);
     if (unlikely(decay_ticker == NULL))
        return;
     if (unlikely(ticker_ticks(decay_ticker, nticks)))
-        arena_purge(arena, false);
+        arena_purge(tsdn, arena, false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsd_t *tsd, arena_t *arena)
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
 {
 
-    arena_decay_ticks(tsd, arena, 1);
+    arena_decay_ticks(tsdn, arena, 1);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
     tcache_t *tcache, bool slow_path)
 {
 
+    assert(!tsdn_null(tsdn) || tcache == NULL);
     assert(size != 0);
 
     if (likely(tcache != NULL)) {
        if (likely(size <= SMALL_MAXCLASS)) {
-            return (tcache_alloc_small(tsd, arena, tcache, size,
-                ind, zero, slow_path));
+            return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
+                tcache, size, ind, zero, slow_path));
        }
        if (likely(size <= tcache_maxclass)) {
-            return (tcache_alloc_large(tsd, arena, tcache, size,
-                ind, zero, slow_path));
+            return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
+                tcache, size, ind, zero, slow_path));
        }
        /* (size > tcache_maxclass) case falls through. */
        assert(size > tcache_maxclass);
    }
 
-    return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
+    return (arena_malloc_hard(tsdn, arena, size, ind, zero));
 }
 
 JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1304,7 +1358,7 @@ arena_aalloc(const void *ptr)
 
 /* Return the size of the allocation pointed to by ptr. */
 JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(const void *ptr, bool demote)
+arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
 {
     size_t ret;
     arena_chunk_t *chunk;
@@ -1347,17 +1401,18 @@ arena_salloc(const void *ptr, bool demote)
             ret = index2size(binind);
        }
    } else
-        ret = huge_salloc(ptr);
+        ret = huge_salloc(tsdn, ptr);
 
     return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
 {
     arena_chunk_t *chunk;
     size_t pageind, mapbits;
 
+    assert(!tsdn_null(tsdn) || tcache == NULL);
     assert(ptr != NULL);
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1370,11 +1425,12 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
             if (likely(tcache != NULL)) {
                 szind_t binind = arena_ptr_small_binind_get(ptr,
                     mapbits);
-                tcache_dalloc_small(tsd, tcache, ptr, binind,
-                    slow_path);
+                tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+                    binind, slow_path);
             } else {
-                arena_dalloc_small(tsd, extent_node_arena_get(
-                    &chunk->node), chunk, ptr, pageind);
+                arena_dalloc_small(tsdn,
+                    extent_node_arena_get(&chunk->node), chunk,
+                    ptr, pageind);
             }
         } else {
             size_t size = arena_mapbits_large_size_get(chunk,
@@ -1385,22 +1441,26 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 
             if (likely(tcache != NULL) && size - large_pad <=
                 tcache_maxclass) {
-                tcache_dalloc_large(tsd, tcache, ptr, size -
-                    large_pad, slow_path);
+                tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+                    size - large_pad, slow_path);
             } else {
-                arena_dalloc_large(tsd, extent_node_arena_get(
-                    &chunk->node), chunk, ptr);
+                arena_dalloc_large(tsdn,
+                    extent_node_arena_get(&chunk->node), chunk,
+                    ptr);
             }
         }
     } else
-        huge_dalloc(tsd, ptr, tcache);
+        huge_dalloc(tsdn, ptr);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+    bool slow_path)
 {
     arena_chunk_t *chunk;
 
+    assert(!tsdn_null(tsdn) || tcache == NULL);
+
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (likely(chunk != ptr)) {
         if (config_prof && opt_prof) {
@@ -1417,34 +1477,36 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
                     pageind) - large_pad;
             }
         }
-        assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+        assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
 
         if (likely(size <= SMALL_MAXCLASS)) {
             /* Small allocation. */
             if (likely(tcache != NULL)) {
                 szind_t binind = size2index(size);
-                tcache_dalloc_small(tsd, tcache, ptr, binind,
-                    true);
+                tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+                    binind, slow_path);
             } else {
                 size_t pageind = ((uintptr_t)ptr -
                     (uintptr_t)chunk) >> LG_PAGE;
-                arena_dalloc_small(tsd, extent_node_arena_get(
-                    &chunk->node), chunk, ptr, pageind);
+                arena_dalloc_small(tsdn,
+                    extent_node_arena_get(&chunk->node), chunk,
+                    ptr, pageind);
             }
         } else {
             assert(config_cache_oblivious || ((uintptr_t)ptr &
                 PAGE_MASK) == 0);
 
             if (likely(tcache != NULL) && size <= tcache_maxclass) {
-                tcache_dalloc_large(tsd, tcache, ptr, size,
-                    true);
+                tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+                    size, slow_path);
            } else {
-                arena_dalloc_large(tsd, extent_node_arena_get(
-                    &chunk->node), chunk, ptr);
+                arena_dalloc_large(tsdn,
+                    extent_node_arena_get(&chunk->node), chunk,
+                    ptr);
            }
        }
    } else
-        huge_dalloc(tsd, ptr, tcache);
+        huge_dalloc(tsdn, ptr);
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
```
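The structural change running through the hunks above replaces red-black-tree linkage (`rb_node`/`rb_tree`) with pairing-heap linkage (`phn`/`ph`) for tracking available and non-full runs, keyed so the lowest run in memory is taken first. The sketch below is a minimal, self-contained illustration of an intrusive pairing heap with that policy; the `node_t` layout, the `heap_*` names, and the two-pass delete-min are generic textbook choices, not jemalloc's actual `ph.h` implementation.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for arena_chunk_map_misc_t: the heap linkage is embedded in the
 * element itself (intrusive), so no separate nodes are allocated. */
typedef struct node_s node_t;
struct node_s {
	uintptr_t	addr;		/* key: run base address */
	node_t		*child;		/* leftmost child */
	node_t		*sibling;	/* next sibling / work-list link */
};

/* Meld two heaps; the lower address becomes the root, which makes
 * "take the minimum" the same as "take the lowest run in memory". */
static node_t *
meld(node_t *a, node_t *b)
{
	if (a == NULL)
		return (b);
	if (b == NULL)
		return (a);
	if (b->addr < a->addr) {
		node_t *t = a; a = b; b = t;
	}
	b->sibling = a->child;
	a->child = b;
	return (a);
}

static node_t *
heap_insert(node_t *root, node_t *n)	/* O(1) */
{
	n->child = NULL;
	n->sibling = NULL;
	return (meld(root, n));
}

/* Remove the root (minimum) via standard two-pass pairing: meld children
 * pairwise left to right, then meld the pairs right to left. */
static node_t *
heap_delete_min(node_t *root, node_t **minp)
{
	node_t *stack = NULL, *a;

	*minp = root;
	if (root == NULL)
		return (NULL);
	a = root->child;
	while (a != NULL) {
		node_t *b = a->sibling, *pair;

		a->sibling = NULL;
		if (b != NULL) {
			node_t *next = b->sibling;
			b->sibling = NULL;
			pair = meld(a, b);
			a = next;
		} else {
			pair = a;
			a = NULL;
		}
		pair->sibling = stack;	/* reuse sibling as a work list */
		stack = pair;
	}
	root = NULL;
	while (stack != NULL) {
		node_t *next = stack->sibling;

		stack->sibling = NULL;
		root = meld(root, stack);
		stack = next;
	}
	return (root);
}

int
main(void)
{
	node_t runs[4];
	node_t *heap = NULL, *min;
	uintptr_t addrs[] = {0x4000, 0x1000, 0x3000, 0x2000};

	for (int i = 0; i < 4; i++) {
		runs[i].addr = addrs[i];
		heap = heap_insert(heap, &runs[i]);
	}
	for (;;) {
		heap = heap_delete_min(heap, &min);
		if (min == NULL)
			break;
		printf("0x%lx\n", (unsigned long)min->addr); /* ascending */
	}
	return (0);
}
```

Under this reading, first-best-fit reduces to a delete-min per quantized size class, while returning a freed run to `runs_avail` is a constant-time meld; the O(1) insert is the usual draw of a pairing heap over a red-black tree for this workload.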
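The other change threaded through nearly every prototype is the `tsd_t *` to `tsdn_t *` migration. Judging from `tsdn_null()`/`tsdn_tsd()` in the new `arena_decay_ticks()` and the `assert(!tsdn_null(tsdn) || tcache == NULL)` guards, a `tsdn_t` is a nullable handle to thread-specific data. A sketch of that contract follows; the representation of the wrapper type is an assumption, since the diff does not show it.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s {
	unsigned decay_ticks;	/* simplified thread-specific state */
} tsd_t;

/* Assumption for this sketch: tsdn_t is pointer-compatible with tsd_t,
 * with NULL meaning "no thread context available". */
typedef tsd_t tsdn_t;

static inline bool
tsdn_null(const tsdn_t *tsdn)
{
	return (tsdn == NULL);
}

static inline tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
	assert(!tsdn_null(tsdn));	/* callers must test first */
	return (tsdn);
}

/* Shaped like the new arena_decay_ticks(): bail out when there is no
 * thread context, otherwise unwrap exactly once. */
static void
decay_ticks(tsdn_t *tsdn, unsigned nticks)
{
	tsd_t *tsd;

	if (tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);
	tsd->decay_ticks += nticks;
}

int
main(void)
{
	tsd_t tsd = {0};

	decay_ticks(NULL, 3);	/* no-op: no thread context */
	decay_ticks(&tsd, 3);
	return (tsd.decay_ticks == 3 ? 0 : 1);
}
```

This explains why fast paths keep `tsd_t`-requiring calls (`tcache_alloc_small(tsdn_tsd(tsdn), ...)`) behind `tcache != NULL` checks: a non-NULL tcache implies a live thread, which the new asserts encode.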
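A third recurring pattern is the split of each map accessor into a `_mutable` variant and a `_const` wrapper (e.g. `arena_bitselm_get_mutable`/`arena_bitselm_get_const`), so read-only callers can pass `const arena_chunk_t *` while the address arithmetic lives in one place. The idiom, reduced to a toy type (only the types are simplified; the wrapper body mirrors the diff):

```c
#include <stddef.h>

typedef struct {
	size_t bits;
} map_bits_t;

typedef struct {
	map_bits_t map_bits[8];	/* toy stand-in for the chunk page map */
} chunk_t;

/* Writers get a mutable element... */
static inline map_bits_t *
bitselm_get_mutable(chunk_t *chunk, size_t pageind)
{
	return (&chunk->map_bits[pageind]);
}

/* ...and the read-only variant reuses the same arithmetic by casting
 * const away once, in a single auditable place. */
static inline const map_bits_t *
bitselm_get_const(const chunk_t *chunk, size_t pageind)
{
	return (bitselm_get_mutable((chunk_t *)chunk, pageind));
}

int
main(void)
{
	chunk_t c = {{{0}}};
	const chunk_t *view = &c;

	bitselm_get_mutable(&c, 0)->bits = 1;	/* write path */
	return (bitselm_get_const(view, 0)->bits == 1 ? 0 : 1);
}
```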
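Finally, the `nthreads` field becoming a two-element array pairs with the new `bool internal` parameter on `arena_nthreads_get`/`_inc`/`_dec`: index 0 counts threads assigned for application allocation, index 1 for internal metadata allocation. A sketch of the indexing; the struct comment says the counts are synchronized via atomic operations, so the plain increments below are a simplification.

```c
#include <stdbool.h>

typedef struct {
	/* 0: application allocation; 1: internal metadata allocation. */
	unsigned nthreads[2];
} arena_t;

static unsigned
arena_nthreads_get(const arena_t *arena, bool internal)
{
	return (arena->nthreads[internal]);	/* bool selects the slot */
}

static void
arena_nthreads_inc(arena_t *arena, bool internal)
{
	arena->nthreads[internal]++;	/* jemalloc uses atomics here */
}

static void
arena_nthreads_dec(arena_t *arena, bool internal)
{
	arena->nthreads[internal]--;
}

int
main(void)
{
	arena_t a = {{0, 0}};

	arena_nthreads_inc(&a, false);	/* an application thread arrives */
	arena_nthreads_inc(&a, true);	/* a thread uses it for metadata */
	arena_nthreads_dec(&a, true);
	return (arena_nthreads_get(&a, false) == 1 ? 0 : 1);
}
```

Keeping the two assignments separate is what lets `arena.<i>.reset` safely discard everything in an `arenas.extend`-created arena: internal metadata is never placed there, per the new struct comment.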
