author     Jason Evans <jasone@canonware.com>  2015-02-11 20:24:27 (GMT)
committer  Jason Evans <jasone@canonware.com>  2015-02-12 08:15:56 (GMT)
commit     cbf3a6d70371d2390b8b0e76814e04cc6088002c
tree       2b2cfd2ec225e680a83a5d231bc11e44892e66b9 /include/jemalloc
parent     f30e261c5b85d2900224f91c6d426a23dce94fe9
Move centralized chunk management into arenas.
Migrate all centralized data structures related to huge allocations and
recyclable chunks into arena_t, so that each arena can manage huge
allocations and recyclable virtual memory completely independently of
other arenas.
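The payoff is visible in the recycling path: each arena searches its own size/address-ordered extent trees under its own chunks_mtx, so arenas never serialize one another. The sketch below reconstructs an exact-fit-only version of that path from the interfaces declared in this diff (arena_chunk_recycle is an illustrative name; the real recycler also splits oversized nodes and honors alignment):

```c
static void *
arena_chunk_recycle(arena_t *arena, size_t size, bool *zero)
{
	extent_node_t *node, key;
	void *ret;

	key.addr = NULL;
	key.size = size;
	malloc_mutex_lock(&arena->chunks_mtx);
	/* Smallest node with node->size >= size; bail unless exact fit. */
	node = extent_tree_szad_nsearch(&arena->chunks_szad_mmap, &key);
	if (node == NULL || node->size != size) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);	/* Caller falls back to a fresh mapping. */
	}
	extent_tree_szad_remove(&arena->chunks_szad_mmap, node);
	extent_tree_ad_remove(&arena->chunks_ad_mmap, node);
	ret = node->addr;
	*zero = node->zeroed;
	malloc_mutex_unlock(&arena->chunks_mtx);
	arena_node_dalloc(arena, node);	/* Recycle the node as well. */
	return (ret);
}
```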
Add chunk node caching to arenas, in order to avoid contention on the
base allocator.
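The header diff below only declares arena_node_alloc()/arena_node_dalloc(); their bodies live in src/arena.c, outside this diff. A sketch of the likely shape, reconstructed from the node_cache fields and ql macros that do appear here — on a miss the cache falls through to base_alloc(), and freed nodes are retained rather than returned:

```c
extent_node_t *
arena_node_alloc(arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(&arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, link_ql);
	if (node == NULL) {
		malloc_mutex_unlock(&arena->node_cache_mtx);
		/* Cache miss: take the contended base allocator path. */
		return (base_alloc(sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, link_ql);
	malloc_mutex_unlock(&arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

	/* Never returned to the base allocator; cached for reuse. */
	malloc_mutex_lock(&arena->node_cache_mtx);
	ql_elm_new(node, link_ql);
	ql_tail_insert(&arena->node_cache, node, link_ql);
	malloc_mutex_unlock(&arena->node_cache_mtx);
}
```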
Use chunks_rtree to look up huge allocations rather than a red-black
tree. Maintain a per-arena unsorted list of huge allocations (which
will be needed to enumerate huge allocations during arena reset).
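The pieces this relies on are all declared below: chunk_register()/chunk_deregister(), the chunk_lookup() inline over chunks_rtree, and the link_ql linkage added to extent_node_t. A hedged sketch of how they compose (huge_register is a hypothetical helper; error handling for a failed chunk_register() is elided, and the committed src/huge.c may differ in detail):

```c
static void
huge_register(arena_t *arena, extent_node_t *node, void *chunk)
{

	/* Unsorted per-arena list, kept only so huge allocations can be
	 * enumerated later (e.g. during arena reset). */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, link_ql);
	ql_tail_insert(&arena->huge, node, link_ql);
	malloc_mutex_unlock(&arena->huge_mtx);
	/* Radix tree entry, used for size queries. */
	chunk_register(chunk, node);
}

/* A huge size query then reduces to an rtree lookup plus a field read. */
size_t
huge_salloc(const void *ptr)
{
	extent_node_t *node = chunk_lookup(ptr);

	assert(node != NULL && node->size != 0);
	return (node->size);
}
```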
Remove the --enable-ivsalloc option, make ivsalloc() always available,
and use it for size queries if --enable-debug is enabled. The only
practical implications of this removal are that 1) ivsalloc() is now
always available during live debugging (and the underlying radix tree is
available during core-based debugging), and 2) size query validation can
no longer be enabled independently of --enable-debug.
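Concretely, the validating path is now selected by config_debug alone. A sketch of the resulting size-query pattern (usable_size is an illustrative name; compare je_malloc_usable_size() in src/jemalloc.c):

```c
size_t
usable_size(const void *ptr)
{

	if (config_debug) {
		/* ivsalloc() consults chunks_rtree and returns 0 for
		 * pointers that jemalloc does not own. */
		return (ivsalloc(ptr, config_prof));
	}
	/* isalloc() trusts its argument and performs no ownership check. */
	return ((ptr == NULL) ? 0 : isalloc(ptr, config_prof));
}
```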
Remove the stats.chunks.{current,total,high} mallctls, and replace their
underlying statistics with simpler atomically updated counters used
exclusively for gdump triggering. These statistics are no longer very
useful because each arena manages chunks independently, and per-arena
statistics provide similar information.
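A sketch of the replacement accounting, built from the atomic primitives this diff touches (the counter names and chunks_prof_update() are illustrative; the committed counters live in src/chunk.c). Note that jemalloc's atomic_cas_z() returns true on failure, which the retry loop relies on:

```c
static size_t	curchunks;	/* Current chunk count; gdump use only. */
static size_t	highchunks;	/* High-water mark; gdump use only. */

static void
chunks_prof_update(bool add)
{
	size_t cur, high;

	if (!add) {
		atomic_sub_z(&curchunks, 1);
		return;
	}
	cur = atomic_add_z(&curchunks, 1);
	high = highchunks;	/* Racy read; corrected by the CAS loop. */
	while (cur > high) {
		if (atomic_cas_z(&highchunks, high, cur)) {
			/* CAS failed: another thread raised the mark. */
			high = highchunks;
			continue;
		}
		/* This thread raised the high-water mark; emit a dump. */
		if (opt_prof_gdump)
			prof_gdump();
		break;
	}
}
```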
Simplify chunk synchronization code, now that base chunk allocation
cannot cause recursive lock acquisition.
Diffstat (limited to 'include/jemalloc')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | include/jemalloc/internal/arena.h | 60 |
| -rw-r--r-- | include/jemalloc/internal/atomic.h | 4 |
| -rw-r--r-- | include/jemalloc/internal/base.h | 2 |
| -rw-r--r-- | include/jemalloc/internal/chunk.h | 22 |
| -rw-r--r-- | include/jemalloc/internal/chunk_dss.h | 4 |
| -rw-r--r-- | include/jemalloc/internal/ctl.h | 5 |
| -rw-r--r-- | include/jemalloc/internal/extent.h | 25 |
| -rw-r--r-- | include/jemalloc/internal/huge.h | 4 |
| -rw-r--r-- | include/jemalloc/internal/jemalloc_internal.h.in | 28 |
| -rw-r--r-- | include/jemalloc/internal/jemalloc_internal_defs.h.in | 6 |
| -rw-r--r-- | include/jemalloc/internal/private_symbols.txt | 12 |
| -rw-r--r-- | include/jemalloc/internal/rtree.h | 23 |
| -rw-r--r-- | include/jemalloc/internal/stats.h | 15 |
13 files changed, 111 insertions, 99 deletions
```diff
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 5476899..2ae4609 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -151,8 +151,12 @@ typedef ql_head(arena_chunk_map_misc_t) arena_chunk_miscelms_t;
 
 /* Arena chunk header. */
 struct arena_chunk_s {
-	/* Arena that owns the chunk. */
-	arena_t			*arena;
+	/*
+	 * The arena that owns the chunk is node.arena.  This field as a whole
+	 * is used by chunks_rtree to support both ivsalloc() and core-based
+	 * debugging.
+	 */
+	extent_node_t		node;
 
 	/*
 	 * Map of pages within chunk that keeps track of free/large/small.  The
@@ -313,6 +317,27 @@ struct arena_s {
 	/* List of dirty runs this arena manages. */
 	arena_chunk_miscelms_t	runs_dirty;
 
+	/* Extant huge allocations. */
+	ql_head(extent_node_t)	huge;
+	/* Synchronizes all huge allocation/update/deallocation. */
+	malloc_mutex_t		huge_mtx;
+
+	/*
+	 * Trees of chunks that were previously allocated (trees differ only in
+	 * node ordering).  These are used when allocating chunks, in an attempt
+	 * to re-use address space.  Depending on function, different tree
+	 * orderings are needed, which is why there are two trees with the same
+	 * contents.
+	 */
+	extent_tree_t		chunks_szad_mmap;
+	extent_tree_t		chunks_ad_mmap;
+	extent_tree_t		chunks_szad_dss;
+	extent_tree_t		chunks_ad_dss;
+	malloc_mutex_t		chunks_mtx;
+	/* Cache of nodes that were allocated via base_alloc(). */
+	ql_head(extent_node_t)	node_cache;
+	malloc_mutex_t		node_cache_mtx;
+
 	/*
 	 * User-configurable chunk allocation and deallocation functions.
 	 */
@@ -338,6 +363,8 @@ extern size_t	arena_maxclass; /* Max size class for arenas. */
 extern unsigned	nlclasses; /* Number of large size classes. */
 extern unsigned	nhclasses; /* Number of huge size classes. */
 
+extent_node_t	*arena_node_alloc(arena_t *arena);
+void	arena_node_dalloc(arena_t *arena, extent_node_t *node);
 void	*arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
     bool *zero);
 void	arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
@@ -453,8 +480,7 @@ void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     tcache_t *tcache);
 arena_t	*arena_aalloc(const void *ptr);
 size_t	arena_salloc(const void *ptr, bool demote);
-void	arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr,
-    tcache_t *tcache);
+void	arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
 void	arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
     tcache_t *tcache);
 #endif
@@ -792,7 +818,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 		assert(binind != BININD_INVALID);
 		assert(binind < NBINS);
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-		arena = chunk->arena;
+		arena = chunk->node.arena;
 		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 		actual_mapbits = arena_mapbits_get(chunk, pageind);
 		assert(mapbits == actual_mapbits);
@@ -980,7 +1006,7 @@ arena_aalloc(const void *ptr)
 	arena_chunk_t *chunk;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	return (chunk->arena);
+	return (chunk->node.arena);
 }
 
 /* Return the size of the allocation pointed to by ptr. */
@@ -1024,11 +1050,18 @@ arena_salloc(const void *ptr, bool demote)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, tcache_t *tcache)
+arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
+	arena_chunk_t *chunk;
 	size_t pageind, mapbits;
 
 	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (unlikely(chunk == ptr)) {
+		huge_dalloc(tsd, ptr, tcache);
+		return;
+	}
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
@@ -1040,8 +1073,10 @@ arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, tcache_t *tcache)
 			index_t binind = arena_ptr_small_binind_get(ptr,
 			    mapbits);
 			tcache_dalloc_small(tsd, tcache, ptr, binind);
-		} else
-			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
+		} else {
+			arena_dalloc_small(chunk->node.arena, chunk, ptr,
+			    pageind);
+		}
 	} else {
 		size_t size = arena_mapbits_large_size_get(chunk, pageind);
 
@@ -1050,7 +1085,7 @@ arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, tcache_t *tcache)
 		if (likely(tcache != NULL) && size <= tcache_maxclass)
 			tcache_dalloc_large(tsd, tcache, ptr, size);
 		else
-			arena_dalloc_large(chunk->arena, chunk, ptr);
+			arena_dalloc_large(chunk->node.arena, chunk, ptr);
 	}
 }
 
@@ -1081,7 +1116,8 @@ arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
 		} else {
 			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 			    LG_PAGE;
-			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
+			arena_dalloc_small(chunk->node.arena, chunk, ptr,
+			    pageind);
 		}
 	} else {
 		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
@@ -1089,7 +1125,7 @@ arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
 		if (likely(tcache != NULL) && size <= tcache_maxclass)
 			tcache_dalloc_large(tsd, tcache, ptr, size);
 		else
-			arena_dalloc_large(chunk->arena, chunk, ptr);
+			arena_dalloc_large(chunk->node.arena, chunk, ptr);
 	}
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
diff --git a/include/jemalloc/internal/atomic.h b/include/jemalloc/internal/atomic.h
index af2c687..0d33065 100644
--- a/include/jemalloc/internal/atomic.h
+++ b/include/jemalloc/internal/atomic.h
@@ -52,7 +52,7 @@ void	atomic_write_uint32(uint32_t *p, uint32_t x);
 void	*atomic_add_p(void **p, void *x);
 void	*atomic_sub_p(void **p, void *x);
 bool	atomic_cas_p(void **p, void *c, void *s);
-void	atomic_write_p(void **p, void *x);
+void	atomic_write_p(void **p, const void *x);
 size_t	atomic_add_z(size_t *p, size_t x);
 size_t	atomic_sub_z(size_t *p, size_t x);
 bool	atomic_cas_z(size_t *p, size_t c, size_t s);
@@ -538,7 +538,7 @@ atomic_cas_p(void **p, void *c, void *s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_p(void **p, void *x)
+atomic_write_p(void **p, const void *x)
 {
 
 #if (LG_SIZEOF_PTR == 3)
diff --git a/include/jemalloc/internal/base.h b/include/jemalloc/internal/base.h
index a0798ee..bec76b3 100644
--- a/include/jemalloc/internal/base.h
+++ b/include/jemalloc/internal/base.h
@@ -10,8 +10,6 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 void	*base_alloc(size_t size);
-extent_node_t	*base_node_alloc(void);
-void	base_node_dalloc(extent_node_t *node);
 size_t	base_allocated_get(void);
 bool	base_boot(void);
 void	base_prefork(void);
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 62ac3e7..5e0fb14 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -30,24 +30,21 @@ extern size_t		opt_lg_chunk;
 extern const char	*opt_dss;
 
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t	chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t	stats_chunks;
-
 extern rtree_t		chunks_rtree;
 
 extern size_t		chunksize;
 extern size_t		chunksize_mask; /* (chunksize - 1). */
 extern size_t		chunk_npages;
 
+bool	chunk_register(const void *chunk, const extent_node_t *node);
+void	chunk_deregister(const void *chunk, const extent_node_t *node);
 void	*chunk_alloc_base(size_t size);
 void	*chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
     chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, void *new_addr,
     size_t size, size_t alignment, bool *zero);
 void	*chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
     bool *zero, unsigned arena_ind);
-void	chunk_unmap(void *chunk, size_t size);
+void	chunk_unmap(arena_t *arena, void *chunk, size_t size);
 bool	chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
 bool	chunk_boot(void);
 void	chunk_prefork(void);
@@ -58,6 +55,19 @@ void	chunk_postfork_child(void);
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+#ifndef JEMALLOC_ENABLE_INLINE
+extent_node_t	*chunk_lookup(const void *chunk);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
+JEMALLOC_INLINE extent_node_t *
+chunk_lookup(const void *chunk)
+{
+
+	return (rtree_get(&chunks_rtree, (uintptr_t)chunk));
+}
+#endif
+
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
diff --git a/include/jemalloc/internal/chunk_dss.h b/include/jemalloc/internal/chunk_dss.h
index 0989647..87366a2 100644
--- a/include/jemalloc/internal/chunk_dss.h
+++ b/include/jemalloc/internal/chunk_dss.h
@@ -23,8 +23,8 @@ extern const char *dss_prec_names[];
 
 dss_prec_t	chunk_dss_prec_get(void);
 bool	chunk_dss_prec_set(dss_prec_t dss_prec);
-void	*chunk_alloc_dss(void *new_addr, size_t size, size_t alignment,
-    bool *zero);
+void	*chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero);
 bool	chunk_in_dss(void *chunk);
 bool	chunk_dss_boot(void);
 void	chunk_dss_prefork(void);
diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index 65617bc..ab9c986 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -54,11 +54,6 @@ struct ctl_stats_s {
 	size_t		active;
 	size_t		metadata;
 	size_t		mapped;
-	struct {
-		size_t		current;	/* stats_chunks.curchunks */
-		uint64_t	total;		/* stats_chunks.nchunks */
-		size_t		high;		/* stats_chunks.highchunks */
-	} chunks;
 	unsigned	narenas;
 	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
 };
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index f45940c..fbcdcf9 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -9,21 +9,17 @@ typedef struct extent_node_s extent_node_t;
 
 /* Tree of extents. */
 struct extent_node_s {
-	/* Linkage for the size/address-ordered tree. */
-	rb_node(extent_node_t)	link_szad;
-
-	/* Linkage for the address-ordered tree. */
-	rb_node(extent_node_t)	link_ad;
+	/* Arena from which this extent came, if any. */
+	arena_t			*arena;
 
 	/* Pointer to the extent that this tree node is responsible for. */
 	void			*addr;
 
-	/* Total region size. */
+	/*
+	 * Total region size, or 0 if this node corresponds to an arena chunk.
+	 */
 	size_t			size;
 
-	/* Arena from which this extent came, if any. */
-	arena_t			*arena;
-
 	/*
 	 * 'prof_tctx' and 'zeroed' are never needed at the same time, so
 	 * overlay them in order to fit extent_node_t in one cache line.
@@ -35,6 +31,17 @@ struct extent_node_s {
 		/* True if zero-filled; used by chunk recycling code. */
 		bool		zeroed;
 	};
+
+	union {
+		/* Linkage for the size/address-ordered tree. */
+		rb_node(extent_node_t)	link_szad;
+
+		/* Linkage for huge allocations and cached chunks nodes. */
+		ql_elm(extent_node_t)	link_ql;
+	};
+
+	/* Linkage for the address-ordered tree. */
+	rb_node(extent_node_t)	link_ad;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 231cc36..c478d16 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -27,10 +27,6 @@ arena_t	*huge_aalloc(const void *ptr);
 size_t	huge_salloc(const void *ptr);
 prof_tctx_t	*huge_prof_tctx_get(const void *ptr);
 void	huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
-bool	huge_boot(void);
-void	huge_prefork(void);
-void	huge_postfork_parent(void);
-void	huge_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index b8c994c..ab93aa5 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -119,13 +119,6 @@ static const bool config_xmalloc =
     false
 #endif
     ;
-static const bool config_ivsalloc =
-#ifdef JEMALLOC_IVSALLOC
-    true
-#else
-    false
-#endif
-    ;
 
 #ifdef JEMALLOC_C11ATOMICS
 #include <stdatomic.h>
@@ -352,9 +345,9 @@ typedef unsigned index_t;
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -378,9 +371,9 @@ typedef unsigned index_t;
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -457,9 +450,9 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/tcache.h"
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -483,6 +476,7 @@ void	jemalloc_postfork_child(void);
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 
@@ -777,7 +771,6 @@ arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
 #endif
 
 #include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/rtree.h"
 /*
  * Include portions of arena.h interleaved with tcache.h in order to resolve
  * circular dependencies.
@@ -966,10 +959,14 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
 JEMALLOC_ALWAYS_INLINE size_t
 ivsalloc(const void *ptr, bool demote)
 {
+	extent_node_t *node;
 
 	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
-	if (rtree_get(&chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
+	node = chunk_lookup(CHUNK_ADDR2BASE(ptr));
+	if (node == NULL)
 		return (0);
+	/* Only arena chunks should be looked up via interior pointers. */
+	assert(node->addr == ptr || node->size == 0);
 
 	return (isalloc(ptr, demote));
 }
@@ -999,7 +996,6 @@ p2rz(const void *ptr)
 JEMALLOC_ALWAYS_INLINE void
 idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
 {
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 	if (config_stats && is_metadata) {
@@ -1007,11 +1003,7 @@ idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
 		    config_prof));
 	}
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena_dalloc(tsd, chunk, ptr, tcache);
-	else
-		huge_dalloc(tsd, ptr, tcache);
+	arena_dalloc(tsd, ptr, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index c8d7daf..0f0db8a 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -187,12 +187,6 @@
 #undef JEMALLOC_INTERNAL_FFS
 
 /*
- * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
- * within jemalloc-owned chunks before dereferencing them.
- */
-#undef JEMALLOC_IVSALLOC
-
-/*
  * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
  */
 #undef JEMALLOC_ZONE
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index cf42bea..d5601a6 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -60,6 +60,8 @@ arena_miscelm_to_pageind
 arena_miscelm_to_rpages
 arena_nbound
 arena_new
+arena_node_alloc
+arena_node_dalloc
 arena_palloc
 arena_postfork_child
 arena_postfork_parent
@@ -103,8 +105,6 @@ atomic_sub_z
 base_alloc
 base_allocated_get
 base_boot
-base_node_alloc
-base_node_dalloc
 base_postfork_child
 base_postfork_parent
 base_prefork
@@ -130,6 +130,7 @@ chunk_alloc_mmap
 chunk_boot
 chunk_dalloc_default
 chunk_dalloc_mmap
+chunk_deregister
 chunk_dss_boot
 chunk_dss_postfork_child
 chunk_dss_postfork_parent
@@ -137,12 +138,13 @@ chunk_dss_prec_get
 chunk_dss_prec_set
 chunk_dss_prefork
 chunk_in_dss
+chunk_lookup
 chunk_npages
 chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
+chunk_register
 chunk_unmap
-chunks_mtx
 chunks_rtree
 chunksize
 chunksize_mask
@@ -218,16 +220,12 @@ hash_x86_128
 hash_x86_32
 huge_aalloc
 huge_allocated
-huge_boot
 huge_dalloc
 huge_dalloc_junk
 huge_malloc
 huge_ndalloc
 huge_nmalloc
 huge_palloc
-huge_postfork_child
-huge_postfork_parent
-huge_prefork
 huge_prof_tctx_get
 huge_prof_tctx_set
 huge_ralloc
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index e86e17c..2eb726d 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -37,7 +37,7 @@ typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
 struct rtree_node_elm_s {
 	union {
 		rtree_node_elm_t	*child;
-		void			*val;
+		extent_node_t		*val;
 	};
 };
 
@@ -110,13 +110,14 @@ bool	rtree_node_valid(rtree_node_elm_t *node);
 rtree_node_elm_t	*rtree_child_tryread(rtree_node_elm_t *elm);
 rtree_node_elm_t	*rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
     unsigned level);
-void	*rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm);
-void	rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, void *val);
+extent_node_t	*rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm);
+void	rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
+    const extent_node_t *val);
 rtree_node_elm_t	*rtree_subtree_tryread(rtree_t *rtree, unsigned level);
 rtree_node_elm_t	*rtree_subtree_read(rtree_t *rtree, unsigned level);
-void	*rtree_get(rtree_t *rtree, uintptr_t key);
-bool	rtree_set(rtree_t *rtree, uintptr_t key, void *val);
+extent_node_t	*rtree_get(rtree_t *rtree, uintptr_t key);
+bool	rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
@@ -173,18 +174,18 @@ rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
 	return (child);
 }
 
-JEMALLOC_INLINE void *
+JEMALLOC_INLINE extent_node_t *
 rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm)
 {
 
-	return (atomic_read_p(&elm->val));
+	return (atomic_read_p((void **)&elm->val));
 }
 
 JEMALLOC_INLINE void
-rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, void *val)
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
 {
 
-	atomic_write_p(&elm->val, val);
+	atomic_write_p((void **)&elm->val, val);
 }
 
 JEMALLOC_INLINE rtree_node_elm_t *
@@ -210,7 +211,7 @@ rtree_subtree_read(rtree_t *rtree, unsigned level)
 	return (subtree);
 }
 
-JEMALLOC_INLINE void *
+JEMALLOC_INLINE extent_node_t *
 rtree_get(rtree_t *rtree, uintptr_t key)
 {
 	uintptr_t subkey;
@@ -238,7 +239,7 @@ rtree_get(rtree_t *rtree, uintptr_t key)
 }
 
 JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, void *val)
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
 {
 	uintptr_t subkey;
 	unsigned i, start_level;
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index 7cba77b..c91dba9 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -135,21 +135,6 @@ struct arena_stats_s {
 	malloc_huge_stats_t	*hstats;
 };
 
-struct chunk_stats_s {
-	/* Number of chunks that were allocated. */
-	uint64_t	nchunks;
-
-	/* High-water mark for number of chunks allocated. */
-	size_t		highchunks;
-
-	/*
-	 * Current number of chunks allocated.  This value isn't maintained for
-	 * any other purpose, so keep track of it in order to be able to set
-	 * highchunks.
-	 */
-	size_t		curchunks;
-};
-
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
```