diff options
Diffstat (limited to 'jemalloc/src/chunk.c')
| -rw-r--r-- | jemalloc/src/chunk.c | 39 |
1 file changed, 30 insertions, 9 deletions
diff --git a/jemalloc/src/chunk.c b/jemalloc/src/chunk.c
index e6e3bcd..00bf50a 100644
--- a/jemalloc/src/chunk.c
+++ b/jemalloc/src/chunk.c
@@ -14,11 +14,15 @@ malloc_mutex_t	chunks_mtx;
 chunk_stats_t	stats_chunks;
 #endif
 
+#ifdef JEMALLOC_IVSALLOC
+rtree_t		*chunks_rtree;
+#endif
+
 /* Various chunk-related settings. */
 size_t		chunksize;
 size_t		chunksize_mask; /* (chunksize - 1). */
 size_t		chunk_npages;
-size_t		arena_chunk_header_npages;
+size_t		map_bias;
 size_t		arena_maxclass; /* Max size class for arenas. */
 
 /******************************************************************************/
@@ -30,7 +34,7 @@ size_t		arena_maxclass; /* Max size class for arenas. */
  * advantage of them if they are returned.
  */
 void *
-chunk_alloc(size_t size, bool *zero)
+chunk_alloc(size_t size, bool base, bool *zero)
 {
 	void *ret;
 
@@ -63,10 +67,18 @@ chunk_alloc(size_t size, bool *zero)
 	/* All strategies for allocation failed. */
 	ret = NULL;
 RETURN:
+#ifdef JEMALLOC_IVSALLOC
+	if (base == false && ret != NULL) {
+		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+			chunk_dealloc(ret, size);
+			return (NULL);
+		}
+	}
+#endif
 #if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 	if (ret != NULL) {
 #  ifdef JEMALLOC_PROF
-		bool udump;
+		bool gdump;
 #  endif
 		malloc_mutex_lock(&chunks_mtx);
 #  ifdef JEMALLOC_STATS
@@ -76,17 +88,17 @@ RETURN:
 		if (stats_chunks.curchunks > stats_chunks.highchunks) {
 			stats_chunks.highchunks = stats_chunks.curchunks;
 #  ifdef JEMALLOC_PROF
-			udump = true;
+			gdump = true;
 #  endif
 		}
 #  ifdef JEMALLOC_PROF
 		else
-			udump = false;
+			gdump = false;
 #  endif
 		malloc_mutex_unlock(&chunks_mtx);
 #  ifdef JEMALLOC_PROF
-		if (opt_prof && opt_prof_udump && udump)
-			prof_udump();
+		if (opt_prof && opt_prof_gdump && gdump)
+			prof_gdump();
 #  endif
 	}
 #endif
@@ -104,6 +116,9 @@ chunk_dealloc(void *chunk, size_t size)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
+#ifdef JEMALLOC_IVSALLOC
+	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+#endif
 #if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 	malloc_mutex_lock(&chunks_mtx);
 	stats_chunks.curchunks -= (size / chunksize);
@@ -126,21 +141,27 @@ chunk_boot(void)
 {
 
 	/* Set variables according to the value of opt_lg_chunk. */
-	chunksize = (1LU << opt_lg_chunk);
+	chunksize = (ZU(1) << opt_lg_chunk);
 	assert(chunksize >= PAGE_SIZE);
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> PAGE_SHIFT);
 
+#ifdef JEMALLOC_IVSALLOC
+	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
+	if (chunks_rtree == NULL)
+		return (true);
+#endif
 #if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 	if (malloc_mutex_init(&chunks_mtx))
 		return (true);
 	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
 #endif
-
 #ifdef JEMALLOC_SWAP
 	if (chunk_swap_boot())
 		return (true);
 #endif
+	if (chunk_mmap_boot())
+		return (true);
 #ifdef JEMALLOC_DSS
 	if (chunk_dss_boot())
 		return (true);
