author    Jason Evans <jasone@canonware.com>    2017-04-17 04:51:26 (GMT)
committer Jason Evans <jasone@canonware.com>    2017-04-17 21:47:45 (GMT)
commit    76b35f4b2fdcc6eeb0ee7ecfbeaa05ef3fa2753e (patch)
tree      78dfbab1cda2c54562ec6edce95d0f37a10d59ab /src
parent    69aa5528091db805accc32af8d350f32b91bfd1a (diff)
Track extent structure serial number (esn) in extent_t.
This enables stable sorting of extent_t structures.
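
For context, the serial number gives extent_t structures a deterministic ordering key that does not depend on the (reusable) addresses the structures happen to occupy. The comparator below is only an illustrative sketch, not a function defined by this commit; it assumes an extent_esn_get() accessor paired with the extent_esn_set() call added here, and it breaks ties by structure address purely to make the order total.

static inline int
extent_esn_comp_sketch(const extent_t *a, const extent_t *b) {
	/* Primary key: serial number recorded when the structure was
	 * allocated from the base allocator. */
	size_t a_esn = extent_esn_get(a);
	size_t b_esn = extent_esn_get(b);

	int ret = (a_esn > b_esn) - (a_esn < b_esn);
	if (ret != 0) {
		return ret;
	}
	/* Tie-break by address so equal serial numbers still compare
	 * consistently. */
	return ((uintptr_t)a > (uintptr_t)b) - ((uintptr_t)a < (uintptr_t)b);
}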
Diffstat (limited to 'src')
-rw-r--r--  src/base.c    71
-rw-r--r--  src/extent.c   3
2 files changed, 44 insertions(+), 30 deletions(-)
diff --git a/src/base.c b/src/base.c
index 515d336..caec955 100644
--- a/src/base.c
+++ b/src/base.c
@@ -88,8 +88,7 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
sn = *extent_sn_next;
(*extent_sn_next)++;
- extent_init(extent, NULL, addr, size, false, NSIZES, sn,
- extent_state_active, true, true);
+ extent_binit(extent, addr, size, sn);
}
static void *
@@ -103,23 +102,22 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
- assert(extent_size_get(extent) >= *gap_size + size);
- extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
- *gap_size + size), extent_size_get(extent) - *gap_size - size,
- false, NSIZES, extent_sn_get(extent), extent_state_active, true,
- true);
+ assert(extent_bsize_get(extent) >= *gap_size + size);
+ extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
+ *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
+ extent_sn_get(extent));
return ret;
}
static void
base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
size_t gap_size, void *addr, size_t size) {
- if (extent_size_get(extent) > 0) {
+ if (extent_bsize_get(extent) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
- szind_t index_floor = size2index(extent_size_get(extent) + 1) -
+ szind_t index_floor = size2index(extent_bsize_get(extent) + 1) -
1;
extent_heap_insert(&base->avail[index_floor], extent);
}
@@ -286,28 +284,16 @@ base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
return old_extent_hooks;
}
-/*
- * base_alloc() returns zeroed memory, which is always demand-zeroed for the
- * auto arenas, in order to make multi-page sparse data structures such as radix
- * tree nodes efficient with respect to physical memory usage. Upon success a
- * pointer to at least size bytes with specified alignment is returned. Note
- * that size is rounded up to the nearest multiple of alignment to avoid false
- * sharing.
- */
-void *
-base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
- void *ret;
- size_t usize, asize;
- szind_t i;
- extent_t *extent;
-
+static void *
+base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
+ size_t *esn) {
alignment = QUANTUM_CEILING(alignment);
- usize = ALIGNMENT_CEILING(size, alignment);
- asize = usize + alignment - QUANTUM;
+ size_t usize = ALIGNMENT_CEILING(size, alignment);
+ size_t asize = usize + alignment - QUANTUM;
- extent = NULL;
+ extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
- for (i = size2index(asize); i < NSIZES; i++) {
+ for (szind_t i = size2index(asize); i < NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
@@ -318,17 +304,46 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
}
+ void *ret;
if (extent == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
+ if (esn != NULL) {
+ *esn = extent_sn_get(extent);
+ }
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return ret;
}
+/*
+ * base_alloc() returns zeroed memory, which is always demand-zeroed for the
+ * auto arenas, in order to make multi-page sparse data structures such as radix
+ * tree nodes efficient with respect to physical memory usage. Upon success a
+ * pointer to at least size bytes with specified alignment is returned. Note
+ * that size is rounded up to the nearest multiple of alignment to avoid false
+ * sharing.
+ */
+void *
+base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
+ return base_alloc_impl(tsdn, base, size, alignment, NULL);
+}
+
+extent_t *
+base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+ size_t esn;
+ extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
+ CACHELINE, &esn);
+ if (extent == NULL) {
+ return NULL;
+ }
+ extent_esn_set(extent, esn);
+ return extent;
+}
+
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
size_t *mapped) {
diff --git a/src/extent.c b/src/extent.c
index 2344e9c..c999ae6 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -98,8 +98,7 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) {
extent = extent_list_last(&arena->extent_freelist);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_freelist_mtx);
- return base_alloc(tsdn, arena->base, sizeof(extent_t),
- CACHELINE);
+ return base_alloc_extent(tsdn, arena->base);
}
extent_list_remove(&arena->extent_freelist, extent);
malloc_mutex_unlock(tsdn, &arena->extent_freelist_mtx);