 Include/internal/mimalloc/mimalloc/internal.h |  2 +-
 Include/internal/mimalloc/mimalloc/types.h    |  2 ++
 Objects/mimalloc/heap.c                       | 14 ++++++++------
 Objects/mimalloc/init.c                       |  8 ++++----
 Objects/mimalloc/page.c                       |  1 +
 Objects/mimalloc/segment.c                    | 20 +++++++++++++++---
 Python/pystate.c                              |  4 ++--
 7 files changed, 35 insertions(+), 16 deletions(-)
diff --git a/Include/internal/mimalloc/mimalloc/internal.h b/Include/internal/mimalloc/mimalloc/internal.h
index afd7d18..887bf26 100644
--- a/Include/internal/mimalloc/mimalloc/internal.h
+++ b/Include/internal/mimalloc/mimalloc/internal.h
@@ -155,7 +155,7 @@ size_t _mi_bin_size(uint8_t bin); // for stats
uint8_t _mi_bin(size_t size); // for stats
// "heap.c"
-void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id);
+void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag);
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
diff --git a/Include/internal/mimalloc/mimalloc/types.h b/Include/internal/mimalloc/mimalloc/types.h
index ab41b1c..b8cae24 100644
--- a/Include/internal/mimalloc/mimalloc/types.h
+++ b/Include/internal/mimalloc/mimalloc/types.h
@@ -311,6 +311,7 @@ typedef struct mi_page_s {
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
uint8_t is_committed : 1; // `true` if the page virtual memory is committed
uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized
+ uint8_t tag : 4; // tag from the owning heap
// layout like this to optimize access in `mi_malloc` and `mi_free`
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
@@ -551,6 +552,7 @@ struct mi_heap_s {
size_t page_retired_max; // largest retired index into the `pages` array.
mi_heap_t* next; // list of heaps per thread
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
+ uint8_t tag; // custom identifier for this heap
};
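
The new `tag` bit-field shares the flag byte of `mi_page_t` with the existing bits, so four bits leave room for 16 distinct heap tags per thread. A minimal sketch of the packing, using a simplified stand-in struct rather than the real `mi_page_t`:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the flag byte of mi_page_t; the real struct
 * carries many more fields. A 4-bit tag distinguishes up to 16 heaps. */
typedef struct demo_page_s {
    uint8_t is_committed : 1;
    uint8_t is_zero_init : 1;
    uint8_t tag          : 4;
} demo_page_t;

int main(void) {
    demo_page_t page = { .is_committed = 1, .is_zero_init = 0, .tag = 15 };
    printf("tag = %u\n", page.tag);   /* 15: the 4-bit maximum */
    uint8_t wide = 16;
    page.tag = wide;                  /* wraps modulo 16 */
    printf("tag = %u\n", page.tag);   /* 0: out-of-range tags alias */
    return 0;
}

Unsigned bit-fields wrap modulo 2^width on assignment, so callers handing out tags are responsible for staying below 16.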
diff --git a/Objects/mimalloc/heap.c b/Objects/mimalloc/heap.c
index c50e3b0..6468999a 100644
--- a/Objects/mimalloc/heap.c
+++ b/Objects/mimalloc/heap.c
@@ -209,7 +209,7 @@ mi_heap_t* mi_heap_get_backing(void) {
return bheap;
}
-void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id)
+void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag)
{
_mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
heap->tld = tld;
@@ -224,17 +224,19 @@ void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id)
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->keys[0] = _mi_heap_random_next(heap);
heap->keys[1] = _mi_heap_random_next(heap);
+ heap->no_reclaim = no_reclaim;
+ heap->tag = tag;
+ // push on the thread local heaps list
+ heap->next = heap->tld->heaps;
+ heap->tld->heaps = heap;
}
mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
mi_heap_t* bheap = mi_heap_get_backing();
mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
if (heap == NULL) return NULL;
- _mi_heap_init_ex(heap, bheap->tld, arena_id);
- heap->no_reclaim = true; // don't reclaim abandoned pages or otherwise destroy is unsafe
- // push on the thread local heaps list
- heap->next = heap->tld->heaps;
- heap->tld->heaps = heap;
+ // don't reclaim abandoned pages or otherwise destroy is unsafe
+ _mi_heap_init_ex(heap, bheap->tld, arena_id, true, 0);
return heap;
}
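
`_mi_heap_init_ex` now receives `no_reclaim` and `tag` directly and performs the `tld->heaps` push itself, so every heap it initializes becomes discoverable on the per-thread list, not just the ones created via `mi_heap_new_in_arena`. A self-contained model of that push, with simplified stand-ins for `mi_heap_t` and `mi_tld_t`:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Just the fields the list push touches. */
typedef struct heap_s {
    struct tld_s*  tld;
    struct heap_s* next;        /* list of heaps per thread */
    bool           no_reclaim;
    uint8_t        tag;
} heap_t;

typedef struct tld_s {
    heap_t* heaps;              /* head of the per-thread heap list */
} tld_t;

/* Models the tail of _mi_heap_init_ex after this change. */
static void heap_init_ex(heap_t* heap, tld_t* tld,
                         bool no_reclaim, uint8_t tag) {
    heap->tld        = tld;
    heap->no_reclaim = no_reclaim;
    heap->tag        = tag;
    heap->next       = tld->heaps;   /* push on the thread-local list */
    tld->heaps       = heap;
}

int main(void) {
    tld_t tld = { .heaps = NULL };
    heap_t backing, arena_heap;
    heap_init_ex(&backing,    &tld, /*no_reclaim=*/false, /*tag=*/0);
    heap_init_ex(&arena_heap, &tld, /*no_reclaim=*/true,  /*tag=*/0);
    /* The most recently initialized heap sits at the head. */
    assert(tld.heaps == &arena_heap && arena_heap.next == &backing);
    return 0;
}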
diff --git a/Objects/mimalloc/init.c b/Objects/mimalloc/init.c
index 0446021..5897f05 100644
--- a/Objects/mimalloc/init.c
+++ b/Objects/mimalloc/init.c
@@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
- 0, false, false, false,
+ 0, false, false, false, 0,
0, // capacity
0, // reserved capacity
{ 0 }, // flags
@@ -121,7 +121,8 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next
- false
+ false,
+ 0
};
#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
@@ -298,7 +299,7 @@ static bool _mi_heap_init(void) {
if (td == NULL) return false;
_mi_tld_init(&td->tld, &td->heap);
- _mi_heap_init_ex(&td->heap, &td->tld, _mi_arena_id_none());
+ _mi_heap_init_ex(&td->heap, &td->tld, _mi_arena_id_none(), false, 0);
_mi_heap_set_default_direct(&td->heap);
}
return false;
@@ -311,7 +312,6 @@ void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
tld->segments.abandoned = &_mi_abandoned_default;
tld->os.stats = &tld->stats;
tld->heap_backing = bheap;
- tld->heaps = bheap;
}
// Free the thread local default heap (called from `mi_thread_done`)
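
Dropping `tld->heaps = bheap;` from `_mi_tld_init` is a consequence of moving the list push into `_mi_heap_init_ex`: keeping both would leave the backing heap's `next` pointer aimed at itself. A compact model of that hazard, again with simplified stand-in types:

#include <assert.h>
#include <stddef.h>

typedef struct heap_s { struct heap_s* next; } heap_t;
typedef struct tld_s  { heap_t* heaps; } tld_t;

static void push_heap(heap_t* h, tld_t* tld) {
    h->next    = tld->heaps;    /* the push _mi_heap_init_ex now does */
    tld->heaps = h;
}

int main(void) {
    tld_t tld = { .heaps = NULL };
    heap_t backing;
    tld.heaps = &backing;       /* the removed seeding: tld->heaps = bheap; */
    push_heap(&backing, &tld);
    assert(backing.next == &backing);  /* one-element cycle */
    /* Any walk over tld->heaps, such as a tag lookup, would now loop
     * forever, which is why the seeding had to go. */
    return 0;
}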
diff --git a/Objects/mimalloc/page.c b/Objects/mimalloc/page.c
index 4610cf2..8f0ce92 100644
--- a/Objects/mimalloc/page.c
+++ b/Objects/mimalloc/page.c
@@ -660,6 +660,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(block_size > 0);
// set fields
mi_page_set_heap(page, heap);
+ page->tag = heap->tag;
page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
size_t page_size;
const void* page_start = _mi_segment_page_start(segment, page, &page_size);
diff --git a/Objects/mimalloc/segment.c b/Objects/mimalloc/segment.c
index 1040da0..d9b39b0 100644
--- a/Objects/mimalloc/segment.c
+++ b/Objects/mimalloc/segment.c
@@ -1299,6 +1299,18 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
return has_page;
}
+static mi_heap_t* mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) {
+ if (heap->tag == tag) {
+ return heap;
+ }
+ for (mi_heap_t *curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
+ if (curr->tag == tag) {
+ return curr;
+ }
+ }
+ return NULL;
+}
+
// Reclaim an abandoned segment; returns NULL if the segment was freed
// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
@@ -1321,6 +1333,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
if (mi_slice_is_used(slice)) {
// in use: reclaim the page in our heap
mi_page_t* page = mi_slice_to_page(slice);
+ mi_heap_t* target_heap = mi_heap_by_tag(heap, page->tag);
mi_assert_internal(page->is_committed);
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_assert_internal(mi_page_heap(page) == NULL);
@@ -1328,7 +1341,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
segment->abandoned--;
// set the heap again and allow delayed free again
- mi_page_set_heap(page, heap);
+ mi_page_set_heap(page, target_heap);
_mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
_mi_page_free_collect(page, false); // ensure used count is up to date
if (mi_page_all_free(page)) {
@@ -1337,8 +1350,9 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
}
else {
// otherwise reclaim it into the heap
- _mi_page_reclaim(heap, page);
- if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) {
+ _mi_page_reclaim(target_heap, page);
+ if (requested_block_size == page->xblock_size && mi_page_has_any_available(page) &&
+ heap == target_heap) {
if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
}
}
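
This is the heart of the change. Together with the `page->tag = heap->tag` line in `mi_page_init` (page.c above), reclaiming an abandoned segment now routes each in-use page to the heap whose `tag` matches the page's, rather than unconditionally into the reclaiming heap, and `right_page_reclaimed` is only reported when the page landed in the requesting heap itself. A self-contained model of the lookup, with simplified stand-ins for the real types:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct heap_s {
    uint8_t        tag;
    struct tld_s*  tld;
    struct heap_s* next;
} heap_t;

typedef struct tld_s { heap_t* heaps; } tld_t;

/* Mirrors mi_heap_by_tag: prefer the reclaiming heap itself, otherwise
 * walk the per-thread list for a heap carrying the matching tag. */
static heap_t* heap_by_tag(heap_t* heap, uint8_t tag) {
    if (heap->tag == tag) return heap;
    for (heap_t* curr = heap->tld->heaps; curr != NULL; curr = curr->next) {
        if (curr->tag == tag) return curr;
    }
    return NULL;
}

int main(void) {
    tld_t tld = { .heaps = NULL };
    heap_t obj = { .tag = 1, .tld = &tld, .next = NULL };
    heap_t def = { .tag = 0, .tld = &tld, .next = &obj };
    tld.heaps = &def;
    /* A page stamped with tag 1 goes back to the tag-1 heap even when
     * the tag-0 heap is doing the reclaiming. */
    assert(heap_by_tag(&def, 1) == &obj);
    assert(heap_by_tag(&def, 0) == &def);
    assert(heap_by_tag(&def, 9) == NULL);
    return 0;
}

Note that `mi_heap_by_tag` returns NULL when no heap in the thread carries the tag; as written, the reclaim path assumes every tag found on an abandoned page has a corresponding heap in the reclaiming thread.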
diff --git a/Python/pystate.c b/Python/pystate.c
index 5f515cf..21f16b7 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -2539,8 +2539,8 @@ tstate_mimalloc_bind(PyThreadState *tstate)
tld->segments.abandoned = &tstate->interp->mimalloc.abandoned_pool;
// Initialize each heap
- for (Py_ssize_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
- _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none());
+ for (uint8_t i = 0; i < _Py_MIMALLOC_HEAP_COUNT; i++) {
+ _mi_heap_init_ex(&mts->heaps[i], tld, _mi_arena_id_none(), false, i);
}
// By default, object allocations use _Py_MIMALLOC_HEAP_OBJECT.
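
Tagging heap `i` with `i` means a page abandoned by one thread is reclaimed into the same allocation domain in another thread. Since the page tag is a 4-bit field, any heap count used as a tag source must stay below 16; a hypothetical compile-time guard, where the `DEMO_` constant merely stands in for `_Py_MIMALLOC_HEAP_COUNT` (whose real value lives in CPython's internal headers):

#include <stdint.h>

#define DEMO_MIMALLOC_HEAP_COUNT 5   /* stand-in, not CPython's value */

/* Heap indices double as page tags, and tags are 4 bits wide. */
_Static_assert(DEMO_MIMALLOC_HEAP_COUNT <= 16,
               "heap index must fit the 4-bit page tag");

int main(void) {
    /* The loop index is uint8_t to match the tag parameter's type. */
    for (uint8_t i = 0; i < DEMO_MIMALLOC_HEAP_COUNT; i++) {
        /* _mi_heap_init_ex(&heaps[i], tld, _mi_arena_id_none(), false, i); */
    }
    return 0;
}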