author     Sam Gross <colesbury@gmail.com>            2024-03-06 14:42:11 (GMT)
committer  GitHub <noreply@github.com>                2024-03-06 14:42:11 (GMT)
commit     c012c8ab7bb72a733bd98be5df32e262b9045f1a (patch)
tree       c3f39b0baae9a5b28816e768d788f0ec8d60ee0c /Objects/mimalloc/segment.c
parent     02ee475ee3ce9468d44758df2cd79df9f0926303 (diff)
gh-115103: Delay reuse of mimalloc pages that store PyObjects (#115435)
This implements the delayed reuse of mimalloc pages that contain Python
objects in the free-threaded build.
Allocations of the same size class are grouped into data structures called
pages; these are distinct from operating system pages. For thread safety, we
want to ensure that memory used to store PyObjects remains valid as long as
there may be concurrent lock-free readers, so we want to delay reusing that
memory for other size classes or other heaps, and delay returning it to the
operating system.
When a mimalloc page becomes empty, instead of freeing it immediately, we tag
it with a QSBR goal and insert it into a linked list of pages to be freed,
kept in the per-thread state. When mimalloc needs a fresh page, we process
this queue and free any still-empty pages that are now deemed safe to free.
Pages waiting to be freed remain available for allocations of the same size
class, and allocating from such a page prevents it from being freed. There is
additional logic to handle abandoned pages when threads exit.
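
To make the mechanism concrete, here is a minimal C sketch of the delayed-free
queue described above. This is an illustration only, not the actual CPython
code: the qsbr_* helpers, the page and thread-state structs, and the page_*
allocator hooks are hypothetical stand-ins (the real page keeps its queue link
in a qsbr_node list node, and the real helpers live in CPython's QSBR and
object-allocator code).

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Simplified page: only the fields relevant to delayed reuse. */
    typedef struct page_s {
        uint64_t qsbr_goal;        /* epoch at which freeing is safe; 0 = not queued */
        struct page_s *qsbr_next;  /* link in the per-thread delayed-free queue */
    } page_t;

    typedef struct {
        page_t *pages_to_free;     /* per-thread queue of empty pages awaiting their goal */
    } thread_state_t;

    /* Hypothetical QSBR primitives: advance hands out a future epoch to use
     * as a goal; poll reports whether every thread has passed that epoch. */
    uint64_t qsbr_deferred_advance(void);
    bool qsbr_poll(uint64_t goal);

    /* Hypothetical allocator hooks. */
    bool page_is_empty(page_t *page);
    void page_free_for_real(page_t *page);

    /* When a page becomes empty, tag it with a QSBR goal and queue it
     * instead of freeing it immediately. */
    static void page_retire_deferred(thread_state_t *ts, page_t *page)
    {
        page->qsbr_goal = qsbr_deferred_advance();
        page->qsbr_next = ts->pages_to_free;
        ts->pages_to_free = page;
    }

    /* When a fresh page is needed, sweep the queue: unlink and free pages
     * that are both still empty and past their goal; a page that served an
     * allocation in the meantime is no longer empty and stays alive. */
    static void process_deferred_frees(thread_state_t *ts)
    {
        page_t **link = &ts->pages_to_free;
        while (*link != NULL) {
            page_t *page = *link;
            if (page_is_empty(page) && qsbr_poll(page->qsbr_goal)) {
                *link = page->qsbr_next;   /* unlink ... */
                page->qsbr_goal = 0;
                page_free_for_real(page);  /* ... and really free it */
            }
            else {
                link = &page->qsbr_next;
            }
        }
    }

The key invariant is that qsbr_poll(goal) only returns true once every thread
has passed a quiescent state after the goal was recorded, so no lock-free
reader can still hold a pointer into the page when it is finally freed.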
Diffstat (limited to 'Objects/mimalloc/segment.c')
-rw-r--r--  Objects/mimalloc/segment.c  16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/Objects/mimalloc/segment.c b/Objects/mimalloc/segment.c
index 584233b..08b1564 100644
--- a/Objects/mimalloc/segment.c
+++ b/Objects/mimalloc/segment.c
@@ -982,6 +982,10 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   mi_assert_internal(mi_page_all_free(page));
   mi_segment_t* segment = _mi_ptr_segment(page);
   mi_assert_internal(segment->used > 0);
+#ifdef Py_GIL_DISABLED
+  mi_assert_internal(page->qsbr_goal == 0);
+  mi_assert_internal(page->qsbr_node.next == NULL);
+#endif
 
   size_t inuse = page->capacity * mi_page_block_size(page);
   _mi_stat_decrease(&tld->stats->page_committed, inuse);
@@ -1270,10 +1274,13 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
       // ensure used count is up to date and collect potential concurrent frees
       mi_page_t* const page = mi_slice_to_page(slice);
       _mi_page_free_collect(page, false);
-      if (mi_page_all_free(page)) {
+      if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
         // if this page is all free now, free it without adding to any queues (yet)
         mi_assert_internal(page->next == NULL && page->prev==NULL);
         _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
+#ifdef Py_GIL_DISABLED
+        page->qsbr_goal = 0;
+#endif
         segment->abandoned--;
         slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce!
         mi_assert_internal(!mi_slice_is_used(slice));
@@ -1344,15 +1351,18 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
      mi_page_set_heap(page, target_heap);
      _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
      _mi_page_free_collect(page, false); // ensure used count is up to date
-     if (mi_page_all_free(page)) {
+     if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
        // if everything free by now, free the page
+#ifdef Py_GIL_DISABLED
+      page->qsbr_goal = 0;
+#endif
        slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing
      }
      else {
        // otherwise reclaim it into the heap
        _mi_page_reclaim(target_heap, page);
        if (requested_block_size == page->xblock_size && mi_page_has_any_available(page) &&
-          heap == target_heap) {
+          requested_block_size <= MI_MEDIUM_OBJ_SIZE_MAX && heap == target_heap) {
          if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
        }
      }
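
For reference, the _PyMem_mi_page_is_safe_to_free() check added in the diff
can be read as roughly the following predicate. This is a hedged sketch
reusing the hypothetical qsbr_poll() and page_t from the earlier example, not
the real helper, which lives in CPython's object allocator and carries more
bookkeeping:

    /* Sketch only: a page with no recorded goal was never exposed to
     * lock-free readers and may be freed at once; otherwise it is safe
     * only after every thread has passed the page's QSBR goal. */
    static bool page_is_safe_to_free(page_t *page)
    {
        if (page->qsbr_goal == 0) {
            return true;
        }
        return qsbr_poll(page->qsbr_goal);
    }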