From 9b7ac9a1a67308a26d09f0b87d2e876fbc61c86c Mon Sep 17 00:00:00 2001
From: daanx
Date: Mon, 25 Nov 2024 16:58:02 -0800
Subject: [PATCH] clean up candidate search; add mi_collect_reduce

---
 ide/vs2022/mimalloc.vcxproj |  2 +-
 include/mimalloc.h          |  1 +
 src/page-queue.c            | 10 +++++++++-
 src/page.c                  | 23 ++++++++++++++-------
 src/segment.c               | 40 +++++++++++++++++++++++++++++++++++++++---------
 5 files changed, 58 insertions(+), 18 deletions(-)

diff --git a/ide/vs2022/mimalloc.vcxproj b/ide/vs2022/mimalloc.vcxproj
index 160f1436..dddab777 100644
--- a/ide/vs2022/mimalloc.vcxproj
+++ b/ide/vs2022/mimalloc.vcxproj
@@ -116,7 +116,7 @@
       <SDLCheck>true</SDLCheck>
       <ConformanceMode>Default</ConformanceMode>
       <AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
-      <PreprocessorDefinitions>MI_DEBUG=4;MI_GUARDED=1;%(PreprocessorDefinitions);</PreprocessorDefinitions>
+      <PreprocessorDefinitions>MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions);</PreprocessorDefinitions>
       <CompileAs>CompileAsCpp</CompileAs>
       <SupportJustMyCode>false</SupportJustMyCode>
       <LanguageStandard>stdcpp20</LanguageStandard>
diff --git a/include/mimalloc.h b/include/mimalloc.h
index 83cbda12..5916228b 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -148,6 +148,7 @@ typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
 mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);
 
 mi_decl_export void mi_collect(bool force)    mi_attr_noexcept;
+mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept;
 mi_decl_export int  mi_version(void)          mi_attr_noexcept;
 mi_decl_export void mi_stats_reset(void)      mi_attr_noexcept;
 mi_decl_export void mi_stats_merge(void)      mi_attr_noexcept;
diff --git a/src/page-queue.c b/src/page-queue.c
index 3034e15d..9796f3dc 100644
--- a/src/page-queue.c
+++ b/src/page-queue.c
@@ -259,6 +259,14 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   heap->page_count++;
 }
 
+static void mi_page_queue_move_to_front(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
+  mi_assert_internal(mi_page_heap(page) == heap);
+  mi_assert_internal(mi_page_queue_contains(queue, page));
+  if (queue->first == page) return;
+  mi_page_queue_remove(queue, page);
+  mi_page_queue_push(heap, queue, page);
+  mi_assert_internal(queue->first == page);
+}
 
 static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) {
   mi_assert_internal(page != NULL);
@@ -335,7 +343,7 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
 
 static void mi_page_queue_enqueue_from_full(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) {
-  // note: we could insert at the front to increase reuse, but it slows down certain benchmarks (like `alloc-test`)
-  mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end of the `to` queue? */, page);
+  // note: we now insert at the front to increase reuse; this previously slowed down certain benchmarks (like `alloc-test`)
+  mi_page_queue_enqueue_from_ex(to, from, false /* enqueue at the end of the `to` queue? */, page);
 }
 
 // Only called from `mi_heap_absorb`.
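The header change above exports the new `mi_collect_reduce` entry point, implemented in `src/segment.c` further down. A minimal usage sketch, assuming a build that includes this patch; the 64 MiB target and the idle-cleanup scenario are illustrative, not part of the patch:

#include <mimalloc.h>

// Hypothetical idle-time cleanup for a long-lived worker thread: force a
// collection, then abandon thread-owned segments until roughly 64 MiB of
// segment memory remains. A target of 0 falls back to the configured
// mi_option_target_segments_per_thread.
static void worker_idle_cleanup(void) {
  mi_collect_reduce(64 * 1024 * 1024);  // 64 MiB is an illustrative target
}

int main(void) {
  void* p = mi_malloc(1 << 20);  // ... allocation-heavy phase ...
  mi_free(p);
  worker_idle_cleanup();         // give memory back before going idle
  return 0;
}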
diff --git a/src/page.c b/src/page.c
index 3cf91ba8..43ac7c4e 100644
--- a/src/page.c
+++ b/src/page.c
@@ -471,6 +471,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   // how to check this efficiently though...
   // for now, we don't retire if it is the only page left of this size class.
   mi_page_queue_t* pq = mi_page_queue_of(page);
+  #if MI_RETIRE_CYCLES > 0
   const size_t bsize = mi_page_block_size(page);
   if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) {  // not full or huge queue?
     if (pq->last==page && pq->first==page) { // the only page in the queue?
@@ -486,7 +487,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
       return; // don't free after all
     }
   }
-
+  #endif
   _mi_page_free(page, pq, false);
 }
 
@@ -753,6 +754,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   size_t candidate_count = 0; // we reset this on the first candidate to limit the search
   mi_page_t* page_candidate = NULL; // a page with free space
   mi_page_t* page = pq->first;
+
   while (page != NULL)
   {
     mi_page_t* next = page->next; // remember next
@@ -764,7 +766,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
 
     // collect freed blocks by us and other threads
     _mi_page_free_collect(page, false);
 
-#if defined(MI_MAX_CANDIDATE_SEARCH)
+    #if MI_MAX_CANDIDATE_SEARCH > 1
     // search up to N pages for a best candidate
 
     // is the local free list non-empty?
@@ -783,7 +785,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
         page_candidate = page;
         candidate_count = 0;
       }
-      else if (!mi_page_is_expandable(page) && page->used >= page_candidate->used) {
+      else if (/* !mi_page_is_expandable(page) && */ page->used >= page_candidate->used) {
        page_candidate = page;
       }
       // if we find a non-expandable candidate, or searched for N pages, return with the best candidate
@@ -792,7 +794,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
         break;
       }
     }
-#else
+    #else
     // first-fit algorithm
     // If the page contains free blocks, we are done
     if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) {
@@ -803,7 +805,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     // queue so we don't visit long-lived pages too often.
     mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
     mi_page_to_full(page, pq);
-#endif
+    #endif
 
     page = next;
   } // for each page
@@ -828,10 +830,14 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     }
   }
   else {
-    // mi_assert(pq->first == page);
+    // move the page to the front of the queue
+    mi_page_queue_move_to_front(heap, pq, page);
     page->retire_expire = 0;
+    // _mi_heap_collect_retired(heap, false); // update retire counts; note: increases rss on MemoryLoad bench so don't do this
   }
   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
+
+
   return page;
 }
 
@@ -839,7 +845,9 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
 
 // Find a page with free blocks of `size`.
 static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
-  mi_page_queue_t* pq = mi_page_queue(heap,size);
+  mi_page_queue_t* pq = mi_page_queue(heap, size);
+
+  // check the first page: we do this even with the candidate search, otherwise we would re-search every time
   mi_page_t* page = pq->first;
   if (page != NULL) {
    #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
@@ -858,6 +866,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
       return page; // fast path
     }
   }
+
   return mi_page_queue_find_free_ex(heap, pq, true);
 }
 
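The page.c changes replace pure first-fit with a bounded best-fit: up to `MI_MAX_CANDIDATE_SEARCH` pages are examined and the fullest page with available blocks wins (concentrating allocations in fuller pages so emptier ones can be freed or retired), and the chosen page is moved to the front of its queue so the next allocation finds it immediately. The following self-contained sketch shows the shape of that search; `node_t`, `has_free`, and `MAX_SEARCH` are hypothetical stand-ins, not mimalloc internals:

#include <stddef.h>
#include <stdbool.h>

#define MAX_SEARCH 8   // bound on pages visited, in the spirit of MI_MAX_CANDIDATE_SEARCH

typedef struct node_s {
  struct node_s* next;
  size_t used;       // blocks in use: fuller pages are preferred
  bool   has_free;   // page has an immediately available block
} node_t;

// Visit at most MAX_SEARCH nodes and return the fullest one with free space.
// (The #else branch in the patch is the old behavior: return the first fit.)
static node_t* find_best_candidate(node_t* head) {
  node_t* candidate = NULL;
  size_t visited = 0;
  for (node_t* n = head; n != NULL && visited < MAX_SEARCH; n = n->next) {
    visited++;
    if (!n->has_free) continue;
    if (candidate == NULL || n->used >= candidate->used) {
      candidate = n;  // prefer fuller pages to densify the queue
    }
  }
  return candidate;
}

int main(void) {
  node_t c = { NULL, 7, true  };
  node_t b = { &c,   2, true  };
  node_t a = { &b,   5, false };
  return (find_best_candidate(&a) == &c) ? 0 : 1;  // c is fullest with free space
}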
diff --git a/src/segment.c b/src/segment.c
index d2604436..16764da8 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -979,6 +979,13 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
   _mi_arena_field_cursor_done(&current);
 }
 
+
+static bool segment_count_is_within_target(mi_segments_tld_t* tld, size_t* ptarget) {
+  const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 0, 1024);
+  if (ptarget != NULL) { *ptarget = target; }
+  return (target == 0 || tld->count < target);
+}
+
 static long mi_segment_get_reclaim_tries(mi_segments_tld_t* tld) {
   // limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
   const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
@@ -1001,7 +1008,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
   mi_segment_t* segment = NULL;
   mi_arena_field_cursor_t current;
   _mi_arena_field_cursor_init(heap, tld->subproc, false /* non-blocking */, &current);
-  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
+  while (segment_count_is_within_target(tld,NULL) && (max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
   {
     mi_assert(segment->subproc == heap->tld->segments.subproc); // cursor only visits segments in our sub-process
     segment->abandoned_visits++;
@@ -1026,7 +1033,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
       result = mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
       break;
     }
-    else if (segment->abandoned_visits > 3 && is_suitable && !mi_option_is_enabled(mi_option_target_segments_per_thread)) {
+    else if (segment->abandoned_visits > 3 && is_suitable) {
       // always reclaim on 3rd visit to limit the abandoned segment count.
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
@@ -1087,15 +1094,11 @@ static void mi_segment_force_abandon(mi_segment_t* segment, mi_segments_tld_t* t
 
 // try abandon segments.
 // this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
-static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
-  const size_t target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread,0,1024);
-  // we call this when we are about to add a fresh segment so we should be under our target segment count.
-  if (target == 0 || tld->count < target) return;
-
+static void mi_segments_try_abandon_to_target(mi_heap_t* heap, size_t target, mi_segments_tld_t* tld) {
+  if (target <= 1) return;
   const size_t min_target = (target > 4 ? (target*3)/4 : target);  // 75%
-  // todo: we should maintain a list of segments per thread; for now, only consider segments from the heap full pages
-  for (int i = 0; i < 16 && tld->count >= min_target; i++) {
+  for (int i = 0; i < 64 && tld->count >= min_target; i++) {
     mi_page_t* page = heap->pages[MI_BIN_FULL].first;
     while (page != NULL && mi_page_is_huge(page)) {
       page = page->next;
     }
@@ -1109,6 +1112,25 @@ static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
   }
 }
 
+// try to abandon segments.
+// this should be called from `reclaim_or_alloc` so we know all segments are (about) fully in use.
+static void mi_segments_try_abandon(mi_heap_t* heap, mi_segments_tld_t* tld) {
+  // we call this when we are about to add a fresh segment so we should be under our target segment count.
+  size_t target = 0;
+  if (segment_count_is_within_target(tld, &target)) return;
+  mi_segments_try_abandon_to_target(heap, target, tld);
+}
+
+void mi_collect_reduce(size_t target_size) mi_attr_noexcept {
+  mi_collect(true);
+  mi_heap_t* heap = mi_heap_get_default();
+  mi_segments_tld_t* tld = &heap->tld->segments;
+  size_t target = target_size / MI_SEGMENT_SIZE;
+  if (target == 0) {
+    target = (size_t)mi_option_get_clamp(mi_option_target_segments_per_thread, 1, 1024);
+  }
+  mi_segments_try_abandon_to_target(heap, target, tld);
+}
 
 /* -----------------------------------------------------------
    Reclaim or allocate
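The per-thread target arithmetic is shared by `mi_segments_try_abandon` (triggered on the allocation path via `segment_count_is_within_target`) and the explicit `mi_collect_reduce`. A standalone illustration of the computation follows; the 32 MiB segment size is an assumption made for the example, since `MI_SEGMENT_SIZE` depends on the build configuration:

#include <stdio.h>
#include <stddef.h>

// Assumed segment size, for illustration only.
#define EXAMPLE_SEGMENT_SIZE ((size_t)32 * 1024 * 1024)

int main(void) {
  const size_t target_size = (size_t)512 * 1024 * 1024;                // mi_collect_reduce(512 MiB)
  const size_t target = target_size / EXAMPLE_SEGMENT_SIZE;            // -> 16 segments
  const size_t min_target = (target > 4 ? (target * 3) / 4 : target);  // 75% -> 12 segments
  // mi_segments_try_abandon_to_target then abandons segments holding full
  // (non-huge) pages, for at most 64 iterations, until the per-thread
  // segment count drops below min_target.
  printf("target = %zu segments, abandon while count >= %zu\n", target, min_target);
  return 0;
}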