mirror of https://github.com/microsoft/mimalloc.git
synced 2024-12-27 13:33:18 +08:00

commit f820fae858: merge from dev; add retired collect on abandon

This commit moves the multi-threaded free path for huge pages out of src/alloc.c and into src/segment.c as _mi_segment_huge_page_free, and extends mi_heap_collect_ex to also collect retired pages, forced whenever the collection kind is MI_FORCE or stronger (hence "retired collect on abandon" in the title).
include/mimalloc-internal.h

@@ -83,10 +83,14 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_wsize, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
 void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
 uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size); // page start for any page
+void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
 
 void _mi_segment_thread_collect(mi_segments_tld_t* tld);
+void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
+void _mi_abandoned_await_readers(void);
+
 
 // "page.c"
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;
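For orientation, here is a minimal sketch of how the new entry point is reached from the public API: a sufficiently large allocation gets its own MI_PAGE_HUGE segment, and freeing it from a thread other than the allocator routes through _mi_free_block_mt into _mi_segment_huge_page_free (the src/alloc.c hunks below). The demo uses only the public mi_malloc/mi_free API; the 32 MiB size is an assumption chosen to sit well above the large-object limit.

    // Cross-thread free of a huge block, public API only. Internally the
    // mi_free on the second thread reaches _mi_free_block_mt, which after
    // this change calls _mi_segment_huge_page_free.
    // Build (assumed): cc demo.c -lmimalloc -lpthread
    #include <mimalloc.h>
    #include <pthread.h>
    #include <stdio.h>

    static void* free_it(void* p) {
      mi_free(p);   // non-owning thread: claims the abandoned huge segment
      return NULL;
    }

    int main(void) {
      void* p = mi_malloc(32 * 1024 * 1024);  // assumed above the large-object limit
      pthread_t t;
      pthread_create(&t, NULL, free_it, p);
      pthread_join(t, NULL);
      printf("huge block freed from another thread\n");
      return 0;
    }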
src/alloc.c (29 changed lines)
@@ -176,33 +176,6 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block)
 // Free
 // ------------------------------------------------------
 
-// free huge block from another thread
-static mi_decl_noinline void mi_free_huge_block_mt(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
-  // huge page segments are always abandoned and can be freed immediately
-  mi_assert_internal(segment->page_kind==MI_PAGE_HUGE);
-  mi_assert_internal(segment == _mi_page_segment(page));
-  mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0);
-
-  // claim it and free
-  mi_heap_t* heap = mi_get_default_heap();
-  // paranoia: if this is the last reference, the cas should always succeed
-  if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) {
-    mi_block_set_next(page, block, page->free);
-    page->free = block;
-    page->used--;
-    page->is_zero = false;
-    mi_assert(page->used == 0);
-    mi_tld_t* tld = heap->tld;
-    const size_t bsize = mi_page_block_size(page);
-    if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
-      _mi_stat_decrease(&tld->stats.giant, bsize);
-    }
-    else {
-      _mi_stat_decrease(&tld->stats.huge, bsize);
-    }
-    _mi_segment_page_free(page, true, &tld->segments);
-  }
-}
-
 // multi-threaded free
 static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)

@@ -210,7 +183,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
   // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* segment = _mi_page_segment(page);
   if (segment->page_kind==MI_PAGE_HUGE) {
-    mi_free_huge_block_mt(segment, page, block);
+    _mi_segment_huge_page_free(segment, page, block);
     return;
   }
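The replacement call keeps the same claiming idiom as the removed function: an abandoned segment has thread_id == 0, and the freeing thread installs its own id with a strong compare-and-swap before touching the page. A minimal self-contained sketch of that idiom using standard C11 atomics; the names here are illustrative, not mimalloc's mi_atomic_* wrappers:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct segment_s {
      _Atomic uintptr_t thread_id;   // 0 means abandoned (no owning thread)
    } segment_t;

    // Claim an abandoned segment for the calling thread. A *strong* CAS is
    // used because, holding the last reference to a huge segment, the claim
    // is expected to succeed; a spurious failure would leak the segment.
    static bool claim_segment(segment_t* s, uintptr_t my_id) {
      uintptr_t expected = 0;
      return atomic_compare_exchange_strong(&s->thread_id, &expected, my_id);
    }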
src/heap.c

@@ -138,6 +138,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   // (if abandoning, after this there are no more thread-delayed references into the pages.)
   _mi_heap_delayed_free(heap);
 
+  // collect retired pages
+  _mi_heap_collect_retired(heap, collect >= MI_FORCE);
+
   // collect all pages owned by this thread
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
   mi_assert_internal( collect != MI_ABANDON || mi_atomic_read_ptr(mi_block_t,&heap->thread_delayed_free) == NULL );
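The force flag passed here is collect >= MI_FORCE, which relies on the ordering of mi_collect_t: MI_ABANDON compares greater than MI_FORCE, so retired pages are force-collected both on an explicit forced collect and when a terminating thread abandons its heap (the "retired collect on abandon" of the commit title). A sketch under that assumed enum ordering:

    #include <stdbool.h>

    // Ordering assumed from heap.c of this period: abandonment is the
    // strongest collection kind.
    typedef enum mi_collect_e {
      MI_NORMAL,
      MI_FORCE,
      MI_ABANDON
    } mi_collect_t;

    // True for MI_FORCE and MI_ABANDON: retired pages are force-collected
    // on an explicit forced collect and when a heap is abandoned.
    static bool collect_is_forced(mi_collect_t collect) {
      return collect >= MI_FORCE;
    }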
src/segment.c

@@ -456,7 +456,6 @@ static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
   if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
 }
 
-
 static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
   segment->thread_id = 0;
   mi_segments_track_size(-((long)segment_size),tld);
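mi_segments_track_size takes a signed delta: allocation paths pass +segment_size, free paths pass the negation, and the peak is updated on growth. The next hunk leans on this convention to untrack a huge segment at allocation time and re-track it in whichever thread eventually frees it. A self-contained sketch of the same accounting, with illustrative names:

    #include <stddef.h>

    typedef struct segments_stats_s {
      size_t current_size;   // bytes currently held in tracked segments
      size_t peak_size;      // high-water mark of current_size
    } segments_stats_t;

    // Signed delta, mirroring mi_segments_track_size: pass +size when a
    // segment becomes tracked, -size when it is freed or abandoned.
    static void track_size(long delta, segments_stats_t* s) {
      s->current_size += (size_t)delta;   // unsigned wrap handles negatives
      if (s->current_size > s->peak_size) s->peak_size = s->current_size;
    }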
@@ -1269,11 +1268,41 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
   if (segment == NULL) return NULL;
   mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size);
+  segment->thread_id = 0; // huge pages are immediately abandoned
+  mi_segments_track_size(-(long)segment->segment_size, tld);
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);
   return page;
 }
 
+// free huge block from another thread
+void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
+  // huge page segments are always abandoned and can be freed immediately by any thread
+  mi_assert_internal(segment->page_kind==MI_PAGE_HUGE);
+  mi_assert_internal(segment == _mi_page_segment(page));
+  mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0);
+
+  // claim it and free
+  mi_heap_t* heap = mi_get_default_heap();
+  // paranoia: if this is the last reference, the cas should always succeed
+  if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) {
+    mi_block_set_next(page, block, page->free);
+    page->free = block;
+    page->used--;
+    page->is_zero = false;
+    mi_assert(page->used == 0);
+    mi_segments_tld_t* tld = &heap->tld->segments;
+    const size_t bsize = mi_page_block_size(page);
+    if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
+      _mi_stat_decrease(&tld->stats->giant, bsize);
+    }
+    else {
+      _mi_stat_decrease(&tld->stats->huge, bsize);
+    }
+    mi_segments_track_size((long)segment->segment_size, tld);
+    _mi_segment_page_free(page, true, tld);
+  }
+}
+
 /* -----------------------------------------------------------
   Page allocation
 ----------------------------------------------------------- */
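Taken together, the two segment.c hunks make the huge-segment lifecycle symmetric: it is born abandoned (thread_id = 0, size untracked) and is reclaimed by whichever thread frees the single block it contains. That single-block invariant is why the function can decrement page->used and immediately assert zero before freeing the whole page with force=true. A small sketch of the invariant, with illustrative names:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    // A huge page carries exactly one block, so one free empties the page.
    typedef struct huge_page_s {
      size_t used;      // live block count; 1 while the huge block is live
      bool   is_zero;
    } huge_page_t;

    static void free_single_block(huge_page_t* page) {
      page->used--;              // the one and only block in this page
      page->is_zero = false;     // its contents are no longer known-zero
      assert(page->used == 0);   // page now empty: whole segment can go
    }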