merge from dev where huge objects are now part of page queues again

Daan Leijen 2022-11-22 21:54:58 -08:00
commit ed82aa90ea
10 changed files with 104 additions and 30 deletions

View File

@@ -89,6 +89,7 @@ bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
// bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, mi_stats_t* tld_stats);
void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
@@ -114,7 +115,12 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t*
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
void _mi_segment_thread_collect(mi_segments_tld_t* tld);
#if MI_HUGE_PAGE_ABANDON
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#else
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#endif
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);

View File

@@ -71,6 +71,13 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
// We used to abandon huge pages and eagerly deallocate them if freed from another thread,
// but that makes it impossible to visit them during a heap walk or to include them in a
// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
// another thread so most memory is available until it gets properly freed by the owning thread.
// #define MI_HUGE_PAGE_ABANDON 1
// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------
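
A minimal user-level sketch of what this change enables, using only the public mimalloc.h API (the 64 MiB size is merely assumed to be large enough to land in a huge segment): because huge pages stay in the owning heap's page queues, a heap walk now visits them and mi_heap_destroy releases them.

#include <mimalloc.h>
#include <stdbool.h>
#include <stdio.h>

// Sketch: huge allocations are visited by a heap walk and freed by mi_heap_destroy.
static bool print_block(const mi_heap_t* heap, const mi_heap_area_t* area,
                        void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)arg;
  if (block != NULL) printf("visited block %p of size %zu\n", block, block_size);
  return true;  // continue visiting
}

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* huge = mi_heap_malloc(heap, 64 * 1024 * 1024);  // assumed to exceed MI_LARGE_OBJ_SIZE_MAX
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &print_block, NULL);
  mi_heap_destroy(heap);  // releases everything in the heap, including the huge block
  (void)huge;
  return 0;
}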

View File

@@ -59,8 +59,11 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
mi_assert_internal(adjust <= alignment);
void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
if (aligned_p != p) { mi_page_set_has_aligned(_mi_ptr_page(p), true); }
if (aligned_p != p) {
mi_page_set_has_aligned(_mi_ptr_page(p), true);
}
mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
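
To make the adjustment above concrete, here is a small standalone worked example with made-up numbers (4 KiB alignment, zero offset, a hypothetical block address); it mirrors the arithmetic of the fallback path, not mimalloc's internals.

#include <assert.h>
#include <stdint.h>

int main(void) {
  const uintptr_t alignment  = 0x1000;            // requested alignment (4 KiB)
  const uintptr_t align_mask = alignment - 1;
  const uintptr_t offset     = 0;
  const uintptr_t p          = 0x7f0000001008;    // hypothetical block start
  uintptr_t adjust    = alignment - ((p + offset) & align_mask);  // 0x1000 - 0x008 = 0xff8
  uintptr_t aligned_p = (adjust == alignment ? p : p + adjust);   // 0x7f0000002000
  assert(adjust <= alignment);
  assert(((aligned_p + offset) % alignment) == 0);                // aligned as required
  return 0;
}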

View File

@@ -347,6 +347,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
}
#endif
#if MI_HUGE_PAGE_ABANDON
#if (MI_STAT>0)
// maintain stats for huge objects
static void mi_stat_huge_free(const mi_page_t* page) {
@@ -364,12 +365,13 @@ static void mi_stat_huge_free(const mi_page_t* page) {
MI_UNUSED(page);
}
#endif
#endif
// ------------------------------------------------------
// Free
// ------------------------------------------------------
// multi-threaded free (or free in huge block)
// multi-threaded free (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// The padding check may access the non-thread-owned page for the key values.
@@ -379,12 +381,21 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// huge page segments are always abandoned and can be freed immediately
mi_segment_t* segment = _mi_page_segment(page);
if (segment->kind==MI_SEGMENT_HUGE) {
if (segment->kind == MI_SEGMENT_HUGE) {
#if MI_HUGE_PAGE_ABANDON
// huge page segments are always abandoned and can be freed immediately
mi_stat_huge_free(page);
_mi_segment_huge_page_free(segment, page, block);
return;
#else
// huge pages are special as they occupy the entire segment
// as these are large we reset the memory occupied by the page so it is available to other threads
// (as the owning thread needs to actually free the memory later).
_mi_segment_huge_page_reset(segment, page, block);
#endif
}
#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
#endif
@@ -466,7 +477,6 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
//mi_page_t* const page = _mi_segment_page_of(segment, p);
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
mi_stat_free(page, block); // stat_free may access the padding
mi_track_free(p);
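
A sketch of the cross-thread scenario that _mi_free_block_mt now handles when MI_HUGE_PAGE_ABANDON is not defined (POSIX threads; the 64 MiB size is only assumed to be large enough to be a huge allocation): the non-owning thread's free resets/decommits the block, and the owning thread releases the segment later.

#include <mimalloc.h>
#include <pthread.h>
#include <string.h>

static void* free_from_other_thread(void* p) {
  mi_free(p);   // non-owning thread: the huge block's memory is reset/decommitted here
  return NULL;
}

int main(void) {
  void* huge = mi_malloc(64 * 1024 * 1024);  // assumed huge allocation
  memset(huge, 1, 64 * 1024 * 1024);         // touch it so it is actually committed
  pthread_t t;
  pthread_create(&t, NULL, &free_from_other_thread, huge);
  pthread_join(t, NULL);
  mi_collect(true);                          // owning thread eventually frees the segment
  return 0;
}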

View File

@@ -1079,13 +1079,8 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) {
bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
if (mi_option_is_enabled(mi_option_reset_decommits)) {
return mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!)
}
else {
*is_zero = false;
return mi_os_resetx(addr, size, false, stats);
}
}
*/

View File

@@ -229,8 +229,9 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(!mi_page_queue_contains(queue, page));
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
#endif
mi_assert_internal(page->xblock_size == queue->block_size ||
(page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

View File

@@ -112,7 +112,10 @@ bool _mi_page_is_valid(mi_page_t* page) {
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
if (segment->kind != MI_SEGMENT_HUGE) {
#if MI_HUGE_PAGE_ABANDON
if (segment->kind != MI_SEGMENT_HUGE)
#endif
{
mi_page_queue_t* pq = mi_page_queue_of(page);
mi_assert_internal(mi_page_queue_contains(pq, page));
mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
@@ -245,7 +248,9 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
#endif
mi_assert_internal(!page->is_reset);
// TODO: push on full queue immediately if it is full?
mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
@@ -255,22 +260,25 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
// allocate a fresh page from a segment
static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
mi_assert_internal(pq==NULL || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
#if !MI_HUGE_PAGE_ABANDON
mi_assert_internal(pq != NULL);
mi_assert_internal(mi_heap_contains_queue(heap, pq));
mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
#endif
mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
return NULL;
}
mi_assert_internal(pq==NULL || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
mi_assert_internal(pq!=NULL || page->xblock_size != 0);
mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
// a fresh page was found, initialize it
const size_t full_block_size = (pq == NULL ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
mi_assert_internal(full_block_size >= block_size);
mi_page_init(heap, page, full_block_size, heap->tld);
mi_heap_stat_increase(heap, pages, 1);
if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
mi_assert_expensive(_mi_page_is_valid(page));
return page;
}
@@ -431,7 +439,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page)) {
if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) { // not too large && not full or huge queue?
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
@@ -809,17 +817,23 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
size_t block_size = _mi_os_good_alloc_size(size);
mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
#if MI_HUGE_PAGE_ABANDON
mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
#else
mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_HUGE_BLOCK_SIZE : block_size); // not block_size as that can be low if the page_alignment > 0
mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
#endif
mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
if (page != NULL) {
mi_assert_internal(mi_page_immediate_available(page));
if (pq == NULL) {
// huge pages are directly abandoned
if (is_huge) {
mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
mi_assert_internal(_mi_page_segment(page)->used==1);
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
mi_page_set_heap(page, NULL);
#endif
}
else {
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);

View File

@@ -47,6 +47,7 @@ bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats);
// arena.c
mi_arena_id_t _mi_arena_id_none(void);
@@ -481,11 +482,21 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
-----------------------------------------------------------------------------*/
bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld) {
if (mi_option_is_enabled(mi_option_reset_decommits)) {
return _mi_os_decommit(p, size, tld->stats);
}
else {
return _mi_os_reset(p, size, tld->stats);
}
}
bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) {
if (mi_option_is_enabled(mi_option_reset_decommits)) {
return _mi_os_commit(p, size, is_zero, tld->stats);
}
else {
return _mi_os_unreset(p, size, is_zero, tld->stats);
}
}
bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) {
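
Whether _mi_mem_reset decommits or merely resets is driven by the reset_decommits option shown above; a small hedged sketch of flipping it through the public options API (assuming mi_option_set_enabled and mi_option_reset_decommits as exposed in mimalloc.h of this era):

#include <mimalloc.h>

int main(void) {
  // With the option enabled, _mi_mem_reset() decommits and _mi_mem_unreset()
  // conservatively re-commits; with it disabled, memory is only "reset".
  mi_option_set_enabled(mi_option_reset_decommits, true);
  void* p = mi_malloc(1 << 20);
  mi_free(p);
  mi_collect(true);  // give the allocator a chance to reset/decommit freed memory
  return 0;
}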

View File

@@ -388,7 +388,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
// _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
const size_t size = mi_segment_size(segment);
if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || // only push regular segments on the cache
if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || segment->kind == MI_SEGMENT_HUGE || // only push regular segments on the cache
!_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
{
const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
@@ -515,6 +515,11 @@ static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_
return mi_segment_commitx(segment,true,p,size,stats);
}
static void mi_segment_decommit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
if (!segment->allow_decommit) return;
mi_segment_commitx(segment, false, p, size, stats);
}
static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
if (!segment->allow_decommit) return;
if (mi_option_get(mi_option_decommit_delay) == 0) {
@@ -1523,18 +1528,21 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
if (segment == NULL || page==NULL) return NULL;
mi_assert_internal(segment->used==1);
mi_assert_internal(mi_page_block_size(page) >= size);
#if MI_HUGE_PAGE_ABANDON
segment->thread_id = 0; // huge segments are immediately abandoned
#endif
if (page_alignment > 0) {
size_t psize;
uint8_t* p = _mi_segment_page_start(segment, page, &psize);
uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)p, page_alignment);
mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
mi_assert_internal(psize - (aligned_p - p) >= size);
if (!segment->mem_is_pinned && page->is_committed) {
if (!segment->allow_decommit) {
// decommit the part of the page that is unused; this can be quite large (close to MI_SEGMENT_SIZE)
uint8_t* decommit_start = p + sizeof(mi_block_t); // for the free list
ptrdiff_t decommit_size = aligned_p - decommit_start;
_mi_os_decommit(decommit_start, decommit_size, os_tld->stats);
mi_segment_decommit(segment, decommit_start, decommit_size, &_mi_stats_main);
}
}
// for huge pages we initialize the xblock_size as we may
@@ -1545,6 +1553,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
return page;
}
#if MI_HUGE_PAGE_ABANDON
// free huge block from another thread
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
// huge page segments are always abandoned and can be freed immediately by any thread
@@ -1572,6 +1581,24 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
#endif
}
#else
// reset memory of a huge block from another thread
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
mi_assert_internal(segment->kind == MI_SEGMENT_HUGE);
mi_assert_internal(segment == _mi_page_segment(page));
mi_assert_internal(page->used == 1); // this is called just before the free
mi_assert_internal(page->free == NULL);
const size_t csize = mi_page_block_size(page) - sizeof(mi_block_t);
uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
if (segment->allow_decommit) {
mi_segment_decommit(segment, p, csize, &_mi_stats_main);
}
else {
_mi_os_reset(p, csize, &_mi_stats_main);
}
}
#endif
/* -----------------------------------------------------------
Page allocation and free
----------------------------------------------------------- */
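
For intuition on the decommit-versus-reset distinction used throughout this file, here is a conceptual POSIX-only sketch; it illustrates the idea, not mimalloc's actual _mi_os_decommit/_mi_os_reset implementations.

#include <stddef.h>
#include <sys/mman.h>

// Decommit: make the range inaccessible and let the OS reclaim the pages;
// it must be committed again before it can be touched.
static void decommit_sketch(void* p, size_t size) {
  mprotect(p, size, PROT_NONE);
  madvise(p, size, MADV_DONTNEED);
}

// Reset: tell the OS the contents are no longer needed but keep the range usable;
// pages are reclaimed lazily and read back as zeros on the next touch.
static void reset_sketch(void* p, size_t size) {
#if defined(MADV_FREE)
  madvise(p, size, MADV_FREE);
#else
  madvise(p, size, MADV_DONTNEED);
#endif
}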

View File

@@ -208,7 +208,7 @@ static bool test_visit(const mi_heap_t* heap, const mi_heap_area_t* area, void*
static void test_heap_walk(void) {
mi_heap_t* heap = mi_heap_new();
//mi_heap_malloc(heap, 2097152);
mi_heap_malloc(heap, 16*2097152);
mi_heap_malloc(heap, 2067152);
mi_heap_malloc(heap, 2097160);
mi_heap_malloc(heap, 24576);