fix stats for purging

daanx 2023-04-05 11:11:03 -07:00
parent 96e9e6f570
commit cdefd5b69c
4 changed files with 57 additions and 10 deletions

View File

@@ -100,6 +100,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
 bool _mi_os_protect(void* addr, size_t size);
 bool _mi_os_unprotect(void* addr, size_t size);
 bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, bool* is_zero, mi_stats_t* stats);
 void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, bool* is_zero, mi_stats_t* tld_stats);
@@ -114,7 +115,7 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats);
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, size_t committed, mi_stats_t* stats);
 void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id);

View File

@@ -384,11 +384,24 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks,
   mi_assert_internal(arena->allow_decommit);
   const size_t size = mi_arena_block_size(blocks);
   void* const p = arena->start + mi_arena_block_size(mi_bitmap_index_bit(bitmap_idx));
-  const bool decommitted = _mi_os_purge(p, size, stats);
+  bool needs_recommit;
+  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
+    // all blocks are committed, we can purge freely
+    needs_recommit = _mi_os_purge(p, size, stats);
+  }
+  else {
+    // some blocks are not committed -- this can happen when a partially committed block is freed
+    // in `_mi_arena_free` and is conservatively marked as uncommitted but still scheduled for a purge
+    // we must not reset in that case, and must not count the decommit in the stats (they were already adjusted)
+    mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
+    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
+    _mi_stat_increase(&stats->committed, size);
+  }
+
   // clear the purged blocks
   _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
   // update committed bitmap
-  if (decommitted) {
+  if (needs_recommit) {
     _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
   }
 }
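The `_mi_stat_increase` in the `else` branch is the heart of the stats fix; a worked trace (sizes illustrative) of how the `committed` statistic stays balanced for a partially committed range:

  // a 4 MiB arena range of which only 1 MiB was committed:
  //
  //   at _mi_arena_free:  _mi_stat_decrease(&stats->committed, 1 MiB);
  //                       // its real committed share; the range is marked
  //                       // uncommitted and a purge is scheduled
  //   at mi_arena_purge:  _mi_os_purge_ex(p, 4 MiB, false, stats);
  //                       // the decommit inside decreases `committed` by the full 4 MiB
  //                       _mi_stat_increase(&stats->committed, 4 MiB);
  //                       // compensate: the range was already accounted as
  //                       // uncommitted at free time
  //
  // net change to `committed`: -1 MiB, exactly the memory that was actually committed.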
@@ -525,13 +538,20 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
   Arena free
 ----------------------------------------------------------- */

-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats) {
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, size_t committed_size, mi_stats_t* stats) {
   mi_assert_internal(size > 0 && stats != NULL);
+  mi_assert_internal(committed_size <= size);
   if (p==NULL) return;
   if (size==0) return;
+  const bool all_committed = (committed_size == size);
+
   if (memid == MI_MEMID_OS) {
     // was a direct OS allocation, pass through
+    if (!all_committed && committed_size > 0) {
+      // if partially committed, adjust the committed stats
+      _mi_stat_decrease(&stats->committed, committed_size);
+    }
     _mi_os_free_aligned(p, size, alignment, align_offset, all_committed, stats);
   }
   else {
@@ -566,10 +586,19 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
   else {
     mi_assert_internal(arena->blocks_committed != NULL);
     mi_assert_internal(arena->blocks_purge != NULL);
     if (!all_committed) {
-      // assume the entire range as no longer committed
+      // mark the entire range as no longer committed (so we recommit the full range when re-using)
       _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
       mi_track_mem_noaccess(p,size);
+      if (committed_size > 0) {
+        // if partially committed, adjust the committed stats (as the range will be recommitted when re-used)
+        // the delayed purge must then take care not to count a decommit if the range is not marked as committed
+        _mi_stat_decrease(&stats->committed, committed_size);
+      }
+      // note: if not all committed, the purge may reset/decommit an entire range that contains
+      // already decommitted parts; since a purge consistently uses either reset or decommit,
+      // this works out (we never reset decommitted parts)
     }
     // (delay) purge the entire range
     mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
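For callers, the new `committed_size` parameter replaces the old `all_committed` bool: `committed_size == size` means fully committed, `0` means nothing committed, and anything in between is the partially committed case handled above. A minimal caller sketch (function name and sizes hypothetical, assuming mimalloc's internal header):

  static void example_free(void* p, size_t alignment, size_t memid, mi_stats_t* stats) {
    const size_t size      = 4 * 1024 * 1024;  // full range
    const size_t committed = 1 * 1024 * 1024;  // bytes actually committed (<= size)
    // before this commit only a bool could be passed, so the committed
    // statistic drifted whenever a range was partially committed
    _mi_arena_free(p, size, alignment, /* align_offset */ 0, memid, committed, stats);
  }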

View File

@@ -438,7 +438,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
 // either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
-bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats)
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
 {
   if (!mi_option_is_enabled(mi_option_allow_purge)) return false;
   _mi_stat_counter_increase(&stats->purge_calls, 1);
@@ -452,11 +452,18 @@ bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats)
     return needs_recommit;
   }
   else {
-    _mi_os_reset(p, size, stats);
+    if (allow_reset) {  // a reset is sometimes not allowed, e.g. when the range is not fully committed
+      _mi_os_reset(p, size, stats);
+    }
     return false;  // not decommitted
   }
 }

+// either resets or decommits memory, returns true if the memory needs
+// to be recommitted if it is to be re-used later on.
+bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats) {
+  return _mi_os_purge_ex(p, size, true /* allow reset */, stats);
+}

 // Protect a region in memory to be not accessible.
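With this split, `_mi_os_purge` keeps its old reset-or-decommit behavior while `_mi_os_purge_ex` lets the arena forbid the reset path for ranges that may contain decommitted parts. A minimal usage sketch (caller name hypothetical):

  static void example_purge(void* p, size_t size, bool fully_committed, mi_stats_t* stats) {
    bool needs_recommit;
    if (fully_committed) {
      needs_recommit = _mi_os_purge(p, size, stats);            // may reset or decommit
    }
    else {
      needs_recommit = _mi_os_purge_ex(p, size, false, stats);  // decommit only, never reset
    }
    if (needs_recommit) {
      // the caller must recommit (e.g. via _mi_os_commit) before re-using the memory
    }
  }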

View File

@@ -373,10 +373,14 @@ static void mi_reset_delayed(mi_segments_tld_t* tld) {
   Segment size calculations
 ----------------------------------------------------------- */

+static size_t mi_segment_raw_page_size(const mi_segment_t* segment) {
+  return (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
+}
+
 // Raw start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
 // The raw start is not taking aligned block allocation into consideration.
 static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
-  size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
+  size_t psize = mi_segment_raw_page_size(segment);
   uint8_t* p = (uint8_t*)segment + page->segment_idx * psize;
   if (page->segment_idx == 0) {
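For reference, the two cases the new helper folds into one place (shift value illustrative):

  // mi_segment_raw_page_size(segment):
  //   regular page, page_shift == 16  ->  (size_t)1 << 16 == 64 KiB, independent of the page index
  //   MI_PAGE_HUGE                    ->  segment->segment_size (the single page spans the whole segment)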
@@ -475,13 +479,19 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   bool any_reset = false;
   bool fully_committed = true;
+  size_t committed = 0;
+  const size_t page_size = mi_segment_raw_page_size(segment);
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
+    if (page->is_committed) { committed += page_size; }
     if (!page->is_committed) { fully_committed = false; }
     if (page->is_reset) { any_reset = true; }
   }
-  // TODO: for now, pages always reset but we can purge instead allowing for pages to be decommitted.
   MI_UNUSED(any_reset);
+  MI_UNUSED(fully_committed);
+  mi_assert_internal((fully_committed && committed == segment_size) || (!fully_committed && committed < segment_size));
+
+  // TODO: for now, pages always reset but we can purge instead allowing for pages to be decommitted.
   /*
   if (any_reset && mi_option_is_enabled(mi_option_reset_decommits)) {
     fully_committed = false;
@@ -489,7 +499,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   */
   _mi_abandoned_await_readers();  // prevent ABA issue if concurrent readers try to access our memory (that might be purged)
-  _mi_arena_free(segment, segment_size, segment->mem_alignment, segment->mem_align_offset, segment->memid, fully_committed, tld->stats);
+  _mi_arena_free(segment, segment_size, segment->mem_alignment, segment->mem_align_offset, segment->memid, committed, tld->stats);
 }

 // called by threads that are terminating to free cached segments
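A worked instance (sizes illustrative) of the new `committed` accounting in `mi_segment_os_free`:

  // segment_size == 4 MiB, capacity == 64, raw page size == 64 KiB,
  // with 10 pages committed:
  //   committed = 10 * 64 KiB = 640 KiB   (< segment_size, so not fully committed)
  //   the assert holds:  !fully_committed && committed < segment_size
  //   _mi_arena_free(segment, segment_size, .., committed, tld->stats) then
  //   decreases the committed statistic by exactly 640 KiB instead of treating
  //   the segment as all-or-nothing committed.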