allow per-page purging instead of reset

daanx 2023-04-08 17:37:58 -07:00
parent 6bd591d675
commit eb62caeda0
7 changed files with 111 additions and 145 deletions

View File

@@ -327,8 +327,8 @@ typedef enum mi_option_e {
mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
mi_option_reserve_os_memory, // reserve specified amount of OS memory at startup
mi_option_deprecated_segment_cache,
mi_option_page_reset,
mi_option_abandoned_page_reset,
mi_option_deprecated_page_reset,
mi_option_abandoned_page_purge,
mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay,
mi_option_purge_delay,
@@ -340,7 +340,7 @@ typedef enum mi_option_e {
mi_option_max_segment_reclaim,
mi_option_destroy_on_exit,
mi_option_arena_reserve,
mi_option_arena_purge_delay,
mi_option_arena_purge_mult,
mi_option_allow_purge,
mi_option_purge_extend_delay,
_mi_option_last,
@@ -348,7 +348,8 @@ typedef enum mi_option_e {
mi_option_large_os_pages = mi_option_allow_large_os_pages,
mi_option_eager_region_commit = mi_option_arena_eager_commit,
mi_option_reset_decommits = mi_option_purge_decommits,
mi_option_reset_delay = mi_option_purge_delay
mi_option_reset_delay = mi_option_purge_delay,
mi_option_abandoned_page_reset = mi_option_abandoned_page_purge
} mi_option_t;
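
Note for callers: the alias entries above keep old code compiling while routing it to the new purge semantics. A minimal usage sketch (the option names come from the enum above; the delay value is illustrative):

#include <mimalloc.h>

int main(void) {
  // enable purging of free pages when a thread terminates and abandons its segments
  mi_option_enable(mi_option_abandoned_page_purge);
  // the legacy name still compiles -- it is now an alias for the option above
  // mi_option_enable(mi_option_abandoned_page_reset);
  mi_option_set(mi_option_purge_delay, 10);  // purge delay in milliseconds (illustrative)
  void* p = mi_malloc(64);
  mi_free(p);  // once fully free, the page can be queued for a delayed purge
  return 0;
}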

View File

@@ -284,7 +284,6 @@ typedef struct mi_page_s {
// "owned" by the segment
uint8_t segment_idx; // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
uint8_t segment_in_use:1; // `true` if the segment allocated this page
uint8_t is_reset:1; // `true` if the page memory was reset
uint8_t is_committed:1; // `true` if the page virtual memory is committed
uint8_t is_zero_init:1; // `true` if the page was zero initialized
@@ -327,10 +326,14 @@ typedef struct mi_segment_s {
// memory fields
size_t memid; // id for the os-level memory manager
bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
bool mem_is_large; // `true` if the memory is in OS large or huge pages. (`is_pinned` will be true)
bool mem_is_committed; // `true` if the whole segment is eagerly committed
size_t mem_alignment; // page alignment for huge pages (only used for alignment > MI_ALIGNMENT_MAX)
size_t mem_align_offset; // offset for huge page alignment (only used for alignment > MI_ALIGNMENT_MAX)
bool allow_decommit;
bool allow_purge;
// segment fields
_Atomic(struct mi_segment_s*) abandoned_next;
struct mi_segment_s* next; // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
@@ -553,7 +556,7 @@ typedef struct mi_os_tld_s {
typedef struct mi_segments_tld_s {
mi_segment_queue_t small_free; // queue of segments with free small pages
mi_segment_queue_t medium_free; // queue of segments with free medium pages
mi_page_queue_t pages_reset; // queue of freed pages that can be reset
mi_page_queue_t pages_purge; // queue of freed pages that are purged after a delay
size_t count; // current number of segments;
size_t peak_count; // peak number of segments
size_t current_size; // current size of all segments

View File

@@ -411,7 +411,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
mi_assert_internal(arena->blocks_purge != NULL);
if (!mi_option_is_enabled(mi_option_allow_purge)) return;
const long delay = mi_option_get(mi_option_arena_purge_delay);
const long delay = mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult);
if (_mi_preloading() || delay == 0) {
// decommit directly
mi_arena_purge(arena, bitmap_idx, blocks, stats);
@@ -501,7 +501,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
}
// if not fully purged, make sure to purge again in the future
if (!full_purge) {
const long delay = mi_option_get(mi_option_arena_purge_delay);
const long delay = mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult);
mi_msecs_t expected = 0;
mi_atomic_cas_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay);
}
@@ -509,7 +509,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
}
static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
const long delay = mi_option_get(mi_option_arena_purge_delay);
const long delay = mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult);
if (_mi_preloading() || delay == 0 || !mi_option_is_enabled(mi_option_allow_purge)) return; // nothing will be scheduled
const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
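
Note: the dedicated arena delay option is gone; the arena delay is now derived from the page purge delay. Assuming the default purge_delay of 10 ms (set elsewhere in options.c, not visible in this hunk) and the arena_purge_mult default of 10 introduced below, the derived delay is 10 * 10 = 100 ms, matching the default of the removed mi_option_arena_purge_delay entry.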

View File

@@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
0, false, false, false, false,
0, false, false, false,
0, // capacity
0, // reserved capacity
{ 0 }, // flags

View File

@@ -67,8 +67,8 @@ static mi_option_desc_t options[_mi_option_last] =
{-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
{ 0, UNINIT, MI_OPTION(reserve_os_memory) },
{ 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
{ 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_reset) }, // reset free page memory when a thread terminates
{ 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_purge) }, // purge free page memory when a thread terminates
{ 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit)
#if defined(__NetBSD__)
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
@@ -89,7 +89,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 128L * 1024L, UNINIT, MI_OPTION(arena_reserve) },
#endif
{ 100, UNINIT, MI_OPTION(arena_purge_delay) }, // reset/decommit delay in milli-seconds for arena allocation
{ 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arenas
{ 1, UNINIT, MI_OPTION(allow_purge) }, // allow decommit/reset to free (physical) memory back to the OS
{ 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
};
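
These names also define the environment interface: any entry can be set without recompiling via MIMALLOC_ plus the upper-cased option name, for example MIMALLOC_ABANDONED_PAGE_PURGE=1, or MIMALLOC_PURGE_DELAY=0 to purge immediately instead of after a delay (see the delay == 0 checks in arena.c above and segment.c below).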

View File

@@ -253,7 +253,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
#if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
#endif
mi_assert_internal(!page->is_reset);
// TODO: push on full queue immediately if it is full?
mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
mi_page_queue_push(heap, pq, page);

View File

@@ -126,8 +126,8 @@ static size_t mi_segment_page_size(const mi_segment_t* segment) {
#endif
#if (MI_DEBUG>=2)
static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tld) {
mi_page_t* p = tld->pages_reset.first;
static bool mi_pages_purge_contains(const mi_page_t* page, mi_segments_tld_t* tld) {
mi_page_t* p = tld->pages_purge.first;
while (p != NULL) {
if (p == page) return true;
p = p->next;
@@ -148,8 +148,8 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
if (!page->segment_in_use) {
nfree++;
}
if (page->segment_in_use || page->is_reset) {
mi_assert_expensive(!mi_pages_reset_contains(page, tld));
if (page->segment_in_use) {
mi_assert_expensive(!mi_pages_purge_contains(page, tld));
}
}
mi_assert_internal(nfree + segment->used == segment->capacity);
@@ -163,12 +163,12 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
static bool mi_page_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(page != NULL);
if (page->next != NULL || page->prev != NULL) {
mi_assert_internal(mi_pages_reset_contains(page, tld));
mi_assert_internal(mi_pages_purge_contains(page, tld));
return false;
}
else {
// both next and prev are NULL, check for singleton list
return (tld->pages_reset.first != page && tld->pages_reset.last != page);
return (tld->pages_purge.first != page && tld->pages_purge.last != page);
}
}
@@ -231,37 +231,37 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
Page reset
----------------------------------------------------------- */
static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld) {
static void mi_page_purge(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
// todo: should we purge the guard page as well when MI_SECURE>=2 ?
mi_assert_internal(page->is_committed);
if (!mi_option_is_enabled(mi_option_page_reset)) return;
if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
mi_assert_internal(!page->segment_in_use);
if (!segment->allow_purge) return;
mi_assert_internal(page->used == 0);
mi_assert_expensive(!mi_pages_purge_contains(page, tld));
size_t psize;
void* start = mi_segment_raw_page_start(segment, page, &psize);
page->is_reset = true;
mi_assert_internal(size <= psize);
size_t reset_size = ((size == 0 || size > psize) ? psize : size);
if (reset_size > 0) { _mi_os_reset(start, reset_size, tld->stats); }
void* start = mi_segment_raw_page_start(segment, page, &psize);
const bool needs_recommit = _mi_os_purge(start, psize, tld->stats);
if (needs_recommit) { page->is_committed = false; }
page->used = 0;
}
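
Unlike the old _mi_os_reset call, a purge can either reset the memory (keep it committed but mark the contents disposable) or decommit it entirely, so _mi_os_purge reports back whether a re-commit is needed. A simplified sketch of that decision for a POSIX system (a hypothetical helper, not the actual os.c code, which also consults mi_option_purge_decommits and has Windows paths):

#include <stddef.h>
#include <stdbool.h>
#include <sys/mman.h>

static bool os_purge_sketch(void* start, size_t size, bool purge_decommits) {
  if (purge_decommits) {
    // decommit: replace the range by fresh PROT_NONE mappings; the caller
    // must re-commit (and gets zeroed pages) before using it again
    mmap(start, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return true;   // needs_recommit: clear page->is_committed
  }
  else {
    // reset: keep the range committed but let the OS reclaim the physical
    // pages (MADV_FREE is the lazier variant where available)
    madvise(start, size, MADV_DONTNEED);
    return false;  // memory stays directly usable
  }
}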
static bool mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld)
{
MI_UNUSED(size); MI_UNUSED(tld);
mi_assert_internal(page->is_reset);
mi_assert_internal(page->is_committed);
mi_assert_internal(!segment->mem_is_pinned);
if (segment->mem_is_pinned || !page->is_committed || !page->is_reset) return true;
page->is_reset = false;
/*
static bool mi_page_ensure_committed(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
if (page->is_committed) return true;
mi_assert_internal(segment->allow_decommit);
mi_assert_expensive(!mi_pages_purge_contains(page, tld));
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
size_t unreset_size = (size == 0 || size > psize ? psize : size);
*/
// bool is_zero = false;
// bool ok = true;
// if (unreset_size > 0) {
// ok = _mi_mem_unreset(start, unreset_size, &is_zero, tld->os);
// }
// if (is_zero) page->is_zero_init = true;
bool is_zero = false;
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
bool ok = _mi_os_commit(start, psize + gsize, &is_zero, tld->stats);
if (!ok) return false; // failed to commit!
page->is_committed = true;
page->used = 0;
page->is_zero_init = is_zero;
if (gsize > 0) {
mi_segment_protect_range(start + psize, gsize, true);
}
return true;
}
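
When MI_SECURE >= 2 the code above commits psize + gsize bytes and then protects the trailing gsize bytes, so every page ends in a guard page. The guard technique in isolation, assuming POSIX mprotect (mi_segment_protect_range wraps the platform-specific call):

#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>

// hypothetical: make the OS page following a committed range inaccessible,
// so an overrun off the end faults immediately instead of silently
// corrupting whatever comes next
static int protect_tail_sketch(unsigned char* start, size_t psize) {
  const size_t gsize = (size_t)sysconf(_SC_PAGESIZE);
  return mprotect(start + psize, gsize, PROT_NONE);
}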
@@ -274,33 +274,33 @@ static bool mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size,
// a 32-bit field while the clock is always 64-bit, we need to guard
// against overflow; we use subtraction to check for expiry, which works
// as long as the purge delay stays under (2^30 - 1) milliseconds (~12 days)
static void mi_page_reset_set_expire(mi_page_t* page) {
static void mi_page_purge_set_expire(mi_page_t* page) {
mi_assert_internal(page->used == 0);
uint32_t expire = (uint32_t)_mi_clock_now() + mi_option_get(mi_option_purge_delay);
page->used = expire;
}
static bool mi_page_reset_is_expired(mi_page_t* page, mi_msecs_t now) {
static bool mi_page_purge_is_expired(mi_page_t* page, mi_msecs_t now) {
int32_t expire = (int32_t)(page->used);
return (((int32_t)now - expire) >= 0);
}
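
The check is deliberately wrap-around safe: both timestamps are truncated to 32 bits and ordered by subtraction rather than compared directly. A standalone worked example (not mimalloc code) of why this survives a wrap of the truncated clock:

#include <stdint.h>
#include <assert.h>

int main(void) {
  // a page is queued 5ms before the truncated 32-bit clock wraps,
  // with a 10ms purge delay: the expiry stamp wraps past zero
  uint32_t expire = (UINT32_MAX - 4u) + 10u;  // wraps to 5
  // 12ms after queueing, the truncated clock has wrapped as well
  uint32_t now = (UINT32_MAX - 4u) + 12u;     // wraps to 7
  // unsigned subtraction wraps too, so the signed difference is the true
  // elapsed distance as long as the delay stays far below 2^31 ms
  int32_t diff = (int32_t)(now - expire);     // 2 => expired, correctly
  assert(diff >= 0);
  // a direct comparison would misorder the pre-wrap/post-wrap case,
  // e.g. now == 4294967295 (not yet expired) against expire == 5
  return 0;
}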
static void mi_pages_reset_add(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(!page->segment_in_use || !page->is_committed);
static void mi_segment_schedule_purge(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(!page->segment_in_use);
mi_assert_internal(mi_page_not_in_queue(page,tld));
mi_assert_expensive(!mi_pages_reset_contains(page, tld));
mi_assert_expensive(!mi_pages_purge_contains(page, tld));
mi_assert_internal(_mi_page_segment(page)==segment);
if (!mi_option_is_enabled(mi_option_page_reset)) return;
if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
if (!segment->allow_purge) return;
if (mi_option_get(mi_option_purge_delay) == 0) {
// reset immediately?
mi_page_reset(segment, page, 0, tld);
// purge immediately?
mi_page_purge(segment, page, tld);
}
else {
// otherwise push on the delayed page reset queue
mi_page_queue_t* pq = &tld->pages_reset;
mi_page_queue_t* pq = &tld->pages_purge;
// push on top
mi_page_reset_set_expire(page);
mi_page_purge_set_expire(page);
page->next = pq->first;
page->prev = NULL;
if (pq->first == NULL) {
@@ -314,13 +314,14 @@ static void mi_pages_reset_add(mi_segment_t* segment, mi_page_t* page, mi_segmen
}
}
static void mi_pages_reset_remove(mi_page_t* page, mi_segments_tld_t* tld) {
static void mi_page_purge_remove(mi_page_t* page, mi_segments_tld_t* tld) {
if (mi_page_not_in_queue(page,tld)) return;
mi_page_queue_t* pq = &tld->pages_reset;
mi_page_queue_t* pq = &tld->pages_purge;
mi_assert_internal(pq!=NULL);
mi_assert_internal(!page->segment_in_use);
mi_assert_internal(mi_pages_reset_contains(page, tld));
mi_assert_internal(page->used != 0);
mi_assert_internal(mi_pages_purge_contains(page, tld));
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == pq->last) pq->last = page->prev;
@@ -329,14 +330,14 @@ static void mi_pages_reset_remove(mi_page_t* page, mi_segments_tld_t* tld) {
page->used = 0;
}
static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool force_reset, mi_segments_tld_t* tld) {
static void mi_segment_remove_all_purges(mi_segment_t* segment, bool force_purge, mi_segments_tld_t* tld) {
if (segment->mem_is_pinned) return; // never purge in pinned memory (e.g. huge OS pages)
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (!page->segment_in_use && page->is_committed && !page->is_reset) {
mi_pages_reset_remove(page, tld);
if (force_reset) {
mi_page_reset(segment, page, 0, tld);
if (!page->segment_in_use) {
mi_page_purge_remove(page, tld);
if (force_purge) {
mi_page_purge(segment, page, tld);
}
}
else {
@@ -345,16 +346,16 @@ static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool for
}
}
static void mi_reset_delayed(mi_segments_tld_t* tld) {
if (!mi_option_is_enabled(mi_option_page_reset)) return;
static void mi_pages_try_purge(mi_segments_tld_t* tld) {
if (!mi_option_is_enabled(mi_option_allow_purge)) return;
mi_msecs_t now = _mi_clock_now();
mi_page_queue_t* pq = &tld->pages_reset;
mi_page_queue_t* pq = &tld->pages_purge;
// from oldest up to the first that has not expired yet
mi_page_t* page = pq->last;
while (page != NULL && mi_page_reset_is_expired(page,now)) {
while (page != NULL && mi_page_purge_is_expired(page,now)) {
mi_page_t* const prev = page->prev; // save previous field
mi_page_reset(_mi_page_segment(page), page, 0, tld);
page->used = 0;
mi_page_purge(_mi_page_segment(page), page, tld);
page->prev = page->next = NULL;
page = prev;
}
@@ -477,27 +478,17 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set
}
bool any_reset = false;
bool fully_committed = true;
size_t committed = 0;
const size_t page_size = mi_segment_raw_page_size(segment);
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (page->is_committed) { committed += page_size; }
if (!page->is_committed) { fully_committed = false; }
if (page->is_reset) { any_reset = true; }
if (!page->is_committed) { fully_committed = false; }
}
MI_UNUSED(any_reset);
MI_UNUSED(fully_committed);
mi_assert_internal((fully_committed && committed == segment_size) || (!fully_committed && committed < segment_size));
// TODO: for now, pages always reset but we can purge instead allowing for pages to be decommitted.
/*
if (any_reset && mi_option_is_enabled(mi_option_reset_decommits)) {
fully_committed = false;
}
*/
_mi_abandoned_await_readers(); // prevent ABA issue if concurrent readers try to access our memory (that might be purged)
_mi_arena_free(segment, segment_size, segment->mem_alignment, segment->mem_align_offset, segment->memid, committed, tld->stats);
}
@@ -507,8 +498,8 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
MI_UNUSED(tld);
#if MI_DEBUG>=2
if (!_mi_is_main_thread()) {
mi_assert_internal(tld->pages_reset.first == NULL);
mi_assert_internal(tld->pages_reset.last == NULL);
mi_assert_internal(tld->pages_purge.first == NULL);
mi_assert_internal(tld->pages_purge.last == NULL);
}
#endif
}
@@ -551,10 +542,13 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
mi_track_mem_undefined(segment, info_size); MI_UNUSED(info_size);
segment->memid = memid;
segment->mem_is_pinned = (mem_large || is_pinned);
segment->mem_is_pinned = is_pinned;
segment->mem_is_large = mem_large;
segment->mem_is_committed = commit;
segment->mem_alignment = alignment;
segment->mem_align_offset = align_offset;
segment->allow_decommit = !segment->mem_is_pinned && !segment->mem_is_large;
segment->allow_purge = segment->allow_decommit && mi_option_is_enabled(mi_option_allow_purge);
mi_segments_track_size((long)(*segment_size), tld);
_mi_segment_map_allocated_at(segment);
return segment;
@@ -610,7 +604,6 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
for (size_t i = 0; i < capacity; i++) {
mi_assert_internal(i <= 255);
segment->pages[i].segment_idx = (uint8_t)i;
segment->pages[i].is_reset = false;
segment->pages[i].is_committed = commit;
segment->pages[i].is_zero_init = is_zero;
}
@@ -641,11 +634,10 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
MI_UNUSED(force);
mi_assert(segment != NULL);
// note: don't reset pages even on abandon as the whole segment is freed? (and ready for reuse)
bool force_reset = (force && mi_option_is_enabled(mi_option_abandoned_page_reset));
mi_pages_reset_remove_all_in_segment(segment, force_reset, tld);
mi_segment_remove_from_free_queue(segment,tld);
// don't purge as we are freeing now
mi_segment_remove_all_purges(segment, false /* don't force as we are about to free */, tld);
mi_segment_remove_from_free_queue(segment, tld);
mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
mi_assert_expensive(!mi_segment_queue_contains(&tld->medium_free, segment));
mi_assert(segment->next == NULL);
@@ -668,35 +660,15 @@ static bool mi_segment_has_free(const mi_segment_t* segment) {
static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(_mi_page_segment(page) == segment);
mi_assert_internal(!page->segment_in_use);
mi_pages_reset_remove(page, tld);
mi_page_purge_remove(page, tld);
// check commit
if (!page->is_committed) {
mi_assert_internal(!segment->mem_is_pinned);
mi_assert_internal(!page->is_reset);
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false;
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
bool ok = _mi_os_commit(start, psize + gsize, &is_zero, tld->stats);
if (!ok) return false; // failed to commit!
if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); }
if (is_zero) { page->is_zero_init = true; }
page->is_committed = true;
}
if (!mi_page_ensure_committed(segment, page, tld)) return false;
// mark the page in-use right away so it cannot be scheduled for a delayed purge
page->segment_in_use = true;
segment->used++;
// check reset
if (page->is_reset) {
mi_assert_internal(!segment->mem_is_pinned);
bool ok = mi_page_unreset(segment, page, 0, tld);
if (!ok) {
page->segment_in_use = false;
segment->used--;
return false;
}
}
mi_assert_internal(page->segment_in_use);
segment->used++;
mi_assert_internal(page->segment_in_use && page->is_committed && page->used==0 && !mi_pages_purge_contains(page,tld));
mi_assert_internal(segment->used <= segment->capacity);
if (segment->used == segment->capacity && segment->page_kind <= MI_PAGE_MEDIUM) {
// if no more free pages, remove from the queue
@@ -714,7 +686,7 @@ static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_seg
static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);
// clear page data; can be called on abandoned segments
static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool allow_reset, mi_segments_tld_t* tld)
static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld)
{
mi_assert_internal(page->segment_in_use);
mi_assert_internal(mi_page_all_free(page));
@@ -725,35 +697,24 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool a
_mi_stat_decrease(&tld->stats->page_committed, inuse);
_mi_stat_decrease(&tld->stats->pages, 1);
// calculate the used size from the raw (non-aligned) start of the page
//size_t pre_size;
//_mi_segment_page_start(segment, page, page->block_size, NULL, &pre_size);
//size_t used_size = pre_size + (page->capacity * page->block_size);
page->is_zero_init = false;
page->segment_in_use = false;
// reset the page memory to reduce memory pressure?
// note: must come after setting `segment_in_use` to false but before block_size becomes 0
//mi_page_reset(segment, page, 0 /*used_size*/, tld);
// zero the page data, but not the segment fields and capacity, and block_size (for page size calculations)
uint32_t block_size = page->xblock_size;
uint16_t capacity = page->capacity;
uint16_t reserved = page->reserved;
ptrdiff_t ofs = offsetof(mi_page_t,capacity);
memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
_mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
page->capacity = capacity;
page->reserved = reserved;
page->xblock_size = block_size;
segment->used--;
// add to the free page list for reuse/reset
if (allow_reset) {
mi_pages_reset_add(segment, page, tld);
}
// schedule purge
mi_segment_schedule_purge(segment, page, tld);
page->capacity = 0; // after reset these can be zero'd now
page->capacity = 0; // after purge these can be zero'd now
page->reserved = 0;
}
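
The clear above leaves the leading fields (segment_idx and the flag bits) intact and zeroes everything from capacity onward in a single call, restoring the three values still needed for size calculations (_mi_memzero is simply mimalloc's internal memset-to-zero wrapper). The offsetof idiom in isolation, on a hypothetical struct:

#include <stddef.h>   // offsetof, ptrdiff_t
#include <stdint.h>
#include <string.h>   // memset

typedef struct demo_page_s {
  uint8_t  segment_idx;  // kept: position of the page in its segment
  uint8_t  flags;        // kept: bitfield-style state
  uint16_t capacity;     // first field to be wiped...
  uint16_t reserved;
  uint32_t block_size;
  void*    free_list;    // ...through to the end of the struct
} demo_page_t;

static void demo_page_clear(demo_page_t* page) {
  // save the fields that must survive the clear
  uint16_t capacity   = page->capacity;
  uint16_t reserved   = page->reserved;
  uint32_t block_size = page->block_size;
  // zero from `capacity` to the end of the struct in one call
  ptrdiff_t ofs = offsetof(demo_page_t, capacity);
  memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
  // restore the preserved values
  page->capacity   = capacity;
  page->reserved   = reserved;
  page->block_size = block_size;
}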
@@ -762,10 +723,10 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
mi_assert(page != NULL);
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_expensive(mi_segment_is_valid(segment,tld));
mi_reset_delayed(tld);
mi_pages_try_purge(tld);
// mark it as free now
mi_segment_page_clear(segment, page, true, tld);
mi_segment_page_clear(segment, page, tld);
if (segment->used == 0) {
// no more used pages; remove from the free list and free the segment
@@ -969,8 +930,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_expensive(mi_segment_is_valid(segment, tld));
// remove the segment from the free page queue if needed
mi_reset_delayed(tld);
mi_pages_reset_remove_all_in_segment(segment, mi_option_is_enabled(mi_option_abandoned_page_reset), tld);
mi_pages_try_purge(tld);
mi_segment_remove_all_purges(segment, mi_option_is_enabled(mi_option_abandoned_page_purge), tld);
mi_segment_remove_from_free_queue(segment, tld);
mi_assert_internal(segment->next == NULL && segment->prev == NULL);
@@ -988,7 +949,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_assert_internal(mi_page_heap(page) == NULL);
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_expensive(!mi_pages_reset_contains(page, tld));
mi_assert_expensive(!mi_pages_purge_contains(page, tld));
mi_assert_expensive(mi_segment_is_valid(segment, tld));
segment->abandoned++;
_mi_stat_increase(&tld->stats->pages_abandoned, 1);
@@ -1056,7 +1017,6 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (page->segment_in_use) {
mi_assert_internal(!page->is_reset);
mi_assert_internal(page->is_committed);
mi_assert_internal(mi_page_not_in_queue(page, tld));
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
@@ -1071,7 +1031,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
_mi_page_free_collect(page, false); // ensure used count is up to date
if (mi_page_all_free(page)) {
// if everything free already, clear the page directly
mi_segment_page_clear(segment, page, true, tld); // reset is ok now
mi_segment_page_clear(segment, page, tld); // purge is ok now
}
else {
// otherwise reclaim it into the heap
@@ -1081,10 +1041,12 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
}
}
}
else if (page->is_committed && !page->is_reset) { // not in-use, and not reset yet
/* expired
else if (page->is_committed) { // not in-use, and not reset yet
// note: do not reset as this includes pages that were not touched before
// mi_pages_reset_add(segment, page, tld);
// mi_pages_purge_add(segment, page, tld);
}
*/
}
mi_assert_internal(segment->abandoned == 0);
if (segment->used == 0) {
@@ -1271,7 +1233,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
// reset the part of the page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
if (page_alignment > 0 && !segment->mem_is_pinned && page->is_committed) {
if (page_alignment > 0 && segment->allow_decommit && page->is_committed) {
uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);
mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
mi_assert_internal(psize - (aligned_p - start) >= size);
@@ -1319,7 +1281,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
mi_assert_internal(segment == _mi_page_segment(page));
mi_assert_internal(page->used == 1); // this is called just before the free
mi_assert_internal(page->free == NULL);
if (!segment->mem_is_pinned && page->is_committed) {
if (segment->allow_decommit && page->is_committed) {
const size_t usize = mi_usable_size(block) - sizeof(mi_block_t);
uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
_mi_os_reset(p, usize, &_mi_stats_main);
@@ -1354,7 +1316,7 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t pag
}
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
mi_reset_delayed(tld);
// mi_segment_try_purge(tld);
mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld));
return page;
}