Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-01-15 09:38:00 +08:00)
avoid caching segments in pinned arenas; happens with huge OS page reservations
commit 6dd3073a75
parent 391f8bbd72
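For context, here is a minimal sketch of the scenario the commit message describes, using the public mi_reserve_huge_os_pages_interleave API from mimalloc.h (the allocation calls are illustrative and not taken from this commit): reserving huge OS pages creates an arena whose memory is pinned and can never be decommitted, so segments backed by it gain nothing from sitting in the segment cache.

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Reserve 4 one-GiB huge OS pages, interleaved over all NUMA nodes,
  // waiting at most 500 ms per page. The reservation becomes a pinned
  // arena: its memory stays committed for the lifetime of the process.
  if (mi_reserve_huge_os_pages_interleave(4, 0, 500) != 0) {
    fprintf(stderr, "huge OS page reservation failed or timed out\n");
  }
  // Segments for these allocations may be carved from the pinned arena.
  // Before this commit, freeing them could push the segment into the
  // segment cache even though decommit/purge has no effect there.
  void* p = mi_malloc(8 * 1024 * 1024);
  mi_free(p);
  return 0;
}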
@@ -216,20 +216,27 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
   return false;
 #else
 
-  // only for normal segment blocks
+  // purge expired entries
+  mi_segment_cache_purge(false /* limit purges to a constant N */, false /* don't force unexpired */, tld);
+
+  // only cache normal segment blocks
   if (size != MI_SEGMENT_SIZE || ((uintptr_t)start % MI_SEGMENT_ALIGN) != 0) return false;
 
+  // Also do not cache arena allocated segments that cannot be decommitted. (as arena allocation is fast)
+  // This is a common case with reserved huge OS pages.
+  //
+  // (note: we could also allow segments that are already fully decommitted but that never happens
+  // as the first slice is always committed (for the segment metadata))
+  if (!_mi_arena_is_os_allocated(memid) && is_pinned) return false;
+
   // numa node determines start field
   int numa_node = _mi_os_numa_node(NULL);
   size_t start_field = 0;
   if (numa_node > 0) {
-    start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node;
+    start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count()) * numa_node;
     if (start_field >= MI_CACHE_FIELDS) start_field = 0;
   }
 
-  // purge expired entries
-  mi_segment_cache_purge(false /* limit purges to a constant N */, false /* don't force unexpired */, tld);
-
   // find an available slot
   mi_bitmap_index_t bitidx;
   bool claimed = _mi_bitmap_try_find_from_claim(cache_inuse, MI_CACHE_FIELDS, start_field, 1, &bitidx);
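As a worked example of the start_field computation above (assuming MI_CACHE_FIELDS is 16 and two NUMA nodes; both values are illustrative stand-ins, not taken from this commit), each node begins its slot search in its own region of the cache bitmap so cached segments tend to stay NUMA-local:

#include <assert.h>
#include <stddef.h>

int main(void) {
  const size_t cache_fields = 16;     // illustrative stand-in for MI_CACHE_FIELDS
  const size_t numa_node_count = 2;   // illustrative stand-in for _mi_os_numa_node_count()
  for (int numa_node = 0; numa_node < (int)numa_node_count; numa_node++) {
    size_t start_field = 0;
    if (numa_node > 0) {
      start_field = (cache_fields / numa_node_count) * (size_t)numa_node;
      if (start_field >= cache_fields) start_field = 0;  // clamp if nodes outnumber regions
    }
    // node 0 scans from field 0, node 1 from field 8
    assert(start_field == (numa_node == 0 ? (size_t)0 : (size_t)8));
  }
  return 0;
}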
@@ -397,8 +397,10 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
   if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || segment->kind == MI_SEGMENT_HUGE ||  // only push regular segments on the cache
       !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
   {
-    const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
-    if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
+    if (!segment->mem_is_pinned) {
+      const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
+      if (csize > 0) { _mi_stat_decrease(&_mi_stats_main.committed, csize); }
+    }
     _mi_abandoned_await_readers();  // wait until safe to free
     _mi_arena_free(segment, mi_segment_size(segment), segment->mem_alignment, segment->mem_align_offset, segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->stats);
   }
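Putting the second hunk together, the free path after this commit reads roughly as follows (a consolidated sketch using the identifiers from the diff; the wrapper function itself is hypothetical and simplified, not the literal source):

// Hypothetical, simplified consolidation of mi_segment_os_free after this
// commit: try the segment cache first; if the segment is not cacheable,
// decrease the committed statistic (unless the memory is pinned) and
// return the segment to its arena or the OS.
static void segment_os_free_sketch(mi_segment_t* segment, size_t size, mi_segments_tld_t* tld) {
  bool cached =
    size == MI_SEGMENT_SIZE && segment->mem_align_offset == 0 &&
    segment->kind != MI_SEGMENT_HUGE &&   // only push regular segments on the cache
    _mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask,
                           &segment->decommit_mask, segment->mem_is_large,
                           segment->mem_is_pinned, tld->os);
  if (!cached) {
    if (!segment->mem_is_pinned) {        // pinned memory is never decommitted
      const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
      if (csize > 0) { _mi_stat_decrease(&_mi_stats_main.committed, csize); }
    }
    _mi_abandoned_await_readers();        // wait until safe to free
    _mi_arena_free(segment, mi_segment_size(segment), segment->mem_alignment,
                   segment->mem_align_offset, segment->memid,
                   segment->mem_is_pinned /* pretend not committed to not double count decommits */,
                   tld->stats);
  }
}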