use allow_decommit option for both the segment cache and pages

Author: daan
Date:   2020-09-24 17:20:39 -07:00
parent  b149099bf3
commit  e1c38eef76

4 changed files with 7 additions and 7 deletions

@@ -314,7 +314,7 @@ typedef enum mi_option_e {
   mi_option_eager_commit_delay,
   mi_option_allow_decommit,
   mi_option_reset_delay,
-  mi_option_arena_reset_delay,
+  mi_option_segment_decommit_delay,
   mi_option_use_numa_nodes,
   mi_option_limit_os_alloc,
   mi_option_os_tag,

@@ -88,8 +88,8 @@ static mi_option_desc_t options[_mi_option_last] =
   { 1, UNINIT, MI_OPTION(eager_commit_delay) },        // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
 #endif
   { 1, UNINIT, MI_OPTION(allow_decommit) },            // decommit slices when no longer used (after reset_delay milli-seconds)
-  { 500, UNINIT, MI_OPTION(reset_delay) },             // reset delay in milli-seconds
-  { 1000, UNINIT, MI_OPTION(arena_reset_delay) },      // reset delay in milli-seconds for freed segments
+  { 500, UNINIT, MI_OPTION(reset_delay) },             // page reset delay in milli-seconds (= decommit)
+  { 1000, UNINIT, MI_OPTION(segment_decommit_delay) }, // decommit delay in milli-seconds for freed segments
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },            // 0 = use available numa nodes, otherwise use at most N nodes.
   { 0, UNINIT, MI_OPTION(limit_os_alloc) },            // 1 = do not use OS memory for allocation (but only reserved arenas)
   { 100, UNINIT, MI_OPTION(os_tag) },                  // only apple specific for now but might serve more or less related purpose
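
Note (not part of the diff): with these defaults, the decommit behavior can still be tuned at run time through mimalloc's public option API. A minimal sketch, assuming only the option names introduced above and the standard mi_option_set/mi_option_disable entry points:

#include <mimalloc.h>

int main(void) {
  // Keep decommit enabled (the default above), but return freed segments
  // to the OS sooner than the 1000 ms default from the table.
  mi_option_set(mi_option_segment_decommit_delay, 250);

  // Or turn decommit off entirely:
  // mi_option_disable(mi_option_allow_decommit);

  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}

The same options can typically be set from the environment as well (e.g. MIMALLOC_SEGMENT_DECOMMIT_DELAY=250), since mimalloc derives environment variable names from the option names.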

@@ -177,8 +177,8 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
   slot->is_pinned = is_pinned;
   mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
   slot->commit_mask = commit_mask;
-  if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned) {
-    long delay = mi_option_get(mi_option_arena_reset_delay);
+  if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_decommit)) {
+    long delay = mi_option_get(mi_option_segment_decommit_delay);
     if (delay == 0) {
       _mi_abandoned_await_readers(); // wait until safe to decommit
       mi_commit_mask_decommit(&slot->commit_mask, start, MI_SEGMENT_SIZE, tld->stats);
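
Note (not part of the diff): the cache-push path above and the segment paths in the next file share one pattern: when the configured delay is zero, memory is decommitted immediately; otherwise an expiration time is recorded and a later pass performs the decommit once the delay has elapsed (or when forced). A standalone sketch of that pattern, using hypothetical helpers clock_now_msecs and os_decommit in place of mimalloc's internal _mi_clock_now and _mi_os_decommit:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>

typedef int64_t msecs_t;

typedef struct region_s {
  void*   start;
  size_t  size;
  msecs_t decommit_expire;   // 0 = no decommit pending
} region_t;

// hypothetical stand-in for mimalloc's internal _mi_clock_now
static msecs_t clock_now_msecs(void) {
  return (msecs_t)time(NULL) * 1000;
}

// hypothetical stand-in for _mi_os_decommit; a real version would ask the OS
// to release the physical pages (e.g. via madvise on POSIX)
static void os_decommit(void* start, size_t size) {
  (void)start; (void)size;   // no-op in this sketch
}

// called when a region becomes unused
static void region_schedule_decommit(region_t* r, long delay_msecs) {
  if (delay_msecs == 0) {
    os_decommit(r->start, r->size);                          // decommit right away
    r->decommit_expire = 0;
  }
  else {
    r->decommit_expire = clock_now_msecs() + delay_msecs;    // decommit later
  }
}

// called periodically, or with force=true when the region is reclaimed
static void region_delayed_decommit(region_t* r, bool force) {
  if (r->decommit_expire == 0) return;                            // nothing pending
  if (!force && clock_now_msecs() < r->decommit_expire) return;   // not expired yet
  os_decommit(r->start, r->size);
  r->decommit_expire = 0;
}

int main(void) {
  region_t r = { NULL, (size_t)1 << 20, 0 };
  region_schedule_decommit(&r, 500);    // schedule a decommit in ~500 ms
  region_delayed_decommit(&r, false);   // too early: leaves the region committed
  region_delayed_decommit(&r, true);    // forced: decommits now
  return 0;
}

Deferring the decommit this way avoids paying the commit/decommit system-call cost when a segment is reused shortly after being freed.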

@@ -384,7 +384,7 @@ static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, s
     mi_assert_internal((void*)start != (void*)segment);
     mi_commit_mask_t cmask = mi_commit_mask_intersect(segment->commit_mask, mask);
     _mi_stat_increase(&_mi_stats_main.committed, full_size - mi_commit_mask_committed_size(cmask, MI_SEGMENT_SIZE)); // adjust for overlap
-    _mi_os_decommit(start, full_size, stats); // ok if this fails
+    if (segment->allow_decommit) { _mi_os_decommit(start, full_size, stats); } // ok if this fails
     mi_commit_mask_clear(&segment->commit_mask, mask);
   }
   // increase expiration of reusing part of the delayed decommit
@@ -422,7 +422,7 @@ static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_
 }
 
 static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats) {
-  if (mi_commit_mask_is_empty(segment->decommit_mask)) return;
+  if (!segment->allow_decommit || mi_commit_mask_is_empty(segment->decommit_mask)) return;
   mi_msecs_t now = _mi_clock_now();
   if (!force && now < segment->decommit_expire) return;