fix bug in collect where has_page was not set on free pages

daan 2020-01-23 21:37:14 -08:00
parent 4a2a0c2d50
commit 58fdcbb0cd
3 changed files with 40 additions and 10 deletions

src/options.c

@@ -70,7 +70,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates
{ 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
{ 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
{ 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
{ 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
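
A note on this hunk: the default for eager_commit_delay moves from 0 to 1, so the first segment of each thread is no longer eagerly committed. As a minimal sketch (not part of this commit), assuming the public option API from mimalloc.h, the setting can still be inspected or overridden at runtime, or via the MIMALLOC_EAGER_COMMIT_DELAY environment variable:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // With this change the default is 1: the first segment per thread
  // is not eagerly committed.
  long delay = mi_option_get(mi_option_eager_commit_delay);
  printf("eager_commit_delay = %ld\n", delay);
  // Restore the previous behavior of committing eagerly from the start.
  mi_option_set(mi_option_eager_commit_delay, 0);
  return 0;
}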

src/segment.c

@@ -231,6 +231,7 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
----------------------------------------------------------- */
static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld) {
mi_assert_internal(page->is_committed);
if (!mi_option_is_enabled(mi_option_page_reset)) return;
if (segment->mem_is_fixed || page->segment_in_use || page->is_reset) return;
size_t psize;
@@ -330,7 +331,7 @@ static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool for
if (segment->mem_is_fixed) return; // never reset in huge OS pages
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (!page->segment_in_use && !page->is_reset) {
if (!page->segment_in_use && page->is_committed && !page->is_reset) {
mi_pages_reset_remove(page, tld);
if (force_reset) {
mi_page_reset(segment, page, 0, tld);
@@ -544,8 +545,12 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
}
mi_assert_internal(tld->cache_count == 0);
mi_assert_internal(tld->cache == NULL);
#if MI_DEBUG>=2
if (!_mi_is_main_thread()) {
mi_assert_internal(tld->pages_reset.first == NULL);
mi_assert_internal(tld->pages_reset.last == NULL);
}
#endif
}
@@ -979,7 +984,7 @@ static bool mi_segment_pages_collect(mi_segment_t* segment, size_t block_size, m
// if everything free already, clear the page directly
segment->abandoned--;
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
mi_segment_page_clear(segment, page, false, tld); // no reset allowed (as the segment is still abandoned)
mi_segment_page_clear(segment, page, false, tld); // no (delayed) reset allowed (as the segment is still abandoned)
has_page = true;
}
else if (page->xblock_size == block_size && page->used < page->reserved) {
@@ -987,6 +992,9 @@ static bool mi_segment_pages_collect(mi_segment_t* segment, size_t block_size, m
has_page = true;
}
}
else {
has_page = true;
}
}
return has_page;
}
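
This hunk is the bug from the commit title: a slot holding a page that is not in use at all previously fell through without recording anything, so the collector could report that an abandoned segment had no usable page even though it contained free pages. Below is a simplified, self-contained sketch of the corrected scan; the page_t struct and pages_collect function are illustrative stand-ins for mi_page_t and mi_segment_pages_collect, not the real types.

#include <stdbool.h>
#include <stddef.h>

typedef struct page_s {
  bool   in_use;      // page currently in use by some heap
  size_t block_size;  // block size served by this page
  size_t used;        // blocks currently allocated
  size_t reserved;    // blocks available in total
} page_t;

// Report whether at least one page is usable for allocations of `block_size`.
static bool pages_collect(const page_t* pages, size_t capacity, size_t block_size) {
  bool has_page = false;
  for (size_t i = 0; i < capacity; i++) {
    const page_t* page = &pages[i];
    if (page->in_use) {
      if (page->used == 0) {
        has_page = true;   // all blocks freed: the page can be cleared and reused
      }
      else if (page->block_size == block_size && page->used < page->reserved) {
        has_page = true;   // right size class with free blocks left
      }
    }
    else {
      has_page = true;     // the fix: a page that is already free also counts
    }
  }
  return has_page;
}
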
@@ -1046,7 +1054,8 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
}
else {
// otherwise return the segment as it will contain some free pages
mi_assert_internal(segment->used < segment->capacity);
// (except for abandoned_reclaim_all which uses a block_size of zero)
mi_assert_internal(segment->used < segment->capacity || block_size == 0);
return segment;
}
}
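
The relaxed assertion reflects the reclaim-all path, which passes a block_size of zero and may legitimately return a segment that is already fully used. A minimal sketch of the invariant, with hypothetical names:

#include <assert.h>
#include <stddef.h>

static void check_reclaimed_segment(size_t used, size_t capacity, size_t block_size) {
  // A segment reclaimed for a concrete block size must still have a free
  // page slot; reclaim-all (block_size == 0) is exempt.
  assert(used < capacity || block_size == 0);
}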

test/test-stress.c

@@ -57,6 +57,7 @@ const uintptr_t cookie = 0xbf58476d1ce4e5b9UL;
const uintptr_t cookie = 0x1ce4e5b9UL;
#endif
static uintptr_t ticks(void);
static void* atomic_exchange_ptr(volatile void** p, void* newval);
typedef uintptr_t* random_t;
@@ -121,7 +122,7 @@ static void free_items(void* p) {
static void stress(intptr_t tid) {
//bench_start_thread();
uintptr_t r = tid * 43;
uintptr_t r = (tid * 43)^ticks();
const size_t max_item_shift = 5; // 128
const size_t max_item_retained_shift = max_item_shift + 2;
size_t allocs = 100 * ((size_t)SCALE) * (tid % 8 + 1); // some threads do more
@@ -194,9 +195,9 @@ static void test_stress(void) {
}
static void leak(intptr_t tid) {
uintptr_t r = 43*tid;
uintptr_t r = (43*tid)^ticks();
void* p = alloc_items(pick(&r)%128, &r);
if (chance(10, &r)) {
if (chance(50, &r)) {
intptr_t i = (pick(&r) % TRANSFERS);
void* q = atomic_exchange_ptr(&transfer[i], p);
free_items(q);
@@ -259,6 +260,12 @@ static void (*thread_entry_fun)(intptr_t) = &stress;
#include <windows.h>
static uintptr_t ticks(void) {
LARGE_INTEGER t;
QueryPerformanceCounter(&t);
return (uintptr_t)t.QuadPart;
}
static DWORD WINAPI thread_entry(LPVOID param) {
thread_entry_fun((intptr_t)param);
return 0;
@@ -323,4 +330,18 @@ static void* atomic_exchange_ptr(volatile void** p, void* newval) {
}
#endif
#include <time.h>
#ifdef CLOCK_REALTIME
uintptr_t ticks(void) {
struct timespec t;
clock_gettime(CLOCK_REALTIME, &t);
return ((uintptr_t)t.tv_sec * 1000) + ((uintptr_t)t.tv_nsec / 1000000);
}
#else
// low resolution timer
uintptr_t ticks(void) {
return ((uintptr_t)clock() / ((uintptr_t)CLOCKS_PER_SEC / 1000));
}
#endif
#endif
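
The new ticks() helpers exist so each thread mixes a per-run time value into its seed ((tid * 43) ^ ticks() above), making repeated runs of the stress and leak tests exercise different allocation patterns. The sketch below shows how such a seed can drive the test's pseudo-random choices; the xorshift constants and the pick() body are illustrative, not copied from test-stress.c.

#include <stdint.h>
#include <stddef.h>

typedef uintptr_t* random_t;

// One xorshift step over caller-owned state (constants are illustrative).
static uintptr_t pick(random_t r) {
  uintptr_t x = *r;
  x ^= x << 13;
  x ^= x >> 17;
  x ^= x << 5;
  *r = x;            // note: a zero seed stays zero; fine for an illustration
  return x;
}

static void per_thread_example(intptr_t tid, uintptr_t now /* e.g. ticks() */) {
  uintptr_t r = (uintptr_t)(tid * 43) ^ now;   // per-thread, per-run seed
  size_t items = pick(&r) % 128;               // mirrors pick(&r)%128 in the test
  (void)items;
}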