fix issue #204 (and #205) by doing thread delayed free after absorbing the pages

daan 2020-02-13 11:37:48 -08:00
parent f42b8526d0
commit 946a71c4a9
2 changed files with 64 additions and 32 deletions

src/heap.c

@@ -312,33 +312,29 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   mi_assert_internal(heap!=NULL);
   if (from==NULL || from->page_count == 0) return;
-  // unfull all full pages in the `from` heap
-  mi_page_t* page = from->pages[MI_BIN_FULL].first;
-  while (page != NULL) {
-    mi_page_t* next = page->next;
-    _mi_page_unfull(page);
-    page = next;
-  }
-  mi_assert_internal(from->pages[MI_BIN_FULL].first == NULL);
-  // free outstanding thread delayed free blocks
+  // reduce the size of the delayed frees
   _mi_heap_delayed_free(from);
-  // transfer all pages by appending the queues; this will set
-  // a new heap field which is ok as all pages are unfull'd and thus
-  // other threads won't access this field anymore (see `mi_free_block_mt`)
-  for (size_t i = 0; i < MI_BIN_FULL; i++) {
+  // transfer all pages by appending the queues; this will set a new heap field
+  // so threads may do delayed frees in either heap for a while.
+  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
     mi_page_queue_t* pq = &heap->pages[i];
     mi_page_queue_t* append = &from->pages[i];
     size_t pcount = _mi_page_queue_append(heap, pq, append);
     heap->page_count += pcount;
     from->page_count -= pcount;
   }
-  mi_assert_internal(from->thread_delayed_free == NULL);
   mi_assert_internal(from->page_count == 0);
+  // and do outstanding delayed frees in the `from` heap
+  // note: be careful here as the `heap` field in all those pages no longer point to `from`,
+  // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
+  // the regular `_mi_free_delayed_block` which is safe.
+  _mi_heap_delayed_free(from);
+  mi_assert_internal(from->thread_delayed_free == NULL);
   // and reset the `from` heap
   mi_heap_reset_pages(from);
 }
 // Safe delete a heap without freeing any still allocated blocks in that heap.
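
For reference, `mi_heap_absorb` as it reads after this change, assembled from the hunk above (blank lines, indentation, and the joining of the wrapped comment are approximate):

static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
  mi_assert_internal(heap!=NULL);
  if (from==NULL || from->page_count == 0) return;

  // reduce the size of the delayed frees
  _mi_heap_delayed_free(from);

  // transfer all pages by appending the queues; this will set a new heap field
  // so threads may do delayed frees in either heap for a while.
  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];
    mi_page_queue_t* append = &from->pages[i];
    size_t pcount = _mi_page_queue_append(heap, pq, append);
    heap->page_count += pcount;
    from->page_count -= pcount;
  }
  mi_assert_internal(from->page_count == 0);

  // and do outstanding delayed frees in the `from` heap
  // note: be careful here as the `heap` field in all those pages no longer point to `from`,
  // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
  // the regular `_mi_free_delayed_block` which is safe.
  _mi_heap_delayed_free(from);
  mi_assert_internal(from->thread_delayed_free == NULL);

  // and reset the `from` heap
  mi_heap_reset_pages(from);
}

The ordering is the point of the fix: the pages of `from` (now including the full queue, hence `i <= MI_BIN_FULL`) are appended to `heap` first, and only then is `from`'s thread-delayed-free list flushed and asserted empty, instead of flushing before the transfer as the removed lines did.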

test/main-override.cpp

@@ -12,21 +12,27 @@
 #include <mimalloc.h>
 #include <assert.h>
-// Issue #202
-void thread_main() {
-  mi_heap_t* heap = mi_heap_new();
-  void* q = mi_heap_malloc(heap,1024);
-  // mi_heap_delete(heap); // uncomment to prevent assertion
-}
+#ifdef _WIN32
+#include <windows.h>
+static void msleep(unsigned long msecs) { Sleep(msecs); }
+#else
+#include <unistd.h>
+static void msleep(unsigned long msecs) { usleep(msecs * 1000UL); }
+#endif
+void heap_no_delete();
+void heap_late_free();
+void various_tests();
 int main() {
-  auto t1 = std::thread(thread_main);
-  t1.join();
   mi_stats_reset(); // ignore earlier allocations
+  // heap_no_delete(); // issue #202
+  // heap_late_free(); // issue #204
+  various_tests();
   mi_stats_print(NULL);
   return 0;
 }
-/*
 static void* p = malloc(8);
 void free_p() {
@@ -43,8 +49,7 @@ public:
 };
-int main() {
-  mi_stats_reset(); // ignore earlier allocations
+void various_tests() {
   atexit(free_p);
   void* p1 = malloc(78);
   void* p2 = mi_malloc_aligned(16,24);
@@ -68,8 +73,6 @@ int main() {
   delete t;
   t = new (std::nothrow) Test(42);
   delete t;
-  mi_stats_print(NULL);
-  return 0;
 }
 class Static {
@@ -104,4 +107,37 @@ bool test_stl_allocator2() {
   vec.pop_back();
   return vec.size() == 0;
 }
-*/
+// Issue #202
+void heap_no_delete_worker() {
+  mi_heap_t* heap = mi_heap_new();
+  void* q = mi_heap_malloc(heap,1024);
+  // mi_heap_delete(heap); // uncomment to prevent assertion
+}
+void heap_no_delete() {
+  auto t1 = std::thread(heap_no_delete_worker);
+  t1.join();
+}
+// Issue #204
+volatile void* global_p;
+void t1main() {
+  mi_heap_t* heap = mi_heap_new();
+  global_p = mi_heap_malloc(heap, 1024);
+  mi_heap_delete(heap);
+}
+void heap_late_free() {
+  auto t1 = std::thread(t1main);
+  msleep(2000);
+  assert(global_p);
+  mi_free((void*)global_p);
+  t1.join();
+}
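
For context, `heap_late_free` pulled out as a self-contained program: a sketch that assumes the mimalloc headers and library are available, and that uses `std::this_thread::sleep_for` in place of the `msleep` helper above.

#include <mimalloc.h>
#include <cassert>
#include <chrono>
#include <thread>

// block allocated from a thread-local heap in one thread, freed from another
static volatile void* global_p = nullptr;

static void worker() {
  mi_heap_t* heap = mi_heap_new();
  global_p = mi_heap_malloc(heap, 1024);
  mi_heap_delete(heap);  // migrates the heap's pages into the thread's default heap
}

int main() {
  std::thread t1(worker);
  // give the worker time to finish mi_heap_delete before freeing
  std::this_thread::sleep_for(std::chrono::seconds(2));
  assert(global_p != nullptr);
  mi_free((void*)global_p);  // the cross-thread free that issue #204 is about
  t1.join();
  return 0;
}

As in the test above, the sleep only gives the worker time to complete `mi_heap_delete` before the main thread frees the block from a different thread.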