mirror of https://github.com/microsoft/mimalloc.git
commit 7e96634da4: merge from dev
src/alloc.c (48 lines changed)
@@ -282,6 +282,41 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
 }
 #endif
 
+// only maintain stats for smaller objects if requested
+#if (MI_STAT>1)
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+  mi_heap_t* const heap = mi_heap_get_default();
+  const size_t usize = mi_page_usable_size_of(page, block);
+  const size_t bsize = mi_page_usable_block_size(page);
+  mi_heap_stat_decrease(heap, malloc, usize);
+  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+    mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
+  }
+}
+#else
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+  UNUSED(page); UNUSED(block);
+}
+#endif
+
+#if (MI_STAT>0)
+// maintain stats for huge objects
+static void mi_stat_huge_free(const mi_page_t* page) {
+  mi_heap_t* const heap = mi_heap_get_default();
+  const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
+  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+    mi_heap_stat_decrease(heap, large, bsize);
+  }
+  else {
+    mi_heap_stat_decrease(heap, huge, bsize);
+  }
+}
+#else
+static void mi_stat_huge_free(const mi_page_t* page) {
+  UNUSED(page);
+}
+#endif
+
 // ------------------------------------------------------
 // Free
 // ------------------------------------------------------
@@ -300,6 +335,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* segment = _mi_page_segment(page);
   if (segment->kind==MI_SEGMENT_HUGE) {
+    mi_stat_huge_free(page);
     _mi_segment_huge_page_free(segment, page, block);
     return;
   }
@@ -343,7 +379,6 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   }
 }
-
 
 // regular free
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
@@ -383,6 +418,7 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
 static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) {
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
+  mi_stat_free(page, block);
   _mi_free_block(page, local, block);
 }
 
@@ -429,20 +465,12 @@ void mi_free(void* p) mi_attr_noexcept
   const uintptr_t tid = _mi_thread_id();
   mi_page_t* const page = _mi_segment_page_of(segment, p);
 
-#if (MI_STAT>1)
-  mi_heap_t* const heap = mi_heap_get_default();
-  const size_t bsize = mi_page_usable_block_size(page);
-  mi_heap_stat_decrease(heap, malloc, bsize);
-  if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { // huge page stats are accounted for in `_mi_page_retire`
-    mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
-  }
-#endif
-
   if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
     mi_block_t* block = (mi_block_t*)(p);
     if (mi_unlikely(mi_check_is_double_free(page,block))) return;
     mi_check_padding(page, block);
+    mi_stat_free(page, block);
     #if (MI_DEBUG!=0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
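
The alloc.c hunks above consolidate the MI_STAT bookkeeping for the free path into mi_stat_free and mi_stat_huge_free, so mi_free and mi_free_generic decrease the same counters that allocation increased. A quick way to observe the balanced counters (an illustrative sketch, not part of the commit; it assumes a build with MI_STAT enabled):

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(1000);       // increases the "malloc" and bin counters
  mi_free(p);                      // now decreases them along the same path
  mi_stats_print_out(NULL, NULL);  // NULL sink prints to the default output
  return 0;
}
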
src/heap.c (15 lines changed)
@@ -114,7 +114,7 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
 
 static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
-  if (!mi_heap_is_initialized(heap)) return;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   _mi_deferred_free(heap, collect >= MI_FORCE);
 
   // note: never reclaim on collect but leave it to threads that need storage to reclaim
@@ -213,6 +213,7 @@ uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
 
 // zero out the page queues
 static void mi_heap_reset_pages(mi_heap_t* heap) {
+  mi_assert_internal(heap != NULL);
   mi_assert_internal(mi_heap_is_initialized(heap));
   // TODO: copy full empty heap instead?
   memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
@@ -228,6 +229,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
 static void mi_heap_free(mi_heap_t* heap) {
   mi_assert(heap != NULL);
   mi_assert_internal(mi_heap_is_initialized(heap));
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   if (mi_heap_is_backing(heap)) return; // dont free the backing heap
 
   // reset default
@@ -310,7 +312,7 @@ void mi_heap_destroy(mi_heap_t* heap) {
   mi_assert(mi_heap_is_initialized(heap));
   mi_assert(heap->no_reclaim);
   mi_assert_expensive(mi_heap_is_valid(heap));
-  if (!mi_heap_is_initialized(heap)) return;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
   if (!heap->no_reclaim) {
     // don't free in case it may contain reclaimed pages
     mi_heap_delete(heap);
@@ -366,7 +368,7 @@ void mi_heap_delete(mi_heap_t* heap)
   mi_assert(heap != NULL);
   mi_assert(mi_heap_is_initialized(heap));
   mi_assert_expensive(mi_heap_is_valid(heap));
-  if (!mi_heap_is_initialized(heap)) return;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
 
   if (!mi_heap_is_backing(heap)) {
     // tranfer still used pages to the backing heap
@@ -381,8 +383,9 @@ void mi_heap_delete(mi_heap_t* heap)
 }
 
 mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
+  mi_assert(heap != NULL);
   mi_assert(mi_heap_is_initialized(heap));
-  if (!mi_heap_is_initialized(heap)) return NULL;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
   mi_assert_expensive(mi_heap_is_valid(heap));
   mi_heap_t* old = mi_get_default_heap();
   _mi_heap_set_default_direct(heap);
@@ -408,7 +411,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
 
 bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
   mi_assert(heap != NULL);
-  if (!mi_heap_is_initialized(heap)) return false;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
   return (heap == mi_heap_of_block(p));
 }
 
@@ -426,7 +429,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
 
 bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
   mi_assert(heap != NULL);
-  if (!mi_heap_is_initialized(heap)) return false;
+  if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
   if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers
   bool found = false;
   mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
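
The heap.c hunks above add an explicit heap==NULL || !mi_heap_is_initialized(heap) guard next to the existing asserts, so in release builds the heap API returns early (false/NULL/no-op) instead of dereferencing a NULL or uninitialized heap. A small sketch of what that tolerates, using the public heap API (illustrative only; debug builds may still trip the asserts shown in the diff):

#include <mimalloc.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p = mi_heap_malloc(heap, 64);
  mi_free(p);
  mi_heap_delete(heap);

  // With the new guards these calls simply return instead of crashing.
  mi_heap_collect(NULL, true);
  mi_heap_delete(NULL);
  return 0;
}
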
@@ -722,14 +722,17 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
   mi_page_queue_t* pq = mi_page_queue(heap,size);
   mi_page_t* page = pq->first;
   if (page != NULL) {
-    if ((MI_SECURE >= 3) && page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
-      // in secure mode, we extend half the time to increase randomness
+    #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
+    if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
       mi_page_extend_free(heap, page, heap->tld);
       mi_assert_internal(mi_page_immediate_available(page));
     }
-    else {
+    else
+    #endif
+    {
       _mi_page_free_collect(page,false);
     }
 
     if (mi_page_immediate_available(page)) {
       page->retire_expire = 0;
       return page; // fast path
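
The hunk above turns the runtime (MI_SECURE >= 3) test into a preprocessor block, so non-secure builds compile only the _mi_page_free_collect branch. The else / #endif / { split is the usual idiom for letting one brace block serve both configurations; a standalone sketch of the shape (hypothetical names, not mimalloc code):

#include <stdbool.h>
#include <stdio.h>

#define SECURE_LEVEL 0                    /* stand-in for MI_SECURE */

static bool coin_flip(void) { return false; }  /* stand-in for the random test */
static void extend_page(void)  { puts("extend"); }
static void collect_page(void) { puts("collect"); }

static void find_page(void) {
  #if (SECURE_LEVEL >= 3)                 /* only compiled into secure builds */
  if (coin_flip()) {
    extend_page();
  }
  else
  #endif
  {                                       /* non-secure builds always take this block */
    collect_page();
  }
}

int main(void) { find_page(); return 0; }
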
@@ -1324,7 +1324,7 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
 
   // claim it and free
   mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized.
-  // paranoia: if this it the last reference, the cas should always succeed
+  // paranoia: if this is the last reference, the cas should always succeed
   uintptr_t expected_tid = 0;
   if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) {
     mi_block_set_next(page, block, page->free);
@@ -1333,13 +1333,6 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
     page->is_zero = false;
     mi_assert(page->used == 0);
     mi_tld_t* tld = heap->tld;
-    const size_t bsize = mi_page_usable_block_size(page);
-    if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
-      _mi_stat_decrease(&tld->stats.large, bsize);
-    }
-    else {
-      _mi_stat_decrease(&tld->stats.huge, bsize);
-    }
     // mi_segments_track_size((long)segment->segment_size, tld);
     _mi_segment_page_free(page, true, &tld->segments);
   }
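
Together with the alloc.c hunk that adds the mi_stat_huge_free call, the two hunks above move the large/huge counter decrease out of _mi_segment_huge_page_free, so a huge block is accounted for exactly once on free. An illustrative way to exercise that path (not part of the commit; the 64 MiB size is just an example well above MI_LARGE_OBJ_SIZE_MAX):

#include <mimalloc.h>

int main(void) {
  // A huge allocation gets its own MI_SEGMENT_HUGE segment; per the diff above it
  // is freed via _mi_free_block_mt -> mi_stat_huge_free -> _mi_segment_huge_page_free.
  void* p = mi_malloc(64 * 1024 * 1024);
  mi_free(p);
  mi_stats_print_out(NULL, NULL);
  return 0;
}
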
@@ -182,11 +182,12 @@ static void invalid_free();
 static void test_aslr(void);
 static void test_process_info(void);
 static void test_reserved(void);
+static void negative_stat(void);
 
 
 int main() {
   mi_version();
-
+  mi_stats_reset();
   // detect double frees and heap corruption
   // double_free1();
   // double_free2();
@@ -195,28 +196,29 @@ int main() {
   // test_aslr();
   // invalid_free();
   // test_reserved();
 
+  // negative_stat();
   void* p1 = malloc(78);
   void* p2 = malloc(24);
   free(p1);
   p1 = mi_malloc(8);
-  //char* s = strdup("hello\n");
+  char* s = strdup("hello\n");
   free(p2);
 
   p2 = malloc(16);
   p1 = realloc(p1, 32);
   free(p1);
   free(p2);
-  //free(s);
-  //mi_collect(true);
+  free(s);
 
   /* now test if override worked by allocating/freeing across the api's*/
   //p1 = mi_malloc(32);
   //free(p1);
   //p2 = malloc(32);
   //mi_free(p2);
   //mi_free(p2);
+  mi_collect(true);
   mi_stats_print(NULL);
-  test_process_info();
+  // test_process_info();
   return 0;
 }
@@ -336,3 +338,13 @@ static void test_reserved(void) {
   p3 = malloc(1*GiB);
   free(p4);
 }
+
+
+
+static void negative_stat(void) {
+  int* p = mi_malloc(60000);
+  mi_stats_print_out(NULL, NULL);
+  *p = 100;
+  mi_free(p);
+  mi_stats_print_out(NULL, NULL);
+}
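
The new negative_stat test prints the statistics once while the 60000-byte block is live and once after it is freed; with the accounting changes above the second report should show the counters back at their previous values rather than underflowing. If a harness needs to inspect that output instead of reading stderr, mi_stats_print_out accepts a custom sink (a sketch, not part of the commit):

#include <mimalloc.h>
#include <stdio.h>

// mi_output_fun-compatible sink: here it just forwards to a FILE*,
// but a test could scan the text for negative counts instead.
static void capture(const char* msg, void* arg) {
  fputs(msg, (FILE*)arg);
}

int main(void) {
  int* p = mi_malloc(60000);
  mi_stats_print_out(capture, stdout);
  *p = 100;
  mi_free(p);
  mi_stats_print_out(capture, stdout);
  return 0;
}
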
@@ -123,7 +123,7 @@ static void free_items(void* p) {
 
 static void stress(intptr_t tid) {
   //bench_start_thread();
-  uintptr_t r = (tid * 43); // rand();
+  uintptr_t r = ((tid + 1) * 43); // rand();
   const size_t max_item_shift = 5; // 128
   const size_t max_item_retained_shift = max_item_shift + 2;
   size_t allocs = 100 * ((size_t)SCALE) * (tid % 8 + 1); // some threads do more
@@ -243,14 +243,18 @@ int main(int argc, char** argv) {
 
   // Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
   srand(0x7feb352d);
-  // mi_stats_reset();
+  #ifndef NDEBUG
+  mi_stats_reset();
+  #endif
   #ifdef STRESS
     test_stress();
   #else
     test_leak();
   #endif
 
-  // mi_collect(true);
+  #ifndef NDEBUG
+  mi_collect(true);
+  #endif
   mi_stats_print(NULL);
   //bench_end_program();
   return 0;
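
Seeding with tid * 43 gives thread 0 an all-zero seed; for the xorshift-style generators such stress tests typically use, zero is a fixed point, so thread 0 would see no randomness at all, which is what the (tid + 1) * 43 change avoids. A toy illustration (this xorshift is not the test's actual generator):

#include <stdint.h>
#include <stdio.h>

// Toy xorshift64: a zero state maps to zero forever.
static uint64_t xorshift64(uint64_t* s) {
  uint64_t x = *s;
  x ^= x << 13;
  x ^= x >> 7;
  x ^= x << 17;
  return *s = x;
}

int main(void) {
  uint64_t old_seed = 0 * 43;        // thread 0 with the old expression
  uint64_t new_seed = (0 + 1) * 43;  // thread 0 with the new expression
  for (int i = 0; i < 3; i++) {
    printf("old=%llu new=%llu\n",
           (unsigned long long)xorshift64(&old_seed),
           (unsigned long long)xorshift64(&new_seed));
  }
  return 0;
}
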