diff --git a/src/alloc.c b/src/alloc.c
index ac167891..da647ca6 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -46,11 +46,12 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
     mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+    mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
     if (page->free_is_zero) {
       block->next = 0;
+      mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
     }
     else {
-      mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
       _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
     }
   }
diff --git a/src/page.c b/src/page.c
index c61677b6..93636b0e 100644
--- a/src/page.c
+++ b/src/page.c
@@ -663,7 +663,12 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   page->keys[1] = _mi_heap_random_next(heap);
   #endif
   page->free_is_zero = page->is_zero_init;
-  mi_assert_expensive(!page->is_zero_init || mi_mem_is_zero(page_start, page_size));
+  #if MI_DEBUG>2
+  if (page->is_zero_init) {
+    mi_track_mem_defined(page_start, page_size);
+    mi_assert_expensive(!page->is_zero_init || mi_mem_is_zero(page_start, page_size));
+  }
+  #endif
 
   mi_assert_internal(page->capacity == 0);
   mi_assert_internal(page->free == NULL);
diff --git a/test/test-stress.c b/test/test-stress.c
index 0dccec0a..3ecb67bd 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -39,7 +39,7 @@ static int ITER = 50;   // N full iterations destructing and re-creating a
 
 #define STRESS   // undefine for leak test
 
-static bool allow_large_objects = false;    // allow very large objects? (set to `true` if SCALE>100)
+static bool allow_large_objects = true;     // allow very large objects? (set to `true` if SCALE>100)
 
 static size_t use_one_size = 0;             // use single object size of `N * sizeof(uintptr_t)`?
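
For context on the technique used in both hunks: `mi_track_mem_defined` is mimalloc's memory-tracker hook, which in a Valgrind-enabled build expands to `VALGRIND_MAKE_MEM_DEFINED`. When a block (or a freshly mapped page) is already known to be zero, the allocator can skip re-zeroing it and instead just annotate the range as defined for the tracker; this is also why the `page.c` hunk marks the page defined before the expensive `mi_mem_is_zero` scan, so the debug assertion itself does not trip "uninitialized read" reports. A minimal sketch of that pattern, assuming a Valgrind build (the `my_track_mem_defined` and `give_out_zeroed_block` names are illustrative, not mimalloc's):

```c
#include <stddef.h>
#include <string.h>

#if defined(USE_VALGRIND)
#include <valgrind/memcheck.h>
// Tell memcheck the range is addressable AND holds defined (valid) data.
#define my_track_mem_defined(p, n)  VALGRIND_MAKE_MEM_DEFINED((p), (n))
#else
#define my_track_mem_defined(p, n)  ((void)(p), (void)(n))  // no-op without a tracker
#endif

// Hand out a block requested with zero-initialization. If the memory is
// already known to be zero (e.g. fresh OS pages), skip the memset and only
// annotate it for the tracker; otherwise actually zero the stale contents.
static void* give_out_zeroed_block(void* block, size_t size, int already_zero) {
  if (already_zero) {
    my_track_mem_defined(block, size);  // no write issued, just an annotation
  }
  else {
    memset(block, 0, size);             // real zeroing of reused memory
  }
  return block;
}
```

The annotation is free in normal builds (it compiles away) but keeps the tracker's view consistent with the allocator's zero-initialization guarantee, avoiding both redundant memsets and false positives.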