diff --git a/CMakeLists.txt b/CMakeLists.txt
index 74c1f291..9a33b6c8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,7 +6,7 @@ set(CMAKE_CXX_STANDARD 17)
 option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF)
 option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF)
-option(MI_PADDING "Enable padding to detect heap block overflow (used only in DEBUG mode or with Valgrind)" ON)
+option(MI_PADDING "Enable padding to detect heap block overflow (always on in DEBUG or SECURE mode, or with Valgrind/ASAN)" OFF)
 option(MI_OVERRIDE "Override the standard malloc interface (e.g. define entry points for malloc() etc)" ON)
 option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF)
 option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF)
@@ -51,6 +51,8 @@ set(mi_sources
     src/options.c
     src/init.c)
 
+set(mi_cflags "")
+set(mi_libraries "")
 
 # -----------------------------------------------------------------------------
 # Convenience: set default build type depending on the build directory
@@ -141,10 +143,15 @@ if(MI_VALGRIND)
 endif()
 
 if(MI_ASAN)
+  if (APPLE AND MI_OVERRIDE)
+    set(MI_ASAN OFF)
+    message(WARNING "Cannot enable address sanitizer support on macOS if MI_OVERRIDE is ON (MI_ASAN=OFF)")
+  endif()
   if (MI_VALGRIND)
     set(MI_ASAN OFF)
     message(WARNING "Cannot enable address sanitizer support with also Valgrind support enabled (MI_ASAN=OFF)")
-  else()
+  endif()
+  if(MI_ASAN)
     CHECK_INCLUDE_FILES("sanitizer/asan_interface.h" MI_HAS_ASANH)
     if (NOT MI_HAS_ASANH)
       set(MI_ASAN OFF)
@@ -154,7 +161,7 @@ if(MI_ASAN)
       message(STATUS "Compile with address sanitizer support (MI_ASAN=ON)")
       list(APPEND mi_defines MI_ASAN=1)
       list(APPEND mi_cflags -fsanitize=address)
-      list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=address)
+      list(APPEND mi_libraries -fsanitize=address)
     endif()
   endif()
 endif()
@@ -179,9 +186,9 @@ if(MI_DEBUG_FULL)
   list(APPEND mi_defines MI_DEBUG=3)   # full invariant checking
 endif()
 
-if(NOT MI_PADDING)
-  message(STATUS "Disable padding of heap blocks in debug mode (MI_PADDING=OFF)")
-  list(APPEND mi_defines MI_PADDING=0)
+if(MI_PADDING)
+  message(STATUS "Enable padding of heap blocks explicitly (MI_PADDING=ON)")
+  list(APPEND mi_defines MI_PADDING=1)
 endif()
 
 if(MI_XMALLOC)
@@ -199,7 +206,7 @@ if(MI_DEBUG_TSAN)
     message(STATUS "Build with thread sanitizer (MI_DEBUG_TSAN=ON)")
     list(APPEND mi_defines MI_TSAN=1)
     list(APPEND mi_cflags -fsanitize=thread -g -O1)
-    list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=thread)
+    list(APPEND mi_libraries -fsanitize=thread)
   else()
     message(WARNING "Can only use thread sanitizer with clang (MI_DEBUG_TSAN=ON but ignored)")
   endif()
@@ -210,7 +217,7 @@ if(MI_DEBUG_UBSAN)
   if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
     message(STATUS "Build with undefined-behavior sanitizer (MI_DEBUG_UBSAN=ON)")
     list(APPEND mi_cflags -fsanitize=undefined -g -fno-sanitize-recover=undefined)
-    list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=undefined)
+    list(APPEND mi_libraries -fsanitize=undefined)
     if (NOT MI_USE_CXX)
       message(STATUS "(switch to use C++ due to MI_DEBUG_UBSAN)")
       set(MI_USE_CXX "ON")
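The hunks above route the sanitizer run-times through the new `mi_libraries` list (appending to `CMAKE_EXE_LINKER_FLAGS`, a plain cache string, never reliably reached the targets). A minimal way to exercise an ASan-enabled build; the test program and file name are hypothetical and not part of this diff:

```c
// asan-probe.c: assumes a library configured with -DMI_ASAN=ON and a
// clang/gcc toolchain; illustrative only.
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  char* p = (char*)mi_malloc(16);
  mi_free(p);   // mi_track_free_size poisons the freed block for ASan
  // p[0] = 1;  // uncommenting this use-after-free should now produce an
                // ASan "use-after-poison" report instead of silent corruption
  printf("ok\n");
  return 0;
}
```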
@@ -363,7 +370,8 @@ if(MI_BUILD_SHARED)
   set_target_properties(mimalloc PROPERTIES VERSION ${mi_version} SOVERSION ${mi_version_major} OUTPUT_NAME ${mi_basename} )
   target_compile_definitions(mimalloc PRIVATE ${mi_defines} MI_SHARED_LIB MI_SHARED_LIB_EXPORT)
   target_compile_options(mimalloc PRIVATE ${mi_cflags})
+  target_link_libraries(mimalloc PRIVATE ${mi_libraries})
   target_include_directories(mimalloc PUBLIC
       $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
       $<INSTALL_INTERFACE:${mi_install_incdir}>
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index a68e6966..ecc006c9 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -173,6 +173,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool        _mi_free_delayed_block(mi_block_t* block);
 void        _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept;  // for runtime integration
+void        _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
 
 #if MI_DEBUG>1
 bool        _mi_page_is_valid(mi_page_t* page);
diff --git a/include/mimalloc-track.h b/include/mimalloc-track.h
index f60d7acd..35fb786a 100644
--- a/include/mimalloc-track.h
+++ b/include/mimalloc-track.h
@@ -13,6 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // address sanitizer, or other memory checkers.
 // ------------------------------------------------------
 
+
 #if MI_VALGRIND
 
 #define MI_TRACK_ENABLED 1
@@ -23,8 +24,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 #define mi_track_malloc(p,size,zero)       VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
 #define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
-#define mi_track_free(p)                   VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
-#define mi_track_free_size(p,_size)        mi_track_free(p)
+#define mi_track_free_size(p,_size)        VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
 #define mi_track_mem_defined(p,size)       VALGRIND_MAKE_MEM_DEFINED(p,size)
 #define mi_track_mem_undefined(p,size)     VALGRIND_MAKE_MEM_UNDEFINED(p,size)
 #define mi_track_mem_noaccess(p,size)      VALGRIND_MAKE_MEM_NOACCESS(p,size)
@@ -38,7 +38,6 @@ terms of the MIT license. A copy of the license can be found in the file
 
 #define mi_track_malloc(p,size,zero)       ASAN_UNPOISON_MEMORY_REGION(p,size)
 #define mi_track_resize(p,oldsize,newsize) ASAN_POISON_MEMORY_REGION(p,oldsize); ASAN_UNPOISON_MEMORY_REGION(p,newsize)
-#define mi_track_free(p)                   ASAN_POISON_MEMORY_REGION(p,mi_usable_size(p))
 #define mi_track_free_size(p,size)         ASAN_POISON_MEMORY_REGION(p,size)
 #define mi_track_mem_defined(p,size)       ASAN_UNPOISON_MEMORY_REGION(p,size)
 #define mi_track_mem_undefined(p,size)     ASAN_UNPOISON_MEMORY_REGION(p,size)
@@ -51,7 +50,6 @@ terms of the MIT license. A copy of the license can be found in the file
 
 #define mi_track_malloc(p,size,zero)
 #define mi_track_resize(p,oldsize,newsize)
-#define mi_track_free(p)
 #define mi_track_free_size(p,_size)
 #define mi_track_mem_defined(p,size)
 #define mi_track_mem_undefined(p,size)
@@ -59,4 +57,13 @@ terms of the MIT license. A copy of the license can be found in the file
 
 #endif
 
+#ifndef mi_track_free
+#define mi_track_free(p)                   mi_track_free_size(p,mi_usable_size(p))
+#endif
+
+#ifndef mi_track_resize
+#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
+#endif
+
+
 #endif
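With the fallbacks at the end of the header, `mi_track_free` (and, for trackers that lack an in-place resize, `mi_track_resize`) is derived from the primitive `mi_track_free_size` hook instead of being repeated per tracker. A sketch of that composition with an invented tracker (all `my_...` names are hypothetical):

```c
#include <stdio.h>
#include <stddef.h>

static size_t my_usable_size(const void* p) { (void)p; return 16; }  // stand-in

// Primitive hook, as a tracker section would define it:
#define my_track_free_size(p,size) \
  fprintf(stderr, "free %p (%zu bytes)\n", (void*)(p), (size_t)(size))

// Derived form, mirroring the new #ifndef fallback in mimalloc-track.h:
#ifndef my_track_free
#define my_track_free(p) my_track_free_size(p, my_usable_size(p))
#endif

int main(void) {
  char block[16];
  my_track_free(block);  // expands to my_track_free_size(block, 16)
  return 0;
}
```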
diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index f3af528e..faf0f709 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -58,11 +58,16 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 
 // Reserve extra padding at the end of each block to be more resilient against heap block overflows.
-// The padding can detect byte-precise buffer overflow on free.
-#if !defined(MI_PADDING) && (MI_DEBUG>=1 || MI_VALGRIND)
+// The padding can detect buffer overflow on free.
+#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || MI_VALGRIND || MI_ASAN)
 #define MI_PADDING 1
 #endif
 
+// Check padding bytes; allows byte-precise buffer overflow detection
+#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_PADDING_CHECK 1
+#endif
+
 
 // Encoded free lists allow detection of corrupted free lists
 // and can detect buffer overflows, modify after free, and double `free`s.
@@ -290,8 +295,8 @@ typedef struct mi_page_s {
   uint32_t xblock_size;   // size available in each block (always `>0`)
   mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
 
-  #ifdef MI_ENCODE_FREELIST
-  uintptr_t keys[2];      // two random keys to encode the free lists (see `_mi_block_next`)
+  #if (MI_ENCODE_FREELIST || MI_PADDING)
+  uintptr_t keys[2];      // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
   #endif
 
   _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads
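The split gives two levels: `MI_PADDING` only reserves and encodes the padding metadata (enough for Valgrind/ASan red zones and an exact `mi_usable_size`), while `MI_PADDING_CHECK` additionally fills and verifies the padding bytes for byte-precise overflow detection. For reference, the metadata both flags act on is the existing `mi_padding_t`, reproduced approximately from mimalloc-types.h (it is not modified by this diff):

```c
typedef struct mi_padding_s {
  uint32_t canary;  // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;   // padding bytes before the block: mi_usable_size(p) - delta == exact allocated size
} mi_padding_t;
#define MI_PADDING_SIZE  (sizeof(mi_padding_t))
```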
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 9fe82890..76b6fc3b 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -46,7 +46,8 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
     oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
     p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
+    // zero afterwards as only the area from the aligned_p may be committed!
     if (p == NULL) return NULL;
   }
   else {
     // otherwise over-allocate
@@ -61,7 +61,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   mi_assert_internal(adjust < alignment);
   void* aligned_p = (void*)((uintptr_t)p + adjust);
   if (aligned_p != p) {
-    mi_page_set_has_aligned(_mi_ptr_page(p), true);
+    mi_page_t* page = _mi_ptr_page(p);
+    mi_page_set_has_aligned(page, true);
+    _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
   }
 
   mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
@@ -78,7 +80,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
 
   #if MI_TRACK_ENABLED
   if (p != aligned_p) {
-    mi_track_free_size(p, oversize);
+    mi_track_free(p);
     mi_track_malloc(aligned_p, size, zero);
   }
   else {
diff --git a/src/alloc-override.c b/src/alloc-override.c
index 84a0d19d..40098ac5 100644
--- a/src/alloc-override.c
+++ b/src/alloc-override.c
@@ -57,7 +57,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
   // functions that are interposed (or the interposing does not work)
   #define MI_OSX_IS_INTERPOSED
 
-  mi_decl_externc static size_t mi_malloc_size_checked(void *p) {
+  mi_decl_externc size_t mi_malloc_size_checked(void *p) {
     if (!mi_is_in_heap_region(p)) return 0;
     return mi_usable_size(p);
   }
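In the aligned fallback above, the returned pointer is advanced `adjust` bytes into the block, so `_mi_padding_shrink(page, p, adjust + size)` must guarantee at least `adjust + size` usable bytes in front of the canary, or the exact-size padding would flag those bytes as an overflow on free. A simplified model of the arithmetic (values invented; this is not mimalloc's exact code):

```c
#include <assert.h>
#include <stddef.h>

// The padding's delta makes the reported usable size exact:
//   usable = bsize - delta.  Shrinking delta grows the usable area.
static size_t shrink_delta(size_t bsize, size_t delta, size_t min_size) {
  if (bsize - delta >= min_size) return delta;  // usually enough space already
  assert(bsize >= min_size);
  return bsize - min_size;                      // new, smaller delta
}

int main(void) {
  // e.g. a 48-byte block allocated for an exact 40 bytes (delta = 8) that
  // must keep 44 usable bytes after a 4-byte alignment adjustment:
  assert(shrink_delta(48, 8, 44) == 4);
  return 0;
}
```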
diff --git a/src/alloc.c b/src/alloc.c
index 86453f15..5fc7ec21 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -70,20 +70,22 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   }
 #endif
 
-#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
+#if MI_PADDING // && !MI_TRACK_ENABLED
   mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
   ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
-  #if (MI_DEBUG>1)
+  #if (MI_DEBUG>=2)
   mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
   #endif
+  mi_track_mem_defined(padding,sizeof(mi_padding_t));  // note: re-enable since mi_page_usable_block_size may set noaccess
   padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
   padding->delta  = (uint32_t)(delta);
+  #if MI_PADDING_CHECK
   if (!mi_page_is_huge(page)) {
     uint8_t* fill = (uint8_t*)padding - delta;
     const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
     for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
   }
+  #endif
 #endif
 
   return block;
@@ -96,21 +98,23 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
 #endif
   mi_assert(size <= MI_SMALL_SIZE_MAX);
-#if (MI_PADDING)
-  if (size == 0) {
-    size = sizeof(void*);
-  }
-#endif
+  #if (MI_PADDING)
+  if (size == 0) { size = sizeof(void*); }
+  #endif
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
   void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+  #if MI_PADDING
+  mi_assert_internal(p == NULL || mi_usable_size(p) == size);
+  #else
   mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
-#if MI_STAT>1
+  #endif
+  #if MI_STAT>1
   if (p != NULL) {
     if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
-#endif
-  mi_track_malloc(p,size,zero);
+  #endif
+  if (p!=NULL) { mi_track_malloc(p,size,zero); }
   return p;
 }
 
@@ -133,14 +137,18 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
     mi_assert(heap!=NULL);
     mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
     void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment);  // note: size can overflow but it is detected in malloc_generic
+    #if MI_PADDING
+    mi_assert_internal(p == NULL || mi_usable_size(p) == size);
+    #else
     mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+    #endif
     #if MI_STAT>1
     if (p != NULL) {
       if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
       mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
     }
     #endif
-    mi_track_malloc(p,size,zero);
+    if (p!=NULL) { mi_track_malloc(p,size,zero); }
     return p;
   }
 }
@@ -225,7 +233,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
 
 // ---------------------------------------------------------------------------
 // Check for heap block overflow by setting up padding at the end of the block
 // ---------------------------------------------------------------------------
-#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
+#if MI_PADDING // && !MI_TRACK_ENABLED
 static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
   *bsize = mi_page_usable_block_size(page);
   const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
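The canary written in `_mi_page_malloc` is the block pointer encoded with the page's two random keys; `mi_page_decode_padding` recomputes it on free, so a sequential overflow cannot forge a valid value without knowing the keys. A self-contained sketch of such a keyed encoding (a simplification; the real `mi_ptr_encode` lives in mimalloc-internal.h):

```c
#include <stdint.h>
#include <stdio.h>

// Simplified keyed encoding in the spirit of mi_ptr_encode: XOR with one
// key, rotate, add the other. An overwrite of the stored 32-bit canary is
// detected because re-encoding the block pointer no longer matches.
static inline uintptr_t rotl(uintptr_t x, unsigned n) {
  return (x << n) | (x >> (8 * sizeof(uintptr_t) - n));
}
static uint32_t canary_of(const void* block, const uintptr_t keys[2]) {
  return (uint32_t)(rotl((uintptr_t)block ^ keys[1], 17) + keys[0]);
}

int main(void) {
  const uintptr_t keys[2] = { 0x9e3779b9u, 0x85ebca6bu };  // per-page random in mimalloc
  int block;
  uint32_t stored = canary_of(&block, keys);
  printf("canary valid: %d\n", stored == canary_of(&block, keys));
  return 0;
}
```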
@@ -249,6 +257,40 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
   return (ok ? bsize - delta : 0);
 }
 
+// When a non-thread-local block is freed, it becomes part of the thread delayed free
+// list that is freed later by the owning heap. If the exact usable size is too small to
+// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
+// so it will later not trigger an overflow error in `mi_free_block`.
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+  size_t bsize;
+  size_t delta;
+  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+  mi_assert_internal(ok);
+  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
+  mi_assert_internal(bsize >= min_size);
+  if (bsize < min_size) return;  // should never happen
+  size_t new_delta = (bsize - min_size);
+  mi_assert_internal(new_delta < bsize);
+  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
+  mi_track_mem_defined(padding,sizeof(mi_padding_t));
+  padding->delta = (uint32_t)new_delta;
+  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+}
+#else
+static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
+  MI_UNUSED(block);
+  return mi_page_usable_block_size(page);
+}
+
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+  MI_UNUSED(page);
+  MI_UNUSED(block);
+  MI_UNUSED(min_size);
+}
+#endif
+
+#if MI_PADDING && MI_PADDING_CHECK
+
 static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
   size_t bsize;
   size_t delta;
@@ -281,39 +323,13 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
   }
 }
 
-// When a non-thread-local block is freed, it becomes part of the thread delayed free
-// list that is freed later by the owning heap. If the exact usable size is too small to
-// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
-// so it will later not trigger an overflow error in `mi_free_block`.
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
-  size_t bsize;
-  size_t delta;
-  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
-  mi_assert_internal(ok);
-  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
-  mi_assert_internal(bsize >= min_size);
-  if (bsize < min_size) return;  // should never happen
-  size_t new_delta = (bsize - min_size);
-  mi_assert_internal(new_delta < bsize);
-  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
-  padding->delta = (uint32_t)new_delta;
-}
 #else
+
 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
   MI_UNUSED(page);
   MI_UNUSED(block);
 }
 
-static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
-  MI_UNUSED(block);
-  return mi_page_usable_block_size(page);
-}
-
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
-  MI_UNUSED(page);
-  MI_UNUSED(block);
-  MI_UNUSED(min_size);
-}
 #endif
 
 // only maintain stats for smaller objects if requested
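`_mi_padding_shrink` moves up here (renamed with the `_mi_` prefix and exported) so the aligned path in alloc-aligned.c can call it too, and the `#else` stubs keep call sites free of `#if MI_PADDING` guards. The `sizeof(mi_block_t)` minimum used by the multi-threaded free below exists because a freed block stores its delayed-free link inside its own usable area; a self-contained sketch of that intrusive-list constraint:

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

// Mimics mi_block_t: the only field is the (normally encoded) next link,
// written into the freed block itself, hence every block needs at least
// sizeof(void*) usable bytes, which _mi_padding_shrink guarantees.
typedef struct block_s { uintptr_t next; } block_t;

static void delayed_push(block_t** head, void* freed_block) {
  block_t* b = (block_t*)freed_block;
  b->next = (uintptr_t)(*head);
  *head = b;
}

int main(void) {
  static uintptr_t blockmem[2];  // stand-in for a freed heap block
  block_t* head = NULL;
  delayed_push(&head, blockmem);
  assert(head == (block_t*)blockmem);
  return 0;
}
```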
@@ -377,7 +393,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   // The padding check may access the non-thread-owned page for the key values.
   // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
-  mi_padding_shrink(page, block, sizeof(mi_block_t));   // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
+  _mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
 
   // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* segment = _mi_page_segment(page);
@@ -682,9 +698,10 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
   const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
   if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
-    // todo: adjust potential padding to reflect the new size?
-    mi_track_free_size(p, size);
-    mi_track_malloc(p,newsize,true);
+    mi_assert_internal(p!=NULL);
+    // todo: do not track as the usable size is still the same in the free; adjust potential padding?
+    // mi_track_free(p);
+    // mi_track_malloc(p,newsize,true);
     return p;  // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap,newsize);
@@ -940,7 +957,7 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
 }
 
-mi_decl_nodiscard mi_decl_restrict extern inline void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
   void* p = mi_heap_malloc(heap,size);
   if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false);
   return p;
@@ -951,7 +968,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
 }
 
-mi_decl_nodiscard mi_decl_restrict extern inline void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
   size_t total;
   if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
     mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
@@ -1024,8 +1041,8 @@ void* _mi_externs[] = {
   (void*)&mi_zalloc_small,
   (void*)&mi_heap_malloc,
   (void*)&mi_heap_zalloc,
-  (void*)&mi_heap_malloc_small,
-  (void*)&mi_heap_alloc_new,
-  (void*)&mi_heap_alloc_new_n
+  (void*)&mi_heap_malloc_small
+  // (void*)&mi_heap_alloc_new,
+  // (void*)&mi_heap_alloc_new_n
 };
 #endif
diff --git a/src/init.c b/src/init.c
index c416208c..5a82f2f7 100644
--- a/src/init.c
+++ b/src/init.c
@@ -22,7 +22,7 @@ const mi_page_t _mi_page_empty = {
   0,       // used
   0,       // xblock_size
   NULL,    // local_free
-  #if MI_ENCODE_FREELIST
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
   { 0, 0 },
   #endif
   MI_ATOMIC_VAR_INIT(0), // xthread_free
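In the realloc fast path above, a shrinking reallocation that stays in the same block no longer re-announces the memory to the tracker, since the block and its tracked red zone are unchanged; the commented-out calls preserve the open to-do about adjusting the padding instead. A usage-level illustration (sizes invented; the exact usable size reported assumes a padded build):

```c
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  void* p = mi_malloc(32);
  void* q = mi_realloc(p, 24);  // 24 >= 32/2 and fits: expected to stay in place
  printf("in place: %d, usable: %zu\n", p == q, mi_usable_size(q));
  mi_free(q);
  return 0;
}
```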
diff --git a/src/page.c b/src/page.c
index 4250ff35..5f134785 100644
--- a/src/page.c
+++ b/src/page.c
@@ -663,7 +663,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
   mi_assert_internal(page->reserved > 0);
-  #ifdef MI_ENCODE_FREELIST
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
   page->keys[0] = _mi_heap_random_next(heap);
   page->keys[1] = _mi_heap_random_next(heap);
   #endif
@@ -683,7 +683,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->prev == NULL);
   mi_assert_internal(page->retire_expire == 0);
   mi_assert_internal(!mi_page_has_aligned(page));
-  #if (MI_ENCODE_FREELIST)
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
   mi_assert_internal(page->keys[0] != 0);
   mi_assert_internal(page->keys[1] != 0);
   #endif
diff --git a/test/test-api.c b/test/test-api.c
index 884a433b..20050ce8 100644
--- a/test/test-api.c
+++ b/test/test-api.c
@@ -51,7 +51,7 @@ bool test_stl_allocator2(void);
 // ---------------------------------------------------------------------------
 int main(void) {
   mi_option_disable(mi_option_verbose);
-  
+
   // ---------------------------------------------------
   // Malloc
   // ---------------------------------------------------
@@ -149,7 +149,8 @@ int main(void) {
     for (size_t align = 1; align <= MI_ALIGNMENT_MAX && ok; align *= 2) {
       void* ps[8];
       for (int i = 0; i < 8 && ok; i++) {
-        ps[i] = mi_malloc_aligned(align*5 /*size*/, align);
+        ps[i] = mi_malloc_aligned(align*13  // size
+                                  , align);
         if (ps[i] == NULL || (uintptr_t)(ps[i]) % align != 0) {
           ok = false;
         }
diff --git a/test/test-stress.c b/test/test-stress.c
index 133ba50c..8b96a5ae 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -244,6 +244,9 @@ int main(int argc, char** argv) {
   //printf("(reserve huge: %i\n)", res);
   //bench_start_program();
 
+#ifndef USE_STD_MALLOC
+  mi_stats_reset();
+#endif
   // Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
   srand(0x7feb352d);
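To see the byte-precise check that `MI_PADDING_CHECK` enables end to end, a small probe like the following (hypothetical, not part of the test suite) should be reported as a heap block overflow on free in a debug or `MI_SECURE>=3` build:

```c
#include <mimalloc.h>
#include <string.h>

int main(void) {
  char* p = (char*)mi_malloc(10);
  memset(p, 0, 11);  // one byte past the requested size, into the fill bytes
  mi_free(p);        // the padding check on free detects the overwritten byte
  return 0;
}
```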