From 6eeb81ee05972ea5126ff03a28bea89249f218cf Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 28 Oct 2022 19:54:56 -0700 Subject: [PATCH 01/12] initial progress on valgrind integration --- include/mimalloc-internal.h | 11 ++++++++-- include/mimalloc-track.h | 41 +++++++++++++++++++++++++++++++++++++ src/alloc.c | 34 +++++++++++++++++++++++------- test/CMakeLists.txt | 5 +++++ 4 files changed, 82 insertions(+), 9 deletions(-) create mode 100644 include/mimalloc-track.h diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index 9fa37108..c1716b44 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -9,6 +9,7 @@ terms of the MIT license. A copy of the license can be found in the file #define MIMALLOC_INTERNAL_H #include "mimalloc-types.h" +#include "mimalloc-track.h" #if (MI_DEBUG>0) #define mi_trace_message(...) _mi_trace_message(__VA_ARGS__) @@ -625,21 +626,27 @@ static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const } static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) { + MI_TRACK_MEM_DEFINED(block,sizeof(mi_block_t)); + mi_block_t* next; #ifdef MI_ENCODE_FREELIST - return (mi_block_t*)mi_ptr_decode(null, block->next, keys); + next = (mi_block_t*)mi_ptr_decode(null, block->next, keys); #else MI_UNUSED(keys); MI_UNUSED(null); - return (mi_block_t*)block->next; + next = (mi_block_t*)block->next; #endif + MI_TRACK_MEM_NOACCESS(block,sizeof(mi_block_t)); + return next; } static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) { + MI_TRACK_MEM_UNDEFINED(block,sizeof(mi_block_t)); #ifdef MI_ENCODE_FREELIST block->next = mi_ptr_encode(null, next, keys); #else MI_UNUSED(keys); MI_UNUSED(null); block->next = (mi_encoded_t)next; #endif + MI_TRACK_MEM_NOACCESS(block,sizeof(mi_block_t)); } static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) { diff --git a/include/mimalloc-track.h b/include/mimalloc-track.h new file mode 100644 index 00000000..53b93fb3 --- /dev/null +++ b/include/mimalloc-track.h @@ -0,0 +1,41 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_TRACK_H +#define MIMALLOC_TRACK_H + +// ------------------------------------------------------ +// Track memory ranges with macros for tools like Valgrind +// or other memory checkers. 
+// ------------------------------------------------------ + + +#define MI_VALGRIND 1 + +#if MI_VALGRIND +#include +#include + +#define MI_TRACK_ZALLOC(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,0 /*red zone*/,zero) +#define MI_TRACK_MALLOC(p,size) MI_TRACK_ZALLOC(p,size,false) +#define MI_TRACK_FREE(p) VALGRIND_FREELIKE_BLOCK(p,0 /*red zone*/) +#define MI_TRACK_MEM_DEFINED(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) +#define MI_TRACK_MEM_UNDEFINED(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) +#define MI_TRACK_MEM_NOACCESS(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) + +#else + +#define MI_TRACK_ZALLOC(p,size,zero) +#define MI_TRACK_MALLOC(p,size) +#define MI_TRACK_FREE(p) +#define MI_TRACK_MEM_DEFINED(p,size) +#define MI_TRACK_MEM_UNDEFINED(p,size) +#define MI_TRACK_MEM_NOACCESS(p,size) + +#endif + +#endif diff --git a/src/alloc.c b/src/alloc.c index 19844849..5905733a 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -12,6 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file #include "mimalloc-internal.h" #include "mimalloc-atomic.h" + #include // memset, strlen #include // malloc, exit @@ -37,6 +38,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz page->free = mi_block_next(page, block); mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); + // allow use internally + MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page)); + // zero the block? note: we need to zero the full block size (issue #63) if mi_unlikely(zero) { mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) @@ -73,6 +77,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } #endif + MI_TRACK_MEM_NOACCESS(block,mi_page_block_size(page)); return block; } @@ -94,6 +99,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); } #endif + MI_TRACK_ZALLOC(p,size,zero); return p; } @@ -122,6 +128,7 @@ extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); } #endif + MI_TRACK_ZALLOC(p,size,zero); return p; } } @@ -176,16 +183,19 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con return false; } +#define MI_TRACK_PAGE(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); MI_TRACK_MEM_##access( pstart, psize); } + static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { + bool is_double_free = false; mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer? (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL? { // Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free? 
// (continue in separate function to improve code generation) - return mi_check_is_double_freex(page, block); + is_double_free = mi_check_is_double_freex(page, block); } - return false; + return is_double_free; } #else static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { @@ -203,8 +213,11 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { *bsize = mi_page_usable_block_size(page); const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); + MI_TRACK_MEM_DEFINED(padding,sizeof(*padding)); *delta = padding->delta; - return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize); + bool ok = ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize); + MI_TRACK_MEM_NOACCESS(padding,sizeof(*padding)); + return ok; } // Return the exact usable size of a block. @@ -212,7 +225,7 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl size_t bsize; size_t delta; bool ok = mi_page_decode_padding(page, block, &delta, &bsize); - mi_assert_internal(ok); mi_assert_internal(delta <= bsize); + mi_assert_internal(ok); mi_assert_internal(delta <= bsize); return (ok ? bsize - delta : 0); } @@ -226,13 +239,16 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si *size = bsize - delta; uint8_t* fill = (uint8_t*)block + bsize - delta; const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes + MI_TRACK_MEM_DEFINED(fill,maxpad); for (size_t i = 0; i < maxpad; i++) { if (fill[i] != MI_DEBUG_PADDING) { *wrong = bsize - delta + i; - return false; + ok = false; + break; } } - return true; + MI_TRACK_MEM_NOACCESS(fill,maxpad); + return ok; } static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { @@ -465,6 +481,7 @@ void mi_free(void* p) mi_attr_noexcept { mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); if mi_unlikely(segment == NULL) return; + MI_TRACK_FREE(p); mi_threadid_t tid = _mi_thread_id(); mi_page_t* const page = _mi_segment_page_of(segment, p); @@ -472,10 +489,11 @@ void mi_free(void* p) mi_attr_noexcept if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks // local, and not full or aligned - if mi_unlikely(mi_check_is_double_free(page,block)) return; + if mi_unlikely(mi_check_is_double_free(page,block)) return; mi_check_padding(page, block); mi_stat_free(page, block); #if (MI_DEBUG!=0) + MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page)); memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif mi_block_set_next(page, block, page->local_free); @@ -487,8 +505,10 @@ void mi_free(void* p) mi_attr_noexcept else { // non-local, aligned blocks, or a full page; use the more generic path // note: recalc page in generic to improve code generation + MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page)); mi_free_generic(segment, tid == segment->thread_id, p); } + MI_TRACK_MEM_NOACCESS(block,mi_page_block_size(page)); } bool _mi_free_delayed_block(mi_block_t* block) { diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 054d9409..398637c8 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -47,3 +47,8 @@ target_link_libraries(static-override 
PUBLIC mimalloc-static) add_executable(static-override-cxx main-override.cpp) target_link_libraries(static-override-cxx PUBLIC mimalloc-static) + + +## test memory errors +add_executable(test-wrong test-wrong.c) +target_link_libraries(test-wrong PUBLIC mimalloc) From 093724bdef6511a41a86051e04fff79b572709ca Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 28 Oct 2022 20:07:31 -0700 Subject: [PATCH 02/12] add test file for valgrind integration --- test/test-wrong.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 test/test-wrong.c diff --git a/test/test-wrong.c b/test/test-wrong.c new file mode 100644 index 00000000..7544b7fd --- /dev/null +++ b/test/test-wrong.c @@ -0,0 +1,34 @@ +#include +#include +#include "mimalloc.h" + +#ifdef USE_STD_MALLOC +# define mi(x) x +#else +# define mi(x) mi_##x +#endif + +int main(int argc, char** argv) { + int* p = mi(malloc)(3*sizeof(int)); + int* q = mi(malloc)(sizeof(int)); + + // undefined access + // printf("undefined: %d\n", *q); + + *q = 42; + + // buffer overflow + // q[1] = 43; + + mi(free)(q); + + // double free + mi(free)(q); + + // use after free + printf("use-after-free: %d\n", *q); + + // leak p + // mi_free(p) + return 0; +} \ No newline at end of file From bc8f23aa0d3dce3ed5854e8253a24fe1c92ea8fe Mon Sep 17 00:00:00 2001 From: daan Date: Sat, 29 Oct 2022 10:44:10 -0700 Subject: [PATCH 03/12] rename track macros to lowercase --- include/mimalloc-internal.h | 8 ++++---- include/mimalloc-track.h | 24 ++++++++++++------------ src/alloc.c | 28 ++++++++++++++-------------- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index c1716b44..11c3db2e 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -626,7 +626,7 @@ static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const } static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) { - MI_TRACK_MEM_DEFINED(block,sizeof(mi_block_t)); + mi_track_mem_defined(block,sizeof(mi_block_t)); mi_block_t* next; #ifdef MI_ENCODE_FREELIST next = (mi_block_t*)mi_ptr_decode(null, block->next, keys); @@ -634,19 +634,19 @@ static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* bl MI_UNUSED(keys); MI_UNUSED(null); next = (mi_block_t*)block->next; #endif - MI_TRACK_MEM_NOACCESS(block,sizeof(mi_block_t)); + mi_track_mem_noaccess(block,sizeof(mi_block_t)); return next; } static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) { - MI_TRACK_MEM_UNDEFINED(block,sizeof(mi_block_t)); + mi_track_mem_undefined(block,sizeof(mi_block_t)); #ifdef MI_ENCODE_FREELIST block->next = mi_ptr_encode(null, next, keys); #else MI_UNUSED(keys); MI_UNUSED(null); block->next = (mi_encoded_t)next; #endif - MI_TRACK_MEM_NOACCESS(block,sizeof(mi_block_t)); + mi_track_mem_noaccess(block,sizeof(mi_block_t)); } static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) { diff --git a/include/mimalloc-track.h b/include/mimalloc-track.h index 53b93fb3..be15c757 100644 --- a/include/mimalloc-track.h +++ b/include/mimalloc-track.h @@ -20,21 +20,21 @@ terms of the MIT license. 
A copy of the license can be found in the file #include #include -#define MI_TRACK_ZALLOC(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,0 /*red zone*/,zero) -#define MI_TRACK_MALLOC(p,size) MI_TRACK_ZALLOC(p,size,false) -#define MI_TRACK_FREE(p) VALGRIND_FREELIKE_BLOCK(p,0 /*red zone*/) -#define MI_TRACK_MEM_DEFINED(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) -#define MI_TRACK_MEM_UNDEFINED(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) -#define MI_TRACK_MEM_NOACCESS(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) +#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,0 /*red zone*/,zero) +#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,0 /*red zone*/) +#define mi_track_free(p) VALGRIND_FREELIKE_BLOCK(p,0 /*red zone*/) +#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) +#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) +#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) #else -#define MI_TRACK_ZALLOC(p,size,zero) -#define MI_TRACK_MALLOC(p,size) -#define MI_TRACK_FREE(p) -#define MI_TRACK_MEM_DEFINED(p,size) -#define MI_TRACK_MEM_UNDEFINED(p,size) -#define MI_TRACK_MEM_NOACCESS(p,size) +#define mi_track_malloc(p,size,zero) +#define mi_track_resize(p,oldsize,newsize) +#define mi_track_free(p) +#define mi_track_mem_defined(p,size) +#define mi_track_mem_undefined(p,size) +#define mi_track_mem_noaccess(p,size) #endif diff --git a/src/alloc.c b/src/alloc.c index 5905733a..fe744f37 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -38,8 +38,8 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz page->free = mi_block_next(page, block); mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); - // allow use internally - MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page)); + // allow use of the block internally + mi_track_mem_undefined(block,mi_page_block_size(page)); // zero the block? 
note: we need to zero the full block size (issue #63) if mi_unlikely(zero) { @@ -77,7 +77,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } #endif - MI_TRACK_MEM_NOACCESS(block,mi_page_block_size(page)); + mi_track_mem_noaccess(block,mi_page_block_size(page)); return block; } @@ -99,7 +99,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); } #endif - MI_TRACK_ZALLOC(p,size,zero); + mi_track_malloc(p,size,zero); return p; } @@ -128,7 +128,7 @@ extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); } #endif - MI_TRACK_ZALLOC(p,size,zero); + mi_track_malloc(p,size,zero); return p; } } @@ -183,7 +183,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con return false; } -#define MI_TRACK_PAGE(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); MI_TRACK_MEM_##access( pstart, psize); } +#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); } static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { bool is_double_free = false; @@ -213,10 +213,10 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { *bsize = mi_page_usable_block_size(page); const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); - MI_TRACK_MEM_DEFINED(padding,sizeof(*padding)); + mi_track_mem_defined(padding,sizeof(*padding)); *delta = padding->delta; bool ok = ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize); - MI_TRACK_MEM_NOACCESS(padding,sizeof(*padding)); + mi_track_mem_noaccess(padding,sizeof(*padding)); return ok; } @@ -239,7 +239,7 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si *size = bsize - delta; uint8_t* fill = (uint8_t*)block + bsize - delta; const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? 
MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes - MI_TRACK_MEM_DEFINED(fill,maxpad); + mi_track_mem_defined(fill,maxpad); for (size_t i = 0; i < maxpad; i++) { if (fill[i] != MI_DEBUG_PADDING) { *wrong = bsize - delta + i; @@ -247,7 +247,7 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si break; } } - MI_TRACK_MEM_NOACCESS(fill,maxpad); + mi_track_mem_noaccess(fill,maxpad); return ok; } @@ -481,7 +481,7 @@ void mi_free(void* p) mi_attr_noexcept { mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); if mi_unlikely(segment == NULL) return; - MI_TRACK_FREE(p); + mi_track_free(p); mi_threadid_t tid = _mi_thread_id(); mi_page_t* const page = _mi_segment_page_of(segment, p); @@ -493,7 +493,7 @@ void mi_free(void* p) mi_attr_noexcept mi_check_padding(page, block); mi_stat_free(page, block); #if (MI_DEBUG!=0) - MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page)); + mi_track_mem_undefined(block,mi_page_block_size(page)); memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif mi_block_set_next(page, block, page->local_free); @@ -505,10 +505,10 @@ void mi_free(void* p) mi_attr_noexcept else { // non-local, aligned blocks, or a full page; use the more generic path // note: recalc page in generic to improve code generation - MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page)); + mi_track_mem_undefined(block,mi_page_block_size(page)); mi_free_generic(segment, tid == segment->thread_id, p); } - MI_TRACK_MEM_NOACCESS(block,mi_page_block_size(page)); + mi_track_mem_noaccess(block,mi_page_block_size(page)); } bool _mi_free_delayed_block(mi_block_t* block) { From eee7c40da53a2750056b1b626c3bc8b52f5f8a6a Mon Sep 17 00:00:00 2001 From: daan Date: Sat, 29 Oct 2022 11:43:09 -0700 Subject: [PATCH 04/12] updates to run valgrind on test-api --- include/mimalloc-internal.h | 2 +- include/mimalloc-track.h | 5 +++++ src/alloc-aligned.c | 4 ++++ src/alloc.c | 18 ++++++++++++++---- src/os.c | 2 +- src/segment.c | 9 ++++++--- test/test-api.c | 4 +++- test/test-wrong.c | 7 +++++-- 8 files changed, 39 insertions(+), 12 deletions(-) diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index 11c3db2e..7f16544b 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -931,7 +931,7 @@ static inline size_t mi_bsr(uintptr_t x) { // (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253. // --------------------------------------------------------------------------------- -#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) +#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) #include #include extern bool _mi_cpu_has_fsrm; diff --git a/include/mimalloc-track.h b/include/mimalloc-track.h index be15c757..200f2529 100644 --- a/include/mimalloc-track.h +++ b/include/mimalloc-track.h @@ -17,6 +17,9 @@ terms of the MIT license. A copy of the license can be found in the file #define MI_VALGRIND 1 #if MI_VALGRIND + +#define MI_TRACK_ENABLED 1 + #include #include @@ -29,6 +32,8 @@ terms of the MIT license. 
A copy of the license can be found in the file #else +#define MI_TRACK_ENABLED 0 + #define mi_track_malloc(p,size,zero) #define mi_track_resize(p,oldsize,newsize) #define mi_track_free(p) diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c index 4b4c43cb..e72d363f 100644 --- a/src/alloc-aligned.c +++ b/src/alloc-aligned.c @@ -41,6 +41,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true); mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0); mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p)); + + mi_track_free(p); + mi_track_malloc(aligned_p,size,zero); return aligned_p; } @@ -82,6 +85,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc mi_assert_internal(p != NULL); mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); + mi_track_malloc(p,size,zero); return p; } } diff --git a/src/alloc.c b/src/alloc.c index fe744f37..33e565bf 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -39,6 +39,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); // allow use of the block internally + // todo: can we optimize this call away for non-zero'd release mode? mi_track_mem_undefined(block,mi_page_block_size(page)); // zero the block? note: we need to zero the full block size (issue #63) @@ -77,6 +78,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } #endif + // mark as no-access again mi_track_mem_noaccess(block,mi_page_block_size(page)); return block; } @@ -341,7 +343,7 @@ static void mi_stat_huge_free(const mi_page_t* page) { // Free // ------------------------------------------------------ -// multi-threaded free +// multi-threaded free (or free in huge block) static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block) { // The padding check may access the non-thread-owned page for the key values. 
@@ -349,6 +351,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc mi_check_padding(page, block); mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection #if (MI_DEBUG!=0) + mi_track_mem_undefined(block, mi_page_block_size(page)); // note: check padding may set parts to noaccess memset(block, MI_DEBUG_FREED, mi_usable_size(block)); #endif @@ -408,6 +411,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block if mi_unlikely(mi_check_is_double_free(page, block)) return; mi_check_padding(page, block); #if (MI_DEBUG!=0) + mi_track_mem_undefined(block, mi_page_block_size(page)); // note: check padding may set parts to noaccess memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif mi_block_set_next(page, block, page->local_free); @@ -486,6 +490,10 @@ void mi_free(void* p) mi_attr_noexcept mi_threadid_t tid = _mi_thread_id(); mi_page_t* const page = _mi_segment_page_of(segment, p); mi_block_t* const block = (mi_block_t*)p; + + #if MI_TRACK_ENABLED + const size_t track_bsize = mi_page_block_size(page); + #endif if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks // local, and not full or aligned @@ -493,7 +501,7 @@ void mi_free(void* p) mi_attr_noexcept mi_check_padding(page, block); mi_stat_free(page, block); #if (MI_DEBUG!=0) - mi_track_mem_undefined(block,mi_page_block_size(page)); + mi_track_mem_undefined(block,track_bsize); // note: check padding may set parts to noaccess memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif mi_block_set_next(page, block, page->local_free); @@ -505,10 +513,10 @@ void mi_free(void* p) mi_attr_noexcept else { // non-local, aligned blocks, or a full page; use the more generic path // note: recalc page in generic to improve code generation - mi_track_mem_undefined(block,mi_page_block_size(page)); + mi_track_mem_undefined(block,track_bsize); mi_free_generic(segment, tid == segment->thread_id, p); } - mi_track_mem_noaccess(block,mi_page_block_size(page)); + mi_track_mem_noaccess(block,track_bsize); // cannot use mi_page_block_size as the segment might be deallocated by now } bool _mi_free_delayed_block(mi_block_t* block) { @@ -641,10 +649,12 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)). // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.) const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0) + #if !MI_TRACK_ENABLED if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) // todo: adjust potential padding to reflect the new size? return p; // reallocation still fits and not more than 50% waste } + #endif void* newp = mi_heap_malloc(heap,newsize); if mi_likely(newp != NULL) { if (zero && newsize > size) { diff --git a/src/os.c b/src/os.c index 855cc3a7..1640657e 100644 --- a/src/os.c +++ b/src/os.c @@ -986,7 +986,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) else _mi_stat_decrease(&stats->reset, csize); if (!reset) return true; // nothing to do on unreset! 
- #if (MI_DEBUG>1) + #if (MI_DEBUG>1) && !MI_TRACK_ENABLED if (MI_SECURE==0) { memset(start, 0, csize); // pretend it is eagerly reset } diff --git a/src/segment.c b/src/segment.c index f9edf04a..f68b3b76 100644 --- a/src/segment.c +++ b/src/segment.c @@ -476,6 +476,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se fully_committed = false; } _mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os); + //mi_track_mem_noaccess(segment,segment_size); } // called by threads that are terminating to free cached segments @@ -588,9 +589,11 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_ segment->mem_is_pinned = (mem_large || is_pinned); segment->mem_is_committed = commit; mi_segments_track_size((long)segment_size, tld); - } + } mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true); + //mi_track_mem_defined(segment,info_size); + mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan if (!pages_still_good) { // zero the segment info (but not the `mem` fields) @@ -1208,7 +1211,7 @@ static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_p mi_assert_internal(free_queue->first != NULL); mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld); mi_assert_internal(page != NULL); -#if MI_DEBUG>=2 +#if MI_DEBUG>=2 && !MI_TRACK_ENABLED // verify it is committed _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0; #endif @@ -1232,7 +1235,7 @@ static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size if (segment == NULL) return NULL; mi_page_t* page = mi_segment_find_free(segment, tld); mi_assert_internal(page != NULL); -#if MI_DEBUG>=2 +#if MI_DEBUG>=2 && !MI_TRACK_ENABLED _mi_segment_page_start(segment, page, sizeof(void*), NULL, NULL)[0] = 0; #endif return page; diff --git a/test/test-api.c b/test/test-api.c index c56fb88d..1d427c01 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -72,7 +72,9 @@ int main(void) { result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL); }; CHECK_BODY("calloc0") { - result = (mi_usable_size(mi_calloc(0,1000)) <= 16); + void* p = mi_calloc(0,1000); + result = (mi_usable_size(p) <= 16); + mi_free(p); }; CHECK_BODY("malloc-large") { // see PR #544. 
void* p = mi_malloc(67108872); diff --git a/test/test-wrong.c b/test/test-wrong.c index 7544b7fd..b0e59834 100644 --- a/test/test-wrong.c +++ b/test/test-wrong.c @@ -11,7 +11,10 @@ int main(int argc, char** argv) { int* p = mi(malloc)(3*sizeof(int)); int* q = mi(malloc)(sizeof(int)); - + + int* r = mi_malloc_aligned(8,16); + mi_free(r); + // undefined access // printf("undefined: %d\n", *q); @@ -23,7 +26,7 @@ int main(int argc, char** argv) { mi(free)(q); // double free - mi(free)(q); + // mi(free)(q); // use after free printf("use-after-free: %d\n", *q); From c61b365e7666a9cb7536b9843accad4e7e2c745d Mon Sep 17 00:00:00 2001 From: daan Date: Sat, 29 Oct 2022 11:51:04 -0700 Subject: [PATCH 05/12] valgrind works on test-stress --- src/region.c | 2 +- src/segment.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/region.c b/src/region.c index 72ce8494..15b2cce1 100644 --- a/src/region.c +++ b/src/region.c @@ -395,7 +395,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re if (p==NULL) return; if (size==0) return; size = _mi_align_up(size, _mi_os_page_size()); - + size_t arena_memid = 0; mi_bitmap_index_t bit_idx; mem_region_t* region; diff --git a/src/segment.c b/src/segment.c index f68b3b76..9a76385a 100644 --- a/src/segment.c +++ b/src/segment.c @@ -476,7 +476,6 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se fully_committed = false; } _mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os); - //mi_track_mem_noaccess(segment,segment_size); } // called by threads that are terminating to free cached segments @@ -592,7 +591,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_ } mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true); - //mi_track_mem_defined(segment,info_size); + mi_track_mem_defined(segment,info_size); mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan if (!pages_still_good) { From a1f5a5d962fc819f2da7266a6f4877790b474d36 Mon Sep 17 00:00:00 2001 From: daan Date: Sat, 29 Oct 2022 14:37:55 -0700 Subject: [PATCH 06/12] fix various false positives in test-stress from valgrind --- include/mimalloc-track.h | 9 +++----- include/mimalloc-types.h | 7 ++++-- src/alloc.c | 48 ++++++++++++++++++++++++---------------- src/region.c | 6 ++--- src/segment.c | 7 +++--- test/test-wrong.c | 9 +++++--- 6 files changed, 50 insertions(+), 36 deletions(-) diff --git a/include/mimalloc-track.h b/include/mimalloc-track.h index 200f2529..25f800b5 100644 --- a/include/mimalloc-track.h +++ b/include/mimalloc-track.h @@ -13,9 +13,6 @@ terms of the MIT license. A copy of the license can be found in the file // or other memory checkers. // ------------------------------------------------------ - -#define MI_VALGRIND 1 - #if MI_VALGRIND #define MI_TRACK_ENABLED 1 @@ -23,9 +20,9 @@ terms of the MIT license. 
A copy of the license can be found in the file #include #include -#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,0 /*red zone*/,zero) -#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,0 /*red zone*/) -#define mi_track_free(p) VALGRIND_FREELIKE_BLOCK(p,0 /*red zone*/) +#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,(zero?1:0)) +#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) +#define mi_track_free(p) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) #define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) #define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) #define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index a3fad92d..66b31069 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -29,6 +29,9 @@ terms of the MIT license. A copy of the license can be found in the file // Define NDEBUG in the release version to disable assertions. // #define NDEBUG +// Define MI_VALGRIND to enable valgrind support +#define MI_VALGRIND 1 + // Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance). // #define MI_STAT 1 @@ -56,7 +59,7 @@ terms of the MIT license. A copy of the license can be found in the file // Reserve extra padding at the end of each block to be more resilient against heap block overflows. // The padding can detect byte-precise buffer overflow on free. -#if !defined(MI_PADDING) && (MI_DEBUG>=1) +#if !defined(MI_PADDING) && (MI_DEBUG>=1 || MI_VALGRIND) #define MI_PADDING 1 #endif @@ -64,7 +67,7 @@ terms of the MIT license. A copy of the license can be found in the file // Encoded free lists allow detection of corrupted free lists // and can detect buffer overflows, modify after free, and double `free`s. #if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0) -#define MI_ENCODE_FREELIST 1 +#define MI_ENCODE_FREELIST 1 #endif diff --git a/src/alloc.c b/src/alloc.c index 33e565bf..24c1adff 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -40,7 +40,10 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz // allow use of the block internally // todo: can we optimize this call away for non-zero'd release mode? - mi_track_mem_undefined(block,mi_page_block_size(page)); + #if MI_TRACK_ENABLED + const size_t track_bsize = mi_page_block_size(page); + if (zero) mi_track_mem_defined(block,track_bsize); else mi_track_mem_undefined(block,track_bsize); + #endif // zero the block? 
note: we need to zero the full block size (issue #63) if mi_unlikely(zero) { @@ -70,7 +73,10 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz #if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page)); ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE)); + #if (MI_DEBUG>1) mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta)); + mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess + #endif padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys)); padding->delta = (uint32_t)(delta); uint8_t* fill = (uint8_t*)padding - delta; @@ -79,7 +85,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz #endif // mark as no-access again - mi_track_mem_noaccess(block,mi_page_block_size(page)); + mi_track_mem_noaccess(block,track_bsize); return block; } @@ -215,10 +221,14 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { *bsize = mi_page_usable_block_size(page); const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); - mi_track_mem_defined(padding,sizeof(*padding)); + mi_track_mem_defined(padding,sizeof(mi_padding_t)); *delta = padding->delta; - bool ok = ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize); - mi_track_mem_noaccess(padding,sizeof(*padding)); + uint32_t canary = padding->canary; + uintptr_t keys[2]; + keys[0] = page->keys[0]; + keys[1] = page->keys[1]; + bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize); + mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); return ok; } @@ -349,9 +359,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc // The padding check may access the non-thread-owned page for the key values. // that is safe as these are constant and the page won't be freed (as the block is not freed yet). 
mi_check_padding(page, block); - mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection - #if (MI_DEBUG!=0) - mi_track_mem_undefined(block, mi_page_block_size(page)); // note: check padding may set parts to noaccess + mi_track_mem_defined(block, mi_page_block_size(page)); // ensure the padding can be accessed + mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection + #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading memset(block, MI_DEBUG_FREED, mi_usable_size(block)); #endif @@ -406,6 +416,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block) { // and push it on the free list + //const size_t bsize = mi_page_block_size(page); if mi_likely(local) { // owning thread can free a block directly if mi_unlikely(mi_check_is_double_free(page, block)) return; @@ -442,8 +453,13 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept { mi_page_t* const page = _mi_segment_page_of(segment, p); mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p); - mi_stat_free(page, block); + //#if MI_TRACK_ENABLED + //const size_t track_bsize = mi_page_block_size(page); + //#endif + //mi_track_mem_defined(block,track_bsize); + mi_stat_free(page, block); // stat_free may access the padding _mi_free_block(page, local, block); + //mi_track_mem_noaccess(block,track_bsize); // use track_bsize as the page may have been deallocated by now } // Get the segment data belonging to a pointer @@ -485,27 +501,22 @@ void mi_free(void* p) mi_attr_noexcept { mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); if mi_unlikely(segment == NULL) return; - mi_track_free(p); mi_threadid_t tid = _mi_thread_id(); mi_page_t* const page = _mi_segment_page_of(segment, p); mi_block_t* const block = (mi_block_t*)p; - #if MI_TRACK_ENABLED - const size_t track_bsize = mi_page_block_size(page); - #endif - if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks // local, and not full or aligned if mi_unlikely(mi_check_is_double_free(page,block)) return; mi_check_padding(page, block); mi_stat_free(page, block); - #if (MI_DEBUG!=0) - mi_track_mem_undefined(block,track_bsize); // note: check padding may set parts to noaccess + #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif mi_block_set_next(page, block, page->local_free); page->local_free = block; + // mi_track_mem_noaccess(block,mi_page_block_size(page)); if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page)) _mi_page_retire(page); } @@ -513,10 +524,9 @@ void mi_free(void* p) mi_attr_noexcept else { // non-local, aligned blocks, or a full page; use the more generic path // note: recalc page in generic to improve code generation - mi_track_mem_undefined(block,track_bsize); mi_free_generic(segment, tid == segment->thread_id, p); - } - mi_track_mem_noaccess(block,track_bsize); // 
cannot use mi_page_block_size as the segment might be deallocated by now + } + mi_track_free(p); } bool _mi_free_delayed_block(mi_block_t* block) { diff --git a/src/region.c b/src/region.c index 15b2cce1..54b7be0c 100644 --- a/src/region.c +++ b/src/region.c @@ -330,7 +330,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool* } mi_assert_internal(!_mi_bitmap_is_any_claimed(®ion->reset, 1, blocks, bit_idx)); - #if (MI_DEBUG>=2) + #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED if (*commit) { ((uint8_t*)p)[0] = 0; } #endif @@ -376,9 +376,9 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l if (p != NULL) { mi_assert_internal((uintptr_t)p % alignment == 0); -#if (MI_DEBUG>=2) + #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED if (*commit) { ((uint8_t*)p)[0] = 0; } // ensure the memory is committed -#endif + #endif } return p; } diff --git a/src/segment.c b/src/segment.c index 9a76385a..68174bb2 100644 --- a/src/segment.c +++ b/src/segment.c @@ -531,6 +531,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_ // Try to get it from our thread local cache first if (segment != NULL) { // came from cache + mi_track_mem_defined(segment,info_size); mi_assert_internal(segment->segment_size == segment_size); if (page_kind <= MI_PAGE_MEDIUM && segment->page_kind == page_kind && segment->segment_size == segment_size) { pages_still_good = true; @@ -584,15 +585,15 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_ return NULL; } } + mi_track_mem_undefined(segment,info_size); segment->memid = memid; segment->mem_is_pinned = (mem_large || is_pinned); segment->mem_is_committed = commit; mi_segments_track_size((long)segment_size, tld); } mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); - mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true); - mi_track_mem_defined(segment,info_size); - + mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true); + mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan if (!pages_still_good) { // zero the segment info (but not the `mem` fields) diff --git a/test/test-wrong.c b/test/test-wrong.c index b0e59834..61edb7ea 100644 --- a/test/test-wrong.c +++ b/test/test-wrong.c @@ -16,17 +16,20 @@ int main(int argc, char** argv) { mi_free(r); // undefined access - // printf("undefined: %d\n", *q); + printf("undefined: %d\n", *q); *q = 42; // buffer overflow - // q[1] = 43; + q[1] = 43; + + // buffer underflow + q[-1] = 44; mi(free)(q); // double free - // mi(free)(q); + mi(free)(q); // use after free printf("use-after-free: %d\n", *q); From 84c706508c83e49158069dca3f65a853b019f6e4 Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 30 Oct 2022 10:45:51 -0700 Subject: [PATCH 07/12] fix false positives from valgrind in rptest --- include/mimalloc-track.h | 2 +- include/mimalloc-types.h | 2 +- src/alloc-aligned.c | 6 ++++-- src/alloc-override.c | 12 ++++++------ src/alloc.c | 31 +++++++++++++++++-------------- test/test-wrong.c | 13 +++++++++++-- 6 files changed, 40 insertions(+), 26 deletions(-) diff --git a/include/mimalloc-track.h b/include/mimalloc-track.h index 25f800b5..bb9df4fa 100644 --- a/include/mimalloc-track.h +++ b/include/mimalloc-track.h @@ -20,7 +20,7 @@ terms of the MIT license. 
A copy of the license can be found in the file #include #include -#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,(zero?1:0)) +#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero) #define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) #define mi_track_free(p) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) #define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index 66b31069..1455da6f 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -66,7 +66,7 @@ terms of the MIT license. A copy of the license can be found in the file // Encoded free lists allow detection of corrupted free lists // and can detect buffer overflows, modify after free, and double `free`s. -#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0) +#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0) && !MI_VALGRIND #define MI_ENCODE_FREELIST 1 #endif diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c index e72d363f..10e145b4 100644 --- a/src/alloc-aligned.c +++ b/src/alloc-aligned.c @@ -42,8 +42,10 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0); mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p)); - mi_track_free(p); - mi_track_malloc(aligned_p,size,zero); + if (p != aligned_p) { + mi_track_free(p); + mi_track_malloc(aligned_p,size,zero); + } return aligned_p; } diff --git a/src/alloc-override.c b/src/alloc-override.c index e29cb4b2..9534e9d5 100644 --- a/src/alloc-override.c +++ b/src/alloc-override.c @@ -29,7 +29,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; // Override system malloc // ------------------------------------------------------ -#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) && !defined(MI_VALGRIND) +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) && !MI_TRACK_ENABLED // gcc, clang: use aliasing to alias the exported function to one of our `mi_` functions #if (defined(__GNUC__) && __GNUC__ >= 9) #pragma GCC diagnostic ignored "-Wattributes" // or we get warnings that nodiscard is ignored on a forward @@ -44,7 +44,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; #define MI_FORWARD02(fun,x,y) MI_FORWARD(fun) #else // otherwise use forwarding by calling our `mi_` function - #define MI_FORWARD1(fun,x) { return fun(x); } + #define MI_FORWARD1(fun,x) { return fun(x); } #define MI_FORWARD2(fun,x,y) { return fun(x,y); } #define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); } #define MI_FORWARD0(fun,x) { fun(x); } @@ -123,10 +123,10 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; // we just override new/delete which does work in a static library. 
#else // On all other systems forward to our API - void* malloc(size_t size) MI_FORWARD1(mi_malloc, size) - void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n) - void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize) - void free(void* p) MI_FORWARD0(mi_free, p) + mi_decl_export void* malloc(size_t size) MI_FORWARD1(mi_malloc, size) + mi_decl_export void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n) + mi_decl_export void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize) + mi_decl_export void free(void* p) MI_FORWARD0(mi_free, p) #endif #if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) diff --git a/src/alloc.c b/src/alloc.c index 24c1adff..f243e8c5 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -42,18 +42,20 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz // todo: can we optimize this call away for non-zero'd release mode? #if MI_TRACK_ENABLED const size_t track_bsize = mi_page_block_size(page); - if (zero) mi_track_mem_defined(block,track_bsize); else mi_track_mem_undefined(block,track_bsize); + mi_assert_internal(track_bsize >= size); + mi_track_mem_undefined(block,track_bsize - MI_PADDING_SIZE); #endif // zero the block? note: we need to zero the full block size (issue #63) if mi_unlikely(zero) { mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) - const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size); - _mi_memzero_aligned(block, zsize); + const size_t zsize = (page->is_zero ? sizeof(block->next) + MI_PADDING_SIZE : page->xblock_size); + mi_assert(zsize <= track_bsize && zsize >= MI_PADDING_SIZE); + _mi_memzero_aligned(block, zsize - MI_PADDING_SIZE); } #if (MI_DEBUG>0) - if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size); } + if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size - MI_PADDING_SIZE); } #elif (MI_SECURE!=0) if (!zero) { block->next = 0; } // don't leak internal data #endif @@ -70,7 +72,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz } #endif -#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) +#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page)); ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE)); #if (MI_DEBUG>1) @@ -85,7 +87,10 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz #endif // mark as no-access again - mi_track_mem_noaccess(block,track_bsize); + // mi_track_mem_noaccess(block, track_bsize); + // mi_track_resize(block,track_bsize,size - MI_PADDING_SIZE); + // mi_track_free(block); + //mi_track_malloc(block,size - MI_PADDING_SIZE,zero); return block; } @@ -217,7 +222,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block // Check for heap block overflow by setting up padding at the end of the block // --------------------------------------------------------------------------- -#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) +#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { *bsize = mi_page_usable_block_size(page); const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); @@ -421,7 +426,7 @@ static inline void 
_mi_free_block(mi_page_t* page, bool local, mi_block_t* block // owning thread can free a block directly if mi_unlikely(mi_check_is_double_free(page, block)) return; mi_check_padding(page, block); - #if (MI_DEBUG!=0) + #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED mi_track_mem_undefined(block, mi_page_block_size(page)); // note: check padding may set parts to noaccess memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif @@ -453,13 +458,11 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept { mi_page_t* const page = _mi_segment_page_of(segment, p); mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p); - //#if MI_TRACK_ENABLED - //const size_t track_bsize = mi_page_block_size(page); - //#endif - //mi_track_mem_defined(block,track_bsize); + const size_t track_bsize = mi_page_block_size(page); + //mi_track_mem_defined(block,track_bsize); mi_stat_free(page, block); // stat_free may access the padding - _mi_free_block(page, local, block); - //mi_track_mem_noaccess(block,track_bsize); // use track_bsize as the page may have been deallocated by now + _mi_free_block(page, local, block); + //mi_track_mem_noaccess(block,track_bsize); } // Get the segment data belonging to a pointer diff --git a/test/test-wrong.c b/test/test-wrong.c index 61edb7ea..bb556003 100644 --- a/test/test-wrong.c +++ b/test/test-wrong.c @@ -10,13 +10,21 @@ int main(int argc, char** argv) { int* p = mi(malloc)(3*sizeof(int)); - int* q = mi(malloc)(sizeof(int)); - + int* r = mi_malloc_aligned(8,16); mi_free(r); + // illegal byte wise read + char* c = (char*)mi(malloc)(3); + printf("invalid byte: over: %d, under: %d\n", c[4], c[-1]); + mi(free)(c); + // undefined access + int* q = mi(malloc)(sizeof(int)); printf("undefined: %d\n", *q); + + // illegal int read + printf("invalid: over: %d, under: %d\n", q[1], q[-1]); *q = 42; @@ -28,6 +36,7 @@ int main(int argc, char** argv) { mi(free)(q); + // double free mi(free)(q); From 6e11a054a4f751c17c030167e2ea33124d8083da Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 30 Oct 2022 12:03:14 -0700 Subject: [PATCH 08/12] further improve precision of malloc/free tracking in valgrind --- src/alloc.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/alloc.c b/src/alloc.c index f243e8c5..c9265b0a 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -364,7 +364,6 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc // The padding check may access the non-thread-owned page for the key values. // that is safe as these are constant and the page won't be freed (as the block is not freed yet). 
mi_check_padding(page, block); - mi_track_mem_defined(block, mi_page_block_size(page)); // ensure the padding can be accessed mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading memset(block, MI_DEBUG_FREED, mi_usable_size(block)); @@ -427,7 +426,6 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block if mi_unlikely(mi_check_is_double_free(page, block)) return; mi_check_padding(page, block); #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED - mi_track_mem_undefined(block, mi_page_block_size(page)); // note: check padding may set parts to noaccess memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif mi_block_set_next(page, block, page->local_free); @@ -458,11 +456,9 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept { mi_page_t* const page = _mi_segment_page_of(segment, p); mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p); - const size_t track_bsize = mi_page_block_size(page); - //mi_track_mem_defined(block,track_bsize); mi_stat_free(page, block); // stat_free may access the padding + mi_track_free(p); _mi_free_block(page, local, block); - //mi_track_mem_noaccess(block,track_bsize); } // Get the segment data belonging to a pointer @@ -517,19 +513,18 @@ void mi_free(void* p) mi_attr_noexcept #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif + mi_track_free(p); mi_block_set_next(page, block, page->local_free); page->local_free = block; - // mi_track_mem_noaccess(block,mi_page_block_size(page)); if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page)) _mi_page_retire(page); - } + } } else { // non-local, aligned blocks, or a full page; use the more generic path // note: recalc page in generic to improve code generation mi_free_generic(segment, tid == segment->thread_id, p); } - mi_track_free(p); } bool _mi_free_delayed_block(mi_block_t* block) { From b48040e20aa908b4ed4ec13d6a48b0e29214470f Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 30 Oct 2022 12:23:11 -0700 Subject: [PATCH 09/12] set pages to noaccess explicitly for valgrind precision --- include/mimalloc-types.h | 2 +- src/alloc.c | 5 +++-- src/page.c | 4 +++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index 1455da6f..6c7c201a 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -66,7 +66,7 @@ terms of the MIT license. A copy of the license can be found in the file // Encoded free lists allow detection of corrupted free lists // and can detect buffer overflows, modify after free, and double `free`s. 
-#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0) && !MI_VALGRIND +#if (MI_SECURE>=3 || MI_DEBUG>=1) #define MI_ENCODE_FREELIST 1 #endif diff --git a/src/alloc.c b/src/alloc.c index c9265b0a..a341e0e7 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -39,10 +39,11 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); // allow use of the block internally - // todo: can we optimize this call away for non-zero'd release mode? + // note: when tracking we need to avoid ever touching the MI_PADDING since + // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`) #if MI_TRACK_ENABLED const size_t track_bsize = mi_page_block_size(page); - mi_assert_internal(track_bsize >= size); + mi_assert_internal(track_bsize >= size && track_bsize >= MI_PADDING_SIZE); mi_track_mem_undefined(block,track_bsize - MI_PADDING_SIZE); #endif diff --git a/src/page.c b/src/page.c index 357e05e1..bb74c600 100644 --- a/src/page.c +++ b/src/page.c @@ -616,7 +616,9 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi // set fields mi_page_set_heap(page, heap); size_t page_size; - _mi_segment_page_start(segment, page, block_size, &page_size, NULL); + const void* page_start = _mi_segment_page_start(segment, page, block_size, &page_size, NULL); + MI_UNUSED(page_start); + mi_track_mem_noaccess(page_start,page_size); page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); mi_assert_internal(page_size / block_size < (1L<<16)); page->reserved = (uint16_t)(page_size / block_size); From 886fd7d1b8514452b04f3e92b3c835feed96376a Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 30 Oct 2022 12:49:29 -0700 Subject: [PATCH 10/12] add cmakefile MI_VALGRIND option --- CMakeLists.txt | 22 ++++++++++++++++++++-- include/mimalloc-types.h | 2 +- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b7023009..93fbb1cd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,10 +6,11 @@ set(CMAKE_CXX_STANDARD 17) option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF) option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF) -option(MI_PADDING "Enable padding to detect heap block overflow (used only in DEBUG mode)" ON) +option(MI_PADDING "Enable padding to detect heap block overflow (used only in DEBUG mode or with Valgrind)" ON) option(MI_OVERRIDE "Override the standard malloc interface (e.g. 
define entry points for malloc() etc)" ON) option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF) option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF) +option(MI_VALGRIND "Compile with Valgrind support (adds a small overhead)" OFF) option(MI_USE_CXX "Use the C++ compiler to compile the library (instead of the C compiler)" OFF) option(MI_SEE_ASM "Generate assembly files" OFF) option(MI_OSX_INTERPOSE "Use interpose to override standard malloc on macOS" ON) @@ -28,6 +29,7 @@ option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version (deprecated)" OFF) option(MI_USE_LIBATOMIC "Explicitly link with -latomic (on older systems) (deprecated and detected automatically)" OFF) +include(CheckIncludeFiles) include(GNUInstallDirs) include("cmake/mimalloc-config-version.cmake") @@ -108,6 +110,18 @@ if(MI_SECURE) list(APPEND mi_defines MI_SECURE=4) endif() +if(MI_VALGRIND) + CHECK_INCLUDE_FILES("valgrind/valgrind.h;valgrind/memcheck.h" MI_HAS_VALGRINDH) + if (NOT MI_HAS_VALGRINDH) + set(MI_VALGRIND OFF) + message(WARNING "Cannot find the 'valgrind/valgrind.h' and 'valgrind/memcheck.h' -- install valgrind first") + message(STATUS "Compile **without** Valgrind support (MI_VALGRIND=OFF)") + else() + message(STATUS "Compile with Valgrind support (MI_VALGRIND=ON)") + list(APPEND mi_defines MI_VALGRIND=1) + endif() +endif() + if(MI_SEE_ASM) message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)") list(APPEND mi_cflags -save-temps) @@ -254,7 +268,11 @@ endif() if(MI_SECURE) set(mi_basename "mimalloc-secure") else() - set(mi_basename "mimalloc") + if(MI_VALGRIND) + set(mi_basename "mimalloc-valgrind") + else() + set(mi_basename "mimalloc") + endif() endif() string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index 6c7c201a..bca0ad61 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -30,7 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file // #define NDEBUG // Define MI_VALGRIND to enable valgrind support -#define MI_VALGRIND 1 +// #define MI_VALGRIND 1 // Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance). // #define MI_STAT 1 From 05a75758dd41eb8e12d0155556364c427eaa93eb Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 30 Oct 2022 14:07:41 -0700 Subject: [PATCH 11/12] fix tests --- src/alloc.c | 18 ++++-------------- test/test-api-fill.c | 10 ++++++++++ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/alloc.c b/src/alloc.c index a341e0e7..ee3f8b8e 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -41,22 +41,17 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz // allow use of the block internally // note: when tracking we need to avoid ever touching the MI_PADDING since // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`) - #if MI_TRACK_ENABLED - const size_t track_bsize = mi_page_block_size(page); - mi_assert_internal(track_bsize >= size && track_bsize >= MI_PADDING_SIZE); - mi_track_mem_undefined(block,track_bsize - MI_PADDING_SIZE); - #endif - + mi_track_mem_undefined(block, mi_page_usable_block_size(page)); + // zero the block? 
note: we need to zero the full block size (issue #63) if mi_unlikely(zero) { mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) const size_t zsize = (page->is_zero ? sizeof(block->next) + MI_PADDING_SIZE : page->xblock_size); - mi_assert(zsize <= track_bsize && zsize >= MI_PADDING_SIZE); _mi_memzero_aligned(block, zsize - MI_PADDING_SIZE); } -#if (MI_DEBUG>0) - if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size - MI_PADDING_SIZE); } +#if (MI_DEBUG>0) && !MI_TRACK_ENABLED + if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); } #elif (MI_SECURE!=0) if (!zero) { block->next = 0; } // don't leak internal data #endif @@ -87,11 +82,6 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } #endif - // mark as no-access again - // mi_track_mem_noaccess(block, track_bsize); - // mi_track_resize(block,track_bsize,size - MI_PADDING_SIZE); - // mi_track_free(block); - //mi_track_malloc(block,size - MI_PADDING_SIZE,zero); return block; } diff --git a/test/test-api-fill.c b/test/test-api-fill.c index ef50acc2..c205637c 100644 --- a/test/test-api-fill.c +++ b/test/test-api-fill.c @@ -309,6 +309,10 @@ bool check_zero_init(uint8_t* p, size_t size) { #if MI_DEBUG >= 2 bool check_debug_fill_uninit(uint8_t* p, size_t size) { +#if MI_VALGRIND + (void)p; (void)size; + return true; // when compiled with valgrind we don't init on purpose +#else if(!p) return false; @@ -317,9 +321,14 @@ bool check_debug_fill_uninit(uint8_t* p, size_t size) { result &= p[i] == MI_DEBUG_UNINIT; } return result; +#endif } bool check_debug_fill_freed(uint8_t* p, size_t size) { +#if MI_VALGRIND + (void)p; (void)size; + return true; // when compiled with valgrind we don't fill on purpose +#else if(!p) return false; @@ -328,5 +337,6 @@ bool check_debug_fill_freed(uint8_t* p, size_t size) { result &= p[i] == MI_DEBUG_FREED; } return result; +#endif } #endif From 74d002b61ceaa9d26d5fb731ba5b4bbb764b98a7 Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 30 Oct 2022 14:20:35 -0700 Subject: [PATCH 12/12] better cmake process for MI_VALGRIND --- CMakeLists.txt | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 93fbb1cd..d826e646 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -108,6 +108,9 @@ endif() if(MI_SECURE) message(STATUS "Set full secure build (MI_SECURE=ON)") list(APPEND mi_defines MI_SECURE=4) + #if (MI_VALGRIND) + # message(WARNING "Secure mode is a bit weakened when compiling with Valgrind support as buffer overflow detection is no longer byte-precise (if running without valgrind)") + #endif() endif() if(MI_VALGRIND) @@ -265,20 +268,18 @@ else() set(mi_install_cmakedir "${CMAKE_INSTALL_LIBDIR}/cmake/mimalloc-${mi_version}") # for cmake package info endif() +set(mi_basename "mimalloc") if(MI_SECURE) - set(mi_basename "mimalloc-secure") -else() - if(MI_VALGRIND) - set(mi_basename "mimalloc-valgrind") - else() - set(mi_basename "mimalloc") - endif() + set(mi_basename "${mi_basename}-secure") +endif() +if(MI_VALGRIND) + set(mi_basename "${mi_basename}-valgrind") endif() - string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC) if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$")) set(mi_basename "${mi_basename}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. 
-debug) if not a release version
 endif()
+
 if(MI_BUILD_SHARED)
   list(APPEND mi_build_targets "shared")
 endif()
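
A note on what this integration makes visible (an illustration only, not part of the
patches): once the library is configured with -DMI_VALGRIND=ON, freshly allocated
blocks are handed to Valgrind as undefined memory and freed blocks are released again
through the mi_track_free hooks, while the MI_DEBUG_UNINIT / MI_DEBUG_FREED fills are
skipped under tracking (see the test changes above). A minimal program such as the
following would therefore be expected to produce an "uninitialised value" report and
an invalid read of a freed block when run under valgrind, instead of going unnoticed
inside memory that mimalloc already has mapped from the OS:

  #include <stdio.h>
  #include <mimalloc.h>

  int main(void) {
    // fresh block: registered with Valgrind as addressable but undefined
    int* p = (int*)mi_malloc(4 * sizeof(int));
    if (p == NULL) return 1;
    if (p[0] == 42) printf("lucky\n");  // expected: uninitialised-value report

    p[0] = 42;
    mi_free(p);                         // block is also released to Valgrind
    printf("%d\n", p[0]);               // expected: invalid read (use after free)
    return 0;
  }

A plausible way to try this is to build with "cmake -DMI_VALGRIND=ON", link the test
program against the resulting mimalloc-valgrind library, and run it under plain
valgrind; without MI_VALGRIND, Memcheck only sees mimalloc's large OS mappings and
typically reports nothing for either access.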
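
Similarly, the MI_PADDING option text above ("detect heap block overflow ... or with
Valgrind") together with the red-zone remark added to _mi_page_malloc suggests that,
in a tracked build, the padding behind each block stays non-accessible to the
application. Assuming the padding is indeed registered as a Valgrind red zone (as
that comment indicates), an off-by-one write like the sketch below should show up as
an invalid write rather than silently landing in the padding:

  #include <string.h>
  #include <mimalloc.h>

  int main(void) {
    char* s = (char*)mi_malloc(10);
    if (s == NULL) return 1;
    memset(s, 'x', 11);   // one byte past the requested size
    mi_free(s);
    return 0;
  }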