match track free size to tracked malloc size

Daan 2023-03-05 11:01:51 -08:00
parent 20ae35a1d4
commit 056c2ce45b
4 changed files with 57 additions and 45 deletions

View File

@@ -6,7 +6,7 @@ set(CMAKE_CXX_STANDARD 17)
 option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF)
 option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF)
-option(MI_PADDING "Enable padding to detect heap block overflow (always on in DEBUG mode or with Valgrind)" OFF)
+option(MI_PADDING "Enable padding to detect heap block overflow (always on in DEBUG or SECURE mode, or with Valgrind/ASAN)" OFF)
 option(MI_OVERRIDE "Override the standard malloc interface (e.g. define entry points for malloc() etc)" ON)
 option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF)
 option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF)
@@ -25,6 +25,7 @@ option(MI_BUILD_TESTS "Build test executables" ON)
 option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF)
 option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF)
 option(MI_SKIP_COLLECT_ON_EXIT, "Skip collecting memory on program exit" OFF)
+option(MI_NO_PADDING "Force no use of padding even in DEBUG mode ets." OFF)

 # deprecated options
 option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF)
@@ -186,9 +187,14 @@ if(MI_DEBUG_FULL)
   list(APPEND mi_defines MI_DEBUG=3)   # full invariant checking
 endif()
-if(MI_PADDING)
-  message(STATUS "Enable padding of heap blocks explicitly (MI_PADDING=ON)")
-  list(APPEND mi_defines MI_PADDING=1)
+if(MI_NO_PADDING)
+  message(STATUS "Suppress any padding of heap blocks (MI_NO_PADDING=ON)")
+  list(APPEND mi_defines MI_PADDING=0)
+else()
+  if(MI_PADDING)
+    message(STATUS "Enable explicit padding of heap blocks (MI_PADDING=ON)")
+    list(APPEND mi_defines MI_PADDING=1)
+  endif()
 endif()
 if(MI_XMALLOC)

View File

@@ -13,10 +13,13 @@ Track memory ranges with macros for tools like Valgrind
 address sanitizer, or other memory checkers.
 The macros are set up such that the size passed to `mi_track_free_size`
-matches the size of the allocation, or the newsize of a `mi_track_resize` (currently unused though).
+matches the size of the allocation, or the new size of a `mi_track_resize` (currently unused though).
 The `size` is either byte precise (and what the user requested) if `MI_PADDING` is enabled,
 or otherwise it is the full block size which may be larger than the original request.
+Aligned pointers in a block are signaled right after a `mi_track_malloc`
+with the `mi_track_align` macro. The corresponding `mi_track_free` still
+uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
 -------------------------------------------------------------------------------------------------------*/

 #if MI_VALGRIND
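
The comment block above describes a small protocol between the allocator and a tracker. As a rough, self-contained sketch of that call order for an allocation whose user pointer is aligned inside the block (the mi_track_* definitions below are made-up logging stubs and the numbers are illustrative, not mimalloc's actual macros):

#include <stdio.h>
#include <stdint.h>

// Stand-in stubs that only log; NOT the definitions from this header.
#define mi_track_malloc(p,reqsize,zero)        printf("track malloc: block=%p size=%zu\n", (void*)(p), (size_t)(reqsize))
#define mi_track_align(p,alignedp,offset,size) printf("track align:  block=%p user=%p offset=%zu\n", (void*)(p), (void*)(alignedp), (size_t)(offset))
#define mi_track_free_size(p,size)             printf("track free:   block=%p size=%zu\n", (void*)(p), (size_t)(size))

int main(void) {
  uint8_t block[256];                  // pretend this is the block a page handed out
  size_t  reqsize = 100, adjust = 24;  // purely illustrative values
  uint8_t* aligned_p = block + adjust; // interior pointer actually returned to the user

  mi_track_malloc(block, reqsize, 0);                // register the block at its start
  mi_track_align(block, aligned_p, adjust, reqsize); // signal the aligned pointer; the first `adjust` bytes become no-access
  /* ... user code works with aligned_p ... */
  mi_track_free_size(block, reqsize);                // the free uses the block start and the same size as the malloc
  return 0;
}
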
@@ -27,12 +30,12 @@ or otherwise it is the full block size which may be larger than the original req
 #include <valgrind/valgrind.h>
 #include <valgrind/memcheck.h>
-#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
-#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
 #define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
 #define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size)
 #define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size)
 #define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size)

 #elif MI_ASAN
@@ -41,34 +44,47 @@ or otherwise it is the full block size which may be larger than the original req
 #include <sanitizer/asan_interface.h>
-#define mi_track_malloc(p,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
-#define mi_track_resize(p,oldsize,newsize) ASAN_POISON_MEMORY_REGION(p,oldsize); ASAN_UNPOISON_MEMORY_REGION(p,newsize)
+#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
 #define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size)
 #define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
 #define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
 #define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size)

 #else

 #define MI_TRACK_ENABLED 0
 #define MI_TRACK_TOOL "none"

-#define mi_track_malloc(p,size,zero)
-#define mi_track_resize(p,oldsize,newsize)
+#define mi_track_malloc_size(p,reqsize,size,zero)
 #define mi_track_free_size(p,_size)
+#define mi_track_align(p,alignedp,offset,size)
+#define mi_track_resize(p,oldsize,newsize)
 #define mi_track_mem_defined(p,size)
 #define mi_track_mem_undefined(p,size)
 #define mi_track_mem_noaccess(p,size)

 #endif

-#ifndef mi_track_free
-#define mi_track_free(p) mi_track_free_size(p,mi_usable_size(p));
-#endif
-
 #ifndef mi_track_resize
 #define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
 #endif

+#ifndef mi_track_align
+#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset)
+#endif
+
+#if MI_PADDING
+#define mi_track_malloc(p,reqsize,zero) \
+  if ((p)!=NULL) { \
+    mi_assert_internal(mi_usable_size(p)==(reqsize)); \
+    mi_track_malloc_size(p,reqsize,reqsize,zero); \
+  }
+#else
+#define mi_track_malloc(p,reqsize,zero) \
+  if ((p)!=NULL) { \
+    mi_assert_internal(mi_usable_size(p)>=(reqsize)); \
+    mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
+  }
+#endif

 #endif
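
The `#if MI_PADDING` block above is what decides which size gets reported: byte-precise when padding is on, the full usable block size otherwise. A minimal user-level sketch of the difference, assuming mimalloc is built and linked (whether `MI_PADDING` is active depends on that build):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  void* p = mi_malloc(100);
  // With MI_PADDING the tracked size equals the request (usable size == 100);
  // otherwise the full block size is tracked and the usable size may be larger.
  printf("requested 100 bytes, usable size is %zu\n", mi_usable_size(p));
  mi_free(p);  // reports the same size back to the tracker that the malloc reported
  return 0;
}
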

View File

@@ -65,12 +65,14 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
     mi_page_set_has_aligned(page, true);
     _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
   }
-  // todo: expand padding if overallocated and p==aligned_p ?
+  // todo: expand padding if overallocated ?
   mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
   mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
+  mi_assert_internal(mi_usable_size(aligned_p)>=size);
+  mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);

   // now zero the block if needed
   if (zero && alignment > MI_ALIGNMENT_MAX) {
     const ptrdiff_t diff = (uint8_t*)aligned_p - (uint8_t*)p;
@@ -83,8 +85,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   #if MI_TRACK_ENABLED
   if (p != aligned_p) {
-    mi_track_free(p);
-    mi_track_malloc(aligned_p, size, zero);
+    mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
   }
   #endif
   return aligned_p;
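
At the user level, the two assertions added above correspond to what an over-aligned allocation guarantees: the returned pointer is aligned and its usable size covers the request, while tracking stays keyed on the block start. A small sketch, assuming mimalloc is linked (request size and alignment are illustrative only):

#include <mimalloc.h>
#include <stdint.h>
#include <assert.h>

int main(void) {
  void* aligned_p = mi_malloc_aligned(100, 256); // may take the fallback path above
  if (aligned_p == NULL) return 1;
  assert(((uintptr_t)aligned_p % 256) == 0);     // alignment invariant (offset is 0 here)
  assert(mi_usable_size(aligned_p) >= 100);      // mirrors mi_assert_internal(mi_usable_size(aligned_p) >= size)
  mi_free(aligned_p);                            // internally resolves back to the block start p
  return 0;
}
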

View File

@@ -102,19 +102,14 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   if (size == 0) { size = sizeof(void*); }
   #endif
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
-  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
-  #if MI_PADDING
-  mi_assert_internal(p == NULL || mi_usable_size(p) == size);
-  #else
-  mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
-  #endif
+  void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+  mi_track_malloc(p,size,zero);
   #if MI_STAT>1
   if (p != NULL) {
     if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
   #endif
-  if (p!=NULL) { mi_track_malloc(p,size,zero); }
   return p;
 }
@@ -137,18 +132,13 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
     mi_assert(heap!=NULL);
     mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
     void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
-    #if MI_PADDING
-    mi_assert_internal(p == NULL || mi_usable_size(p) == size);
-    #else
-    mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
-    #endif
+    mi_track_malloc(p,size,zero);
     #if MI_STAT>1
     if (p != NULL) {
       if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
       mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
     }
     #endif
-    if (p!=NULL) { mi_track_malloc(p,size,zero); }
     return p;
   }
 }
@@ -499,7 +489,7 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
 void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
   mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
   mi_stat_free(page, block); // stat_free may access the padding
-  mi_track_free(p);
+  mi_track_free_size(block, mi_page_usable_size_of(page,block));
   _mi_free_block(page, is_local, block);
 }
@@ -559,7 +549,7 @@ void mi_free(void* p) mi_attr_noexcept
     #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
-    mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster then mi_usable_size as we already now the page and that p is unaligned
+    mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster then mi_usable_size as we already know the page and that p is unaligned
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
@@ -695,8 +685,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
     mi_assert_internal(p!=NULL);
     // todo: do not track as the usable size is still the same in the free; adjust potential padding?
-    // mi_track_free(p);
-    // mi_track_malloc(p,newsize,true);
+    // mi_track_resize(p,size,newsize)
     return p; // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap,newsize);
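
The fast path above returns the same block when the new size still fits and wastes less than half of it, which is why the tracking call is only a todo: the tracked allocation does not change. A hedged user-level illustration, assuming mimalloc is linked (in-place reuse is an internal fast path, not an API guarantee, so the example only reports it):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  void* p = mi_malloc(1000);
  void* q = mi_realloc(p, 600); // newsize <= usable size and >= half of it: may reuse the block as above
  printf("shrunk in place: %s\n", (q == p) ? "yes" : "no");
  mi_free(q);
  return 0;
}
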