updates to run valgrind on test-api

daan 2022-10-29 11:43:09 -07:00
parent bc8f23aa0d
commit eee7c40da5
8 changed files with 39 additions and 12 deletions

include/mimalloc-internal.h

@@ -931,7 +931,7 @@ static inline size_t mi_bsr(uintptr_t x) {
 // (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253.
 // ---------------------------------------------------------------------------------
-#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
+#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
 #include <intrin.h>
 #include <string.h>
 extern bool _mi_cpu_has_fsrm;
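Note: gating the fast-copy path behind `!MI_TRACK_ENABLED` makes a tracking build fall back to a plain `memcpy` that valgrind can intercept and check. A minimal sketch of the surrounding pattern, assuming a simplified `_mi_memcpy` body (not the exact source):

```c
#include <string.h>
#include <stdbool.h>

#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
extern bool _mi_cpu_has_fsrm;
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  if (_mi_cpu_has_fsrm) {
    __movsb((unsigned char*)dst, (const unsigned char*)src, n);  // fast short rep movsb
  }
  else {
    memcpy(dst, src, n);
  }
}
#else
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  memcpy(dst, src, n);  // tracking builds: an ordinary memcpy that memcheck understands
}
#endif
```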

include/mimalloc-track.h

@@ -17,6 +17,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_VALGRIND 1
 #if MI_VALGRIND
+#define MI_TRACK_ENABLED 1
 #include <valgrind/valgrind.h>
 #include <valgrind/memcheck.h>
@@ -29,6 +32,8 @@ terms of the MIT license. A copy of the license can be found in the file
 #else
+#define MI_TRACK_ENABLED 0
 #define mi_track_malloc(p,size,zero)
 #define mi_track_resize(p,oldsize,newsize)
 #define mi_track_free(p)
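Note: the valgrind-backed definitions sit in the elided middle of this file. A plausible sketch of what they expand to, using valgrind's client-request macros; treating `MI_PADDING_SIZE` as the redzone argument is an assumption, not confirmed by this diff:

```c
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

// hypothetical reconstruction; the real header may differ in detail
#define mi_track_malloc(p,size,zero)        VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*redzone*/,zero)
#define mi_track_resize(p,oldsize,newsize)  VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE)
#define mi_track_free(p)                    VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*redzone*/)
#define mi_track_mem_defined(p,size)        VALGRIND_MAKE_MEM_DEFINED(p,size)
#define mi_track_mem_undefined(p,size)      VALGRIND_MAKE_MEM_UNDEFINED(p,size)
#define mi_track_mem_noaccess(p,size)       VALGRIND_MAKE_MEM_NOACCESS(p,size)
```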

src/alloc-aligned.c

@@ -41,6 +41,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true);
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
+  mi_track_free(p);
+  mi_track_malloc(aligned_p,size,zero);
   return aligned_p;
 }
@@ -82,6 +85,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
     void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
     mi_assert_internal(p != NULL);
     mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
+    mi_track_malloc(p,size,zero);
     return p;
   }
 }

src/alloc.c

@@ -39,6 +39,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
   // allow use of the block internally
+  // todo: can we optimize this call away for non-zero'd release mode?
   mi_track_mem_undefined(block,mi_page_block_size(page));
   // zero the block? note: we need to zero the full block size (issue #63)
@@ -77,6 +78,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
   #endif
+  // mark as no-access again
   mi_track_mem_noaccess(block,mi_page_block_size(page));
   return block;
 }
@@ -341,7 +343,7 @@ static void mi_stat_huge_free(const mi_page_t* page) {
 // Free
 // ------------------------------------------------------
-// multi-threaded free
+// multi-threaded free (or free in huge block)
 static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
@@ -349,6 +351,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   mi_check_padding(page, block);
   mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
   #if (MI_DEBUG!=0)
+  mi_track_mem_undefined(block, mi_page_block_size(page)); // note: check padding may set parts to noaccess
   memset(block, MI_DEBUG_FREED, mi_usable_size(block));
   #endif
@@ -408,6 +411,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
   if mi_unlikely(mi_check_is_double_free(page, block)) return;
   mi_check_padding(page, block);
   #if (MI_DEBUG!=0)
+  mi_track_mem_undefined(block, mi_page_block_size(page)); // note: check padding may set parts to noaccess
   memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
   #endif
   mi_block_set_next(page, block, page->local_free);
@@ -486,6 +490,10 @@ void mi_free(void* p) mi_attr_noexcept
   mi_threadid_t tid = _mi_thread_id();
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_block_t*)p;
+  #if MI_TRACK_ENABLED
+  const size_t track_bsize = mi_page_block_size(page);
+  #endif
   if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
@@ -493,7 +501,7 @@ void mi_free(void* p) mi_attr_noexcept
     mi_check_padding(page, block);
     mi_stat_free(page, block);
     #if (MI_DEBUG!=0)
-    mi_track_mem_undefined(block,mi_page_block_size(page));
+    mi_track_mem_undefined(block,track_bsize); // note: check padding may set parts to noaccess
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
@@ -505,10 +513,10 @@ void mi_free(void* p) mi_attr_noexcept
   else {
     // non-local, aligned blocks, or a full page; use the more generic path
     // note: recalc page in generic to improve code generation
-    mi_track_mem_undefined(block,mi_page_block_size(page));
+    mi_track_mem_undefined(block,track_bsize);
     mi_free_generic(segment, tid == segment->thread_id, p);
   }
-  mi_track_mem_noaccess(block,mi_page_block_size(page));
+  mi_track_mem_noaccess(block,track_bsize); // cannot use mi_page_block_size as the segment might be deallocated by now
 }

 bool _mi_free_delayed_block(mi_block_t* block) {
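Note: the trailing comment explains the new `track_bsize` local: after `mi_free_generic` the segment that holds the `page` metadata may already be unmapped, so the block size must be read before the free. The same cache-before-destroy pattern as a self-contained toy (hypothetical `page_t`, not mimalloc code):

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { size_t block_size; } page_t;  // stand-in for mimalloc's page metadata

int main(void) {
  page_t* page = malloc(sizeof(page_t));
  page->block_size = 64;
  size_t bsize = page->block_size;  // cache while the metadata is still valid
  free(page);                       // after this, page->block_size would be a use-after-free
  printf("tracked block size: %zu\n", bsize);  // safe: uses the cached copy
  return 0;
}
```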
@@ -641,10 +649,12 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
   // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
   const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
+  #if !MI_TRACK_ENABLED
   if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
     // todo: adjust potential padding to reflect the new size?
     return p; // reallocation still fits and not more than 50% waste
   }
+  #endif
   void* newp = mi_heap_malloc(heap,newsize);
   if mi_likely(newp != NULL) {
     if (zero && newsize > size) {
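Note: wrapping the in-place shrink shortcut in `#if !MI_TRACK_ENABLED` keeps the tracker's bookkeeping honest: the tracker was told the block's size at allocation time, and silently returning the old block for a smaller request would leave that record stale. A small illustration of the behavioral difference (hedged; sizes are arbitrary):

```c
#include <mimalloc.h>

int main(void) {
  char* p = (char*)mi_malloc(100);  // a tracker records a 100-byte block here
  // Without tracking this shrink may be served in place (p returned unchanged);
  // under valgrind the shortcut is compiled out, so the tracker instead sees a
  // fresh allocation plus a free and can bounds-check the 60 bytes precisely.
  p = (char*)mi_realloc(p, 60);
  mi_free(p);
  return 0;
}
```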

src/os.c

@@ -986,7 +986,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   else _mi_stat_decrease(&stats->reset, csize);
   if (!reset) return true; // nothing to do on unreset!
-  #if (MI_DEBUG>1)
+  #if (MI_DEBUG>1) && !MI_TRACK_ENABLED
   if (MI_SECURE==0) {
     memset(start, 0, csize); // pretend it is eagerly reset
   }

src/segment.c

@@ -476,6 +476,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
     fully_committed = false;
   }
   _mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os);
+  //mi_track_mem_noaccess(segment,segment_size);
 }

 // called by threads that are terminating to free cached segments
@@ -588,9 +589,11 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
     segment->mem_is_pinned = (mem_large || is_pinned);
     segment->mem_is_committed = commit;
     mi_segments_track_size((long)segment_size, tld);
   }
   mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
   mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
+  //mi_track_mem_defined(segment,info_size);
   mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
   if (!pages_still_good) {
     // zero the segment info (but not the `mem` fields)
@@ -1208,7 +1211,7 @@ static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_p
   mi_assert_internal(free_queue->first != NULL);
   mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld);
   mi_assert_internal(page != NULL);
-  #if MI_DEBUG>=2
+  #if MI_DEBUG>=2 && !MI_TRACK_ENABLED
   // verify it is committed
   _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
   #endif
@@ -1232,7 +1235,7 @@ static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size
   if (segment == NULL) return NULL;
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);
-  #if MI_DEBUG>=2
+  #if MI_DEBUG>=2 && !MI_TRACK_ENABLED
   _mi_segment_page_start(segment, page, sizeof(void*), NULL, NULL)[0] = 0;
   #endif
   return page;

test/test-api.c

@@ -72,7 +72,9 @@ int main(void) {
     result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL);
   };
   CHECK_BODY("calloc0") {
-    result = (mi_usable_size(mi_calloc(0,1000)) <= 16);
+    void* p = mi_calloc(0,1000);
+    result = (mi_usable_size(p) <= 16);
+    mi_free(p);
   };
   CHECK_BODY("malloc-large") {   // see PR #544.
     void* p = mi_malloc(67108872);
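Note: the `calloc0` change is a leak fix in the test itself: the old one-liner discarded the pointer from `mi_calloc(0,1000)`, which valgrind then reports as a leak. A standalone repro of what the updated test now does, using only the documented mimalloc API:

```c
#include <mimalloc.h>

int main(void) {
  void* p = mi_calloc(0, 1000);        // a zero-count calloc still returns a unique, freeable pointer
  int ok = (mi_usable_size(p) <= 16);  // the assertion the test keeps
  mi_free(p);                          // new in this commit: free it so valgrind reports no leak
  return ok ? 0 : 1;
}
```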

test/test-wrong.c

@@ -11,7 +11,10 @@
 int main(int argc, char** argv) {
   int* p = mi(malloc)(3*sizeof(int));
   int* q = mi(malloc)(sizeof(int));
+  int* r = mi_malloc_aligned(8,16);
+  mi_free(r);

   // undefined access
   // printf("undefined: %d\n", *q);
@@ -23,7 +26,7 @@ int main(int argc, char** argv) {
   mi(free)(q);

   // double free
-  mi(free)(q);
+  // mi(free)(q);

   // use after free
   printf("use-after-free: %d\n", *q);