diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 01133630..89710dce 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -103,6 +103,16 @@ jobs:
         CXX: clang++
         BuildType: debug-asan-clang
         cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_TRACK_ASAN=ON
+      Debug UBSAN Clang:
+        CC: clang
+        CXX: clang++
+        BuildType: debug-ubsan-clang
+        cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_DEBUG_UBSAN=ON
+      Debug TSAN Clang++:
+        CC: clang
+        CXX: clang++
+        BuildType: debug-tsan-clang-cxx
+        cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_USE_CXX=ON -DMI_DEBUG_TSAN=ON

   steps:
   - task: CMake@1
diff --git a/src/alloc.c b/src/alloc.c
index 301166eb..75d8999c 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -50,7 +50,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
     _mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
   }

-#if (MI_DEBUG>0) && !MI_TRACK_ENABLED
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
   if (!page->is_zero && !zero && !mi_page_is_huge(page)) {
     memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
   }
@@ -406,7 +406,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   }

-  #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED              // note: when tracking, cannot use mi_usable_size with multi-threading
+  #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN  // note: when tracking, cannot use mi_usable_size with multi-threading
   memset(block, MI_DEBUG_FREED, mi_usable_size(block));
   #endif

@@ -458,7 +458,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
     // owning thread can free a block directly
     if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
-    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
+    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
@@ -546,7 +546,7 @@ void mi_free(void* p) mi_attr_noexcept
     if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
     mi_stat_free(page, block);
-    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
+    #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster then mi_usable_size as we already know the page and that p is unaligned
diff --git a/src/os.c b/src/os.c
index 242a4eef..f6cc1e68 100644
--- a/src/os.c
+++ b/src/os.c
@@ -411,7 +411,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   else _mi_stat_decrease(&stats->reset, csize);
   if (!reset) return true; // nothing to do on unreset!

-  #if (MI_DEBUG>1) && !MI_TRACK_ENABLED
+  #if (MI_DEBUG>1) && !MI_TRACK_ENABLED // && !MI_TSAN
   if (MI_SECURE==0) {
     memset(start, 0, csize); // pretend it is eagerly reset
   }
diff --git a/src/page.c b/src/page.c
index 531293f3..aa9f1bde 100644
--- a/src/page.c
+++ b/src/page.c
@@ -92,10 +92,12 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   }
   #endif

+  #if !MI_TRACK_ENABLED && !MI_TSAN
   mi_block_t* tfree = mi_page_thread_free(page);
   mi_assert_internal(mi_page_list_is_valid(page, tfree));
   //size_t tfree_count = mi_page_list_count(page, tfree);
   //mi_assert_internal(tfree_count <= page->thread_freed + 1);
+  #endif

   size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
   mi_assert_internal(page->used + free_count == page->capacity);
diff --git a/src/region.c b/src/region.c
index 77b07eb5..6c8ffb79 100644
--- a/src/region.c
+++ b/src/region.c
@@ -315,7 +315,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
   }
   mi_assert_internal(!_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx));

-  #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
+  #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED // && !MI_TSAN
   if (*commit) { ((uint8_t*)p)[0] = 0; }
   #endif

@@ -361,7 +361,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t align_offset,

   if (p != NULL) {
     mi_assert_internal(((uintptr_t)p + align_offset) % alignment == 0);
-    #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
+    #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED // && !MI_TSAN
     if (*commit) { ((uint8_t*)p)[0] = 0; } // ensure the memory is committed
     #endif
   }
diff --git a/src/segment.c b/src/segment.c
index 6cdf4fe7..8a9c8fe1 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -1182,7 +1182,7 @@ static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_p
   mi_assert_internal(free_queue->first != NULL);
   mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld);
   mi_assert_internal(page != NULL);
-#if MI_DEBUG>=2 && !MI_TRACK_ENABLED
+#if MI_DEBUG>=2 && !MI_TRACK_ENABLED // && !MI_TSAN
   // verify it is committed
   _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
 #endif
@@ -1206,7 +1206,7 @@ static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size
   if (segment == NULL) return NULL;
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);
-#if MI_DEBUG>=2 && !MI_TRACK_ENABLED
+#if MI_DEBUG>=2 && !MI_TRACK_ENABLED // && !MI_TSAN
   _mi_segment_page_start(segment, page, sizeof(void*), NULL, NULL)[0] = 0;
 #endif
   return page;
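
The hunks above only reference MI_TSAN; its definition is not part of this diff. As a point of reference, the standalone sketch below shows the common compiler-detection idiom for such a macro and the guard pattern the hunks rely on. It is an illustration under assumptions only: the macro layout, the MI_DEBUG/MI_DEBUG_FREED values, and the link from -DMI_DEBUG_TSAN=ON to -fsanitize=thread are not taken from this diff and may differ from mimalloc's actual headers and CMake setup.

/* Minimal standalone sketch (assumptions noted above); build with e.g.
   cc -fsanitize=thread tsan_guard_sketch.c. It derives an MI_TSAN-style macro
   from compiler sanitizer detection and uses it to compile out the debug fill
   of freed blocks, which TSAN would otherwise report as a racing write on the
   multi-threaded free path. */
#include <stdio.h>
#include <string.h>

#ifndef MI_TSAN
  #if defined(__has_feature)                              /* Clang-style feature probe */
    #if __has_feature(thread_sanitizer)
      #define MI_TSAN 1
    #endif
  #endif
  #if !defined(MI_TSAN) && defined(__SANITIZE_THREAD__)   /* GCC under -fsanitize=thread */
    #define MI_TSAN 1
  #endif
  #ifndef MI_TSAN
    #define MI_TSAN 0
  #endif
#endif

#define MI_DEBUG        2     /* hypothetical values for this sketch only */
#define MI_DEBUG_FREED  0xDF

/* Mirrors the guard added throughout the diff: poison freed memory only when
   neither an external memory tracker nor TSAN is active. */
static void debug_fill_freed(void* block, size_t size) {
  #if (MI_DEBUG != 0) && !MI_TSAN
  memset(block, MI_DEBUG_FREED, size);
  #else
  (void)block; (void)size;
  #endif
}

int main(void) {
  char buf[32];
  debug_fill_freed(buf, sizeof buf);
  printf("MI_TSAN = %d\n", MI_TSAN);  /* 1 when built with -fsanitize=thread */
  return 0;
}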