wip: initial work on tracking source of an allocation in debug mode

daan 2020-02-11 09:37:26 -08:00
parent 0a77b7423f
commit 4090561975
10 changed files with 381 additions and 164 deletions

View File

@ -22,14 +22,18 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_noinline __declspec(noinline)
#define mi_decl_thread __declspec(thread)
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
#include <intrin.h>
#define mi_return_address() _ReturnAddress()
#elif (defined(__GNUC__) && (__GNUC__>=3)) // includes clang and icc
#define mi_decl_noinline __attribute__((noinline))
#define mi_decl_thread __thread
#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE)))
#define mi_return_address() __builtin_return_address(0)
#else
#define mi_decl_noinline
#define mi_decl_thread __thread // hope for the best :-)
#define mi_decl_cache_align
#define mi_return_address()  ((void*)0)  // no intrinsic available: records an unknown source
#endif
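For illustration (not part of this commit), a minimal sketch of what `mi_return_address()` is for: an outer allocation entry point can record its own call site. The `demo_*` names are hypothetical.

// Hedged sketch only: demo_malloc / record_alloc_site are hypothetical helpers.
#include <stdio.h>
#include <stdlib.h>

#if defined(_MSC_VER)
#include <intrin.h>
#define demo_return_address()  _ReturnAddress()
#elif defined(__GNUC__)
#define demo_return_address()  __builtin_return_address(0)
#else
#define demo_return_address()  ((void*)0)   // unknown caller
#endif

static void record_alloc_site(size_t size, void* caller) {
  fprintf(stderr, "allocation of %zu bytes requested from %p\n", size, caller);
}

void* demo_malloc(size_t size) {
  record_alloc_site(size, demo_return_address());  // call site of demo_malloc in the user program
  return malloc(size);
}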
@ -41,6 +45,7 @@ void _mi_verbose_message(const char* fmt, ...);
void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
void _mi_error_message(int err, const char* fmt, ...);
void _mi_page_block_error_message(int err, const mi_page_t* page, const mi_block_t* block, const char* msg);
// random.c
void _mi_random_init(mi_random_ctx_t* ctx);
@ -89,7 +94,7 @@ void _mi_abandoned_await_readers(void);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;
void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_SOURCE_PARAM) mi_attr_noexcept mi_attr_malloc;
void _mi_page_retire(mi_page_t* page); // free the page if there are no other pages with many free blocks
void _mi_page_unfull(mi_page_t* page);
@ -122,13 +127,18 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
// "alloc.c"
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero);
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero);
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size MI_SOURCE_PARAM) mi_attr_noexcept; // called from `_mi_malloc_generic`
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
mi_decl_allocator void* _mi_heapx_malloc(mi_heap_t* heap, size_t size MI_SOURCE_PARAM) mi_attr_noexcept;
mi_decl_allocator void* _mi_heapx_malloc_zero(mi_heap_t* heap, size_t size, bool zero MI_SOURCE_PARAM);
mi_decl_allocator void* _mi_heapx_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero MI_SOURCE_PARAM);
mi_decl_allocator void* _mi_heapx_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment MI_SOURCE_PARAM) mi_attr_noexcept;
mi_decl_allocator void* _mi_heapx_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size MI_SOURCE_PARAM) mi_attr_noexcept;
mi_decl_allocator char* _mi_heapx_strdup(mi_heap_t* heap, const char* s MI_SOURCE_PARAM) mi_attr_noexcept;
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
#endif
@ -606,7 +616,8 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
_mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
//_mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
_mi_page_block_error_message(EFAULT, page, block, "corrupted free list entry" );
next = NULL;
}
return next;

View File

@ -269,6 +269,33 @@ typedef struct mi_segment_s {
} mi_segment_t;
// ------------------------------------------------------
// In debug mode there is a padding structure at the end
// of the blocks to check for buffer overflows.
// ------------------------------------------------------
#if defined(MI_PADDING)
// compressed location:
// lsb=1: bits 63-32: file name (char* offset relative to `_mi_debug_fname_base`), bits 31-1: line number
// lsb=0: bits 63-1: return address
typedef int64_t mi_source_t;
typedef struct mi_padding_s {
uint32_t canary; // encoded block value to check validity of the padding (in case of heap block overflow)
uint32_t delta; // padding bytes at the end of the block, before this structure (mi_usable_size(p) - delta == exact allocated bytes)
mi_source_t source; // source location
} mi_padding_t;
#define MI_PADDING_SIZE (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#define MI_SOURCE_PARAM , mi_source_t source
#define MI_SOURCE_ARG , source
#define MI_SOURCE_RET , ((intptr_t)mi_return_address() << (intptr_t)1)
#else
#define MI_PADDING_SIZE 0
#define MI_PADDING_WSIZE 0
#define MI_SOURCE_PARAM
#define MI_SOURCE_ARG
#define MI_SOURCE_RET
#endif
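For illustration (not part of this commit), a sketch of how the compressed encoding described above round-trips. The `demo_*` helpers are hypothetical, but the bit layout mirrors MI_SOURCE_RET here and the decoding in _mi_page_block_error_message (options.c).

#include <stdint.h>

typedef int64_t demo_source_t;

// return-address form: lsb = 0, bits 63-1 hold the address
static demo_source_t demo_source_from_ret(void* ret_addr) {
  return (demo_source_t)(((uint64_t)(uintptr_t)ret_addr) << 1);
}

// file/line form: lsb = 1, bits 63-32 hold the file-name offset
// (relative to a base string), bits 31-1 hold the line number
static demo_source_t demo_source_from_loc(int32_t fname_offset, uint32_t line) {
  const uint64_t hi = ((uint64_t)(uint32_t)fname_offset) << 32;
  const uint64_t lo = (uint64_t)((line << 1) | 1u);
  return (demo_source_t)(hi | lo);
}

static void demo_source_decode(demo_source_t src, const char* fname_base) {
  if (src == 0) {
    // unknown source
  }
  else if ((src & 1) == 0) {
    void* ret_addr = (void*)(intptr_t)(src >> 1);            // return-address form
    (void)ret_addr;
  }
  else {
    const char* fname = fname_base + (int32_t)(src >> 32);   // file/line form
    uint32_t line = ((uint32_t)src) >> 1;
    (void)fname; (void)line;
  }
}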
// ------------------------------------------------------
// Heaps
// Provide first-class heaps to allocate from.
@ -301,20 +328,6 @@ typedef struct mi_random_cxt_s {
int output_available;
} mi_random_ctx_t;
// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if defined(MI_PADDING)
typedef struct mi_padding_s {
uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE 0
#define MI_PADDING_WSIZE 0
#endif
#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
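As a side note on sizes (a hedged check, assuming a 64-bit build where MI_INTPTR_SIZE is 8): with the added source field the padding grows from 8 to 16 bytes, so MI_PADDING_WSIZE becomes 2 and MI_PAGES_DIRECT above grows by one entry. The `demo_padding_t` below is a hypothetical mirror used only for this size check.

#include <assert.h>
#include <stdint.h>

typedef struct { uint32_t canary; uint32_t delta; int64_t source; } demo_padding_t;

static_assert(sizeof(demo_padding_t) == 16, "8 bytes of canary+delta plus an 8-byte source");
static_assert((sizeof(demo_padding_t) + sizeof(void*) - 1) / sizeof(void*) == 2,
              "MI_PADDING_WSIZE would be 2 words on a 64-bit build");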

View File

@ -14,14 +14,14 @@ terms of the MIT license. A copy of the license can be found in the file
// Aligned Allocation
// ------------------------------------------------------
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept {
static void* mi_heapx_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero MI_SOURCE_PARAM) mi_attr_noexcept {
// note: we don't require `size > offset`, we just guarantee that
// the address at offset is aligned regardless of the allocated size.
mi_assert(alignment > 0 && alignment % sizeof(void*) == 0);
if (mi_unlikely(size > PTRDIFF_MAX)) return NULL; // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) return NULL; // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
if (alignment <= MI_MAX_ALIGN_SIZE && offset==0) return _mi_heap_malloc_zero(heap, size, zero);
if (alignment <= MI_MAX_ALIGN_SIZE && offset==0) return _mi_heapx_malloc_zero(heap, size, zero MI_SOURCE_ARG);
const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
// try if there is a small block available with just the right alignment
@ -34,7 +34,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
#if MI_STAT>1
mi_heap_stat_increase( heap, malloc, size);
#endif
void* p = _mi_page_malloc(heap,page,padsize); // TODO: inline _mi_page_malloc
void* p = _mi_page_malloc(heap,page,padsize MI_SOURCE_ARG); // TODO: inline _mi_page_malloc
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
if (zero) _mi_block_zero_init(page,p,size);
@ -44,13 +44,13 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
// use regular allocation if it is guaranteed to fit the alignment constraints
if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
void* p = _mi_heap_malloc_zero(heap, size, zero);
void* p = _mi_heapx_malloc_zero(heap, size, zero MI_SOURCE_ARG);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
return p;
}
// otherwise over-allocate
void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero);
void* p = _mi_heapx_malloc_zero(heap, size + alignment - 1, zero MI_SOURCE_ARG);
if (p == NULL) return NULL;
// .. and align within the allocation
@ -64,68 +64,95 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
}
static inline void* mi_heapx_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_malloc_zero_aligned_at(heap, size, alignment, offset, false MI_SOURCE_ARG);
}
extern inline mi_decl_allocator void* _mi_heapx_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_malloc_zero_aligned_at(heap, size, alignment, 0, false MI_SOURCE_ARG);
}
static inline void* mi_heapx_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_malloc_zero_aligned_at(heap, size, alignment, offset, true MI_SOURCE_ARG);
}
static inline void* mi_heapx_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_malloc_zero_aligned_at(heap, size, alignment, 0, true MI_SOURCE_ARG);
}
static inline void* mi_heapx_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset MI_SOURCE_PARAM) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heapx_malloc_zero_aligned_at(heap, total, alignment, offset, true MI_SOURCE_ARG);
}
static inline void* mi_heapx_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment MI_SOURCE_PARAM) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heapx_malloc_zero_aligned_at(heap, total, alignment, 0, true MI_SOURCE_ARG);
}
mi_decl_allocator void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
return mi_heapx_malloc_aligned_at(heap, size, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
return _mi_heapx_malloc_aligned(heap, size, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
return mi_heapx_zalloc_aligned_at(heap, size, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
return mi_heapx_zalloc_aligned(heap, size, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
return mi_heapx_calloc_aligned_at(heap, count, size, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
return mi_heapx_calloc_aligned(heap, count, size, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
return mi_heapx_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
return _mi_heapx_malloc_aligned(mi_get_default_heap(), size, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
return mi_heapx_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
return mi_heapx_zalloc_aligned(mi_get_default_heap(), size, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
return mi_heapx_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
return mi_heapx_calloc_aligned(mi_get_default_heap(), count, size, alignment MI_SOURCE_RET);
}
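To make the forwarding pattern above concrete: under the macros introduced earlier in this commit, a public wrapper such as mi_malloc_aligned expands in a padded debug build roughly to

mi_decl_allocator void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  // MI_SOURCE_RET captures the caller of the public entry point
  return _mi_heapx_malloc_aligned(mi_get_default_heap(), size, alignment,
                                  ((intptr_t)mi_return_address() << (intptr_t)1));
}

whereas without MI_PADDING the MI_SOURCE_PARAM/ARG/RET macros expand to nothing and the call reduces to the plain three-argument form. Internal helpers forward the already-captured value with MI_SOURCE_ARG, so the source recorded in the block padding is always the outermost public call site.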
static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
static void* mi_heapx_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero MI_SOURCE_PARAM) mi_attr_noexcept {
mi_assert(alignment > 0);
if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
if (alignment <= sizeof(uintptr_t)) return _mi_heapx_realloc_zero(heap,p,newsize,zero MI_SOURCE_ARG);
if (p == NULL) return mi_heapx_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero MI_SOURCE_ARG);
size_t size = mi_usable_size(p);
if (newsize <= size && newsize >= (size - (size / 2))
&& (((uintptr_t)p + offset) % alignment) == 0) {
return p; // reallocation still fits, is aligned and not more than 50% waste
}
else {
void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
void* newp = mi_heapx_malloc_aligned_at(heap,newsize,alignment,offset MI_SOURCE_ARG);
if (newp != NULL) {
if (zero && newsize > size) {
const mi_page_t* page = _mi_ptr_page(newp);
@ -146,62 +173,90 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
}
}
static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
static void* mi_heapx_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero MI_SOURCE_PARAM) mi_attr_noexcept {
mi_assert(alignment > 0);
if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
if (alignment <= sizeof(uintptr_t)) return _mi_heapx_realloc_zero(heap,p,newsize,zero MI_SOURCE_ARG);
size_t offset = ((uintptr_t)p % alignment); // use offset of previous allocation (p can be NULL)
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
return mi_heapx_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero MI_SOURCE_ARG);
}
static inline void* mi_heapx_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false MI_SOURCE_ARG);
}
static inline void* mi_heapx_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_realloc_zero_aligned(heap,p,newsize,alignment,false MI_SOURCE_ARG);
}
static inline void* mi_heapx_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true MI_SOURCE_ARG);
}
static inline void* mi_heapx_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment MI_SOURCE_PARAM) mi_attr_noexcept {
return mi_heapx_realloc_zero_aligned(heap, p, newsize, alignment, true MI_SOURCE_ARG);
}
static inline void* mi_heapx_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset MI_SOURCE_PARAM) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heapx_realloc_zero_aligned_at(heap, p, total, alignment, offset, true MI_SOURCE_ARG);
}
static inline void* mi_heapx_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment MI_SOURCE_PARAM) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heapx_realloc_zero_aligned_at(heap, p, total, alignment, 0, true MI_SOURCE_ARG);
}
mi_decl_allocator void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heapx_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heapx_realloc_aligned(mi_get_default_heap(), p, newsize, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heapx_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heapx_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heapx_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heapx_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
return mi_heapx_realloc_aligned_at(heap, p, newsize, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
return mi_heapx_realloc_aligned(heap, p, newsize, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
return mi_heapx_rezalloc_aligned_at(heap, p, newsize, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
return mi_heapx_rezalloc_aligned(heap, p, newsize, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
return mi_heapx_rezalloc_aligned_at(heap, p, total, alignment, offset MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned(heap, p, total, alignment);
return mi_heapx_rezalloc_aligned(heap, p, total, alignment MI_SOURCE_RET);
}
mi_decl_allocator void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
mi_decl_allocator void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
mi_decl_allocator void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
mi_decl_allocator void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
mi_decl_allocator void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}
mi_decl_allocator void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}

View File

@ -186,8 +186,8 @@ int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_me
void* __libc_valloc(size_t size) { return mi_valloc(size); }
void* __libc_pvalloc(size_t size) { return mi_pvalloc(size); }
void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment,size); }
int __posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p,alignment,size); }
void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
int __posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); }
#endif
#ifdef __cplusplus

View File

@ -48,7 +48,14 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept
if (p == NULL) return EINVAL;
if (alignment % sizeof(void*) != 0) return EINVAL; // natural alignment
if (!_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2
void* q = (alignment <= MI_MAX_ALIGN_SIZE ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
mi_heap_t* heap = mi_get_default_heap();
void* q;
if (alignment <= MI_MAX_ALIGN_SIZE) {
q = _mi_heapx_malloc(heap, size MI_SOURCE_RET);
}
else {
q = _mi_heapx_malloc_aligned(heap, size, alignment MI_SOURCE_RET);
}
if (q==NULL && size != 0) return ENOMEM;
mi_assert_internal(((uintptr_t)q % alignment) == 0);
*p = q;
@ -56,32 +63,47 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept
}
void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
void* p = (alignment <= MI_MAX_ALIGN_SIZE ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
mi_heap_t* heap = mi_get_default_heap();
void* p;
if (alignment <= MI_MAX_ALIGN_SIZE) {
p = _mi_heapx_malloc(heap, size MI_SOURCE_RET);
}
else {
p = _mi_heapx_malloc_aligned(heap, size, alignment MI_SOURCE_RET);
}
mi_assert_internal(((uintptr_t)p % alignment) == 0);
return p;
}
void* mi_valloc(size_t size) mi_attr_noexcept {
return mi_malloc_aligned(size, _mi_os_page_size());
return _mi_heapx_malloc_aligned(mi_get_default_heap(), size, _mi_os_page_size() MI_SOURCE_RET);
}
void* mi_pvalloc(size_t size) mi_attr_noexcept {
size_t psize = _mi_os_page_size();
if (size >= SIZE_MAX - psize) return NULL; // overflow
size_t asize = ((size + psize - 1) / psize) * psize;
return mi_malloc_aligned(asize, psize);
return _mi_heapx_malloc_aligned(mi_get_default_heap(), asize, psize MI_SOURCE_RET);
}
void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
if (alignment==0 || !_mi_is_power_of_two(alignment)) return NULL;
if ((size&(alignment-1)) != 0) return NULL; // C11 requires integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
void* p = (alignment <= MI_MAX_ALIGN_SIZE ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
mi_heap_t* heap = mi_get_default_heap();
void* p;
if (alignment <= MI_MAX_ALIGN_SIZE) {
p = _mi_heapx_malloc(heap, size MI_SOURCE_RET);
}
else {
p = _mi_heapx_malloc_aligned(heap, size, alignment MI_SOURCE_RET);
}
mi_assert_internal(((uintptr_t)p % alignment) == 0);
return p;
}
void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD
void* newp = mi_reallocn(p,count,size);
mi_heap_t* heap = mi_get_default_heap();
void* newp = _mi_heapx_reallocn(heap, p, count, size MI_SOURCE_RET);
if (newp==NULL) errno = ENOMEM;
return newp;
}
@ -97,7 +119,7 @@ unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
size_t len;
for(len = 0; s[len] != 0; len++) { }
size_t size = (len+1)*sizeof(unsigned short);
unsigned short* p = (unsigned short*)mi_malloc(size);
unsigned short* p = (unsigned short*)_mi_heapx_malloc(mi_get_default_heap(), size MI_SOURCE_RET);
if (p != NULL) {
memcpy(p,s,size);
}
@ -105,7 +127,7 @@ unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
}
unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept {
return (unsigned char*)mi_strdup((const char*)s);
return (unsigned char*)_mi_heapx_strdup(mi_get_default_heap(), (const char*)s MI_SOURCE_RET);
}
int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept {
@ -117,7 +139,7 @@ int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept {
*buf = NULL;
}
else {
*buf = mi_strdup(p);
*buf = _mi_heapx_strdup(mi_get_default_heap(), p MI_SOURCE_RET);
if (*buf==NULL) return ENOMEM;
if (size != NULL) *size = strlen(p);
}

View File

@ -21,11 +21,11 @@ terms of the MIT license. A copy of the license can be found in the file
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size MI_SOURCE_PARAM) mi_attr_noexcept {
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
mi_block_t* block = page->free;
if (mi_unlikely(block == NULL)) {
return _mi_malloc_generic(heap, size); // slow path
return _mi_malloc_generic(heap, size MI_SOURCE_ARG); // slow path
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
// pop from the free list
@ -50,6 +50,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
padding->source = source;
uint8_t* fill = (uint8_t*)padding - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
@ -58,12 +59,12 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
}
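For orientation (a hedged sketch, not part of this commit; demo_read_padding and demo_padding_t are hypothetical stand-ins), the padding written above can be read back later in the same way the new _mi_page_block_error_message in options.c does:

#include <stddef.h>
#include <stdint.h>

typedef struct { uint32_t canary; uint32_t delta; int64_t source; } demo_padding_t;

// `block` is the start of the heap block; `bsize` is its usable block size
// (mi_page_usable_block_size), which excludes the trailing padding struct.
static size_t demo_read_padding(const void* block, size_t bsize, int64_t* source) {
  const demo_padding_t* pad = (const demo_padding_t*)((const uint8_t*)block + bsize);
  *source = pad->source;                                       // where the block was allocated
  return (pad->delta <= bsize ? bsize - pad->delta : bsize);   // exact requested size
}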
// allocate a small block
extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
static inline mi_decl_allocator void* mi_heapx_malloc_small(mi_heap_t* heap, size_t size MI_SOURCE_PARAM) mi_attr_noexcept {
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
mi_assert(size <= MI_SMALL_SIZE_MAX);
mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE MI_SOURCE_ARG);
mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
@ -74,19 +75,23 @@ extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size
return p;
}
extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return mi_heapx_malloc_small(heap, size MI_SOURCE_RET);
}
extern inline mi_decl_allocator void* mi_malloc_small(size_t size) mi_attr_noexcept {
return mi_heap_malloc_small(mi_get_default_heap(), size);
return mi_heapx_malloc_small(mi_get_default_heap(), size MI_SOURCE_RET);
}
// The main allocation function
extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
inline mi_decl_allocator void* _mi_heapx_malloc(mi_heap_t* heap, size_t size MI_SOURCE_PARAM) mi_attr_noexcept {
if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
return mi_heap_malloc_small(heap, size);
return mi_heapx_malloc_small(heap, size MI_SOURCE_ARG);
}
else {
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE MI_SOURCE_ARG);
mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
@ -98,8 +103,11 @@ extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t siz
}
}
extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heapx_malloc(heap, size MI_SOURCE_RET);
}
extern inline mi_decl_allocator void* mi_malloc(size_t size) mi_attr_noexcept {
return mi_heap_malloc(mi_get_default_heap(), size);
return _mi_heapx_malloc(mi_get_default_heap(), size MI_SOURCE_RET);
}
@ -123,27 +131,31 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
// zero initialized small block
mi_decl_allocator void* mi_zalloc_small(size_t size) mi_attr_noexcept {
void* p = mi_malloc_small(size);
void* p = mi_heapx_malloc_small(mi_get_default_heap(), size MI_SOURCE_RET);
if (p != NULL) {
_mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
}
return p;
}
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
void* p = mi_heap_malloc(heap,size);
mi_decl_allocator void* _mi_heapx_malloc_zero(mi_heap_t* heap, size_t size, bool zero MI_SOURCE_PARAM) {
void* p = _mi_heapx_malloc(heap,size MI_SOURCE_ARG);
if (zero && p != NULL) {
_mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
}
return p;
}
static inline mi_decl_allocator void* mi_heapx_zalloc(mi_heap_t* heap, size_t size MI_SOURCE_PARAM) mi_attr_noexcept {
return _mi_heapx_malloc_zero(heap, size, true MI_SOURCE_ARG);
}
extern inline mi_decl_allocator void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heap_malloc_zero(heap, size, true);
return mi_heapx_zalloc(heap, size MI_SOURCE_RET);
}
mi_decl_allocator void* mi_zalloc(size_t size) mi_attr_noexcept {
return mi_heap_zalloc(mi_get_default_heap(),size);
return mi_heapx_zalloc(mi_get_default_heap(), size MI_SOURCE_RET);
}
@ -169,7 +181,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
mi_list_contains(page, page->local_free, block) ||
mi_list_contains(page, mi_page_thread_free(page), block))
{
_mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
_mi_page_block_error_message(EAGAIN, page, block, "double free detected" );
return true;
}
return false;
@ -238,7 +250,7 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
size_t size;
size_t wrong;
if (!mi_verify_padding(page,block,&size,&wrong)) {
_mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
_mi_page_block_error_message(EFAULT, page, block, "buffer overflow in heap block" );
}
}
@ -349,7 +361,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
if (mi_unlikely(mi_check_is_double_free(page, block))) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
memset(block, MI_DEBUG_FREED, mi_usable_size(block)); // only the usable size, so the padding (and its recorded source) survives a later dangling write
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
@ -429,7 +441,7 @@ void mi_free(void* p) mi_attr_noexcept
if (mi_unlikely(mi_check_is_double_free(page,block))) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
@ -523,25 +535,33 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
mi_free(p);
}
extern inline mi_decl_allocator void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
static inline mi_decl_allocator void* mi_heapx_calloc(mi_heap_t* heap, size_t count, size_t size MI_SOURCE_PARAM) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count,size,&total)) return NULL;
return mi_heap_zalloc(heap,total);
return mi_heapx_zalloc(heap, total MI_SOURCE_ARG);
}
mi_decl_allocator void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
return mi_heapx_calloc(heap, count, size MI_SOURCE_RET);
}
mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_calloc(mi_get_default_heap(),count,size);
return mi_heapx_calloc(mi_get_default_heap(), count, size MI_SOURCE_RET);
}
// Uninitialized `calloc`
extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
static inline mi_decl_allocator void* mi_heapx_mallocn(mi_heap_t* heap, size_t count, size_t size MI_SOURCE_PARAM) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_malloc(heap, total);
return _mi_heapx_malloc(heap, total MI_SOURCE_ARG);
}
extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
return mi_heapx_mallocn(heap, count, size MI_SOURCE_RET);
}
mi_decl_allocator void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_mallocn(mi_get_default_heap(),count,size);
return mi_heapx_mallocn(mi_get_default_heap(), count, size MI_SOURCE_RET);
}
// Expand in place or fail
@ -552,13 +572,13 @@ mi_decl_allocator void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
return p; // it fits
}
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) {
if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,zero);
mi_decl_allocator void* _mi_heapx_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero MI_SOURCE_PARAM) {
if (p == NULL) return _mi_heapx_malloc_zero(heap,newsize,zero MI_SOURCE_ARG);
size_t size = mi_usable_size(p);
if (newsize <= size && newsize >= (size / 2)) {
return p; // reallocation still fits and not more than 50% waste
}
void* newp = mi_heap_malloc(heap,newsize);
void* newp = _mi_heapx_malloc(heap,newsize MI_SOURCE_ARG);
if (mi_likely(newp != NULL)) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
@ -571,54 +591,65 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
return newp;
}
static mi_decl_allocator void* mi_heapx_realloc(mi_heap_t* heap, void* p, size_t newsize MI_SOURCE_PARAM) mi_attr_noexcept {
return _mi_heapx_realloc_zero(heap, p, newsize, false MI_SOURCE_ARG);
}
mi_decl_allocator void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, false);
return mi_heapx_realloc(heap, p, newsize MI_SOURCE_RET);
}
mi_decl_allocator void* _mi_heapx_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size MI_SOURCE_PARAM) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heapx_realloc(heap, p, total MI_SOURCE_ARG);
}
mi_decl_allocator void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_realloc(heap, p, total);
return _mi_heapx_reallocn(heap, p, count, size MI_SOURCE_RET);
}
// Reallocate but free `p` on errors
mi_decl_allocator void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* newp = mi_heap_realloc(heap, p, newsize);
void* newp = mi_heapx_realloc(heap, p, newsize MI_SOURCE_RET);
if (newp==NULL && p!=NULL) mi_free(p);
return newp;
}
mi_decl_allocator void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, true);
return _mi_heapx_realloc_zero(heap, p, newsize, true MI_SOURCE_RET);
}
mi_decl_allocator void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_rezalloc(heap, p, total);
return _mi_heapx_realloc_zero(heap, p, total, true MI_SOURCE_RET);
}
mi_decl_allocator void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_realloc(mi_get_default_heap(),p,newsize);
return mi_heapx_realloc(mi_get_default_heap(),p,newsize MI_SOURCE_RET);
}
mi_decl_allocator void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
return _mi_heapx_reallocn(mi_get_default_heap(),p,count,size MI_SOURCE_RET);
}
// Reallocate but free `p` on errors
mi_decl_allocator void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
void* newp = mi_heapx_realloc(mi_get_default_heap(), p, newsize MI_SOURCE_RET);
if (newp==NULL && p!=NULL) mi_free(p);
return newp;
}
mi_decl_allocator void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
return _mi_heapx_realloc_zero(mi_get_default_heap(), p, newsize, true MI_SOURCE_RET);
}
mi_decl_allocator void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return _mi_heapx_realloc_zero(mi_get_default_heap(), p, total, true MI_SOURCE_RET);
}
@ -628,32 +659,38 @@ mi_decl_allocator void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_
// ------------------------------------------------------
// `strdup` using mi_malloc
char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
mi_decl_allocator char* _mi_heapx_strdup(mi_heap_t* heap, const char* s MI_SOURCE_PARAM) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t n = strlen(s);
char* t = (char*)mi_heap_malloc(heap,n+1);
char* t = (char*)_mi_heapx_malloc(heap, n+1 MI_SOURCE_ARG);
if (t != NULL) memcpy(t, s, n + 1);
return t;
}
char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
return _mi_heapx_strdup(heap, s MI_SOURCE_RET);
}
char* mi_strdup(const char* s) mi_attr_noexcept {
return mi_heap_strdup(mi_get_default_heap(), s);
return _mi_heapx_strdup(mi_get_default_heap(), s MI_SOURCE_RET);
}
// `strndup` using mi_malloc
char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
static char* mi_heapx_strndup(mi_heap_t* heap, const char* s, size_t n MI_SOURCE_PARAM) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t m = strlen(s);
if (n > m) n = m;
char* t = (char*)mi_heap_malloc(heap, n+1);
char* t = (char*)_mi_heapx_malloc(heap, n+1 MI_SOURCE_ARG);
if (t == NULL) return NULL;
memcpy(t, s, n);
t[n] = 0;
return t;
}
char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
return mi_heapx_strndup(heap, s, n MI_SOURCE_RET);
}
char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
return mi_heap_strndup(mi_get_default_heap(),s,n);
return mi_heapx_strndup(mi_get_default_heap(), s, n MI_SOURCE_RET);
}
#ifndef __wasi__
@ -663,7 +700,7 @@ char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
static char* mi_heapx_realpath(mi_heap_t* heap, const char* fname, char* resolved_name MI_SOURCE_PARAM) mi_attr_noexcept {
// todo: use GetFullPathNameW to allow longer file names
char buf[PATH_MAX];
DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
@ -677,7 +714,7 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
return resolved_name;
}
else {
return mi_heap_strndup(heap, buf, PATH_MAX);
return mi_heapx_strndup(heap, buf, PATH_MAX MI_SOURCE_ARG);
}
}
#else
@ -693,7 +730,7 @@ static size_t mi_path_max() {
return path_max;
}
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
static char* mi_heapx_realpath(mi_heap_t* heap, const char* fname, char* resolved_name MI_SOURCE_PARAM) mi_attr_noexcept {
if (resolved_name != NULL) {
return realpath(fname,resolved_name);
}
@ -702,15 +739,19 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
char* buf = (char*)mi_malloc(n+1);
if (buf==NULL) return NULL;
char* rname = realpath(fname,buf);
char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
char* result = mi_heapx_strndup(heap, rname, n MI_SOURCE_ARG); // ok if `rname==NULL`
mi_free(buf);
return result;
}
}
#endif
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
return mi_heapx_realpath(heap, fname, resolved_name MI_SOURCE_RET);
}
char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
return mi_heapx_realpath(mi_get_default_heap(), fname, resolved_name MI_SOURCE_RET);
}
#endif
@ -766,39 +807,47 @@ static bool mi_try_new_handler(bool nothrow) {
}
#endif
static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
static mi_decl_noinline void* mi_heapx_try_new(mi_heap_t* heap, size_t size, bool nothrow MI_SOURCE_PARAM) {
void* p = NULL;
while(p == NULL && mi_try_new_handler(nothrow)) {
p = mi_malloc(size);
p = _mi_heapx_malloc(heap, size MI_SOURCE_ARG);
}
return p;
}
void* mi_new(size_t size) {
void* p = mi_malloc(size);
if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
static inline void* mi_heapx_new(size_t size MI_SOURCE_PARAM) {
mi_heap_t* const heap = mi_get_default_heap();
void* p = _mi_heapx_malloc(heap, size MI_SOURCE_ARG);
if (mi_unlikely(p == NULL)) return mi_heapx_try_new(heap, size, false MI_SOURCE_ARG);
return p;
}
void* mi_new(size_t size) {
return mi_heapx_new(size MI_SOURCE_RET);
}
void* mi_new_nothrow(size_t size) {
void* p = mi_malloc(size);
if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
mi_heap_t* const heap = mi_get_default_heap();
void* p = _mi_heapx_malloc(heap, size MI_SOURCE_RET);
if (mi_unlikely(p == NULL)) return mi_heapx_try_new(heap, size, true MI_SOURCE_RET);
return p;
}
void* mi_new_aligned(size_t size, size_t alignment) {
mi_heap_t* const heap = mi_get_default_heap();
void* p;
do {
p = mi_malloc_aligned(size, alignment);
p = _mi_heapx_malloc_aligned(heap, size, alignment MI_SOURCE_RET);
}
while(p == NULL && mi_try_new_handler(false));
return p;
}
void* mi_new_aligned_nothrow(size_t size, size_t alignment) {
mi_heap_t* const heap = mi_get_default_heap();
void* p;
do {
p = mi_malloc_aligned(size, alignment);
p = _mi_heapx_malloc_aligned(heap, size, alignment MI_SOURCE_RET);
}
while(p == NULL && mi_try_new_handler(true));
return p;
@ -811,25 +860,29 @@ void* mi_new_n(size_t count, size_t size) {
return NULL;
}
else {
return mi_new(total);
return mi_heapx_new(total MI_SOURCE_RET);
}
}
void* mi_new_realloc(void* p, size_t newsize) {
mi_heap_t* const heap = mi_get_default_heap();
void* q;
do {
q = mi_realloc(p, newsize);
q = mi_heapx_realloc(heap, p, newsize MI_SOURCE_RET);
} while (q == NULL && mi_try_new_handler(false));
return q;
}
void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
mi_heap_t* const heap = mi_get_default_heap();
size_t total;
if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
else {
return mi_new_realloc(p, total);
}
void* q;
do {
q = mi_heapx_realloc(heap, p, total MI_SOURCE_RET);
} while (q == NULL && mi_try_new_handler(false));
return q;
}

View File

@ -303,12 +303,19 @@ void _mi_verbose_message(const char* fmt, ...) {
va_end(args);
}
static void mi_show_error_message(const char* fmt, va_list args) {
static void mi_vshow_error_message(const char* fmt, va_list args) {
if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
if (mi_atomic_increment(&error_count) > mi_max_error_count) return;
mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
}
static void mi_show_error_message(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
mi_vshow_error_message(fmt,args);
va_end(args);
}
void _mi_warning_message(const char* fmt, ...) {
if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
if (mi_atomic_increment(&error_count) > mi_max_error_count) return;
@ -355,21 +362,57 @@ void mi_register_error(mi_error_fun* fun, void* arg) {
mi_atomic_write_ptr(void,&mi_error_arg, arg);
}
void _mi_error_message(int err, const char* fmt, ...) {
// show detailed error message
va_list args;
va_start(args, fmt);
mi_show_error_message(fmt, args);
va_end(args);
// and call the error handler which may abort (or return normally)
static void mi_call_error_handler(int err) {
if (mi_error_handler != NULL) {
mi_error_handler(err, mi_atomic_read_ptr(void,&mi_error_arg));
mi_error_handler(err, mi_atomic_read_ptr(void, &mi_error_arg));
}
else {
mi_error_default(err);
}
}
void _mi_error_message(int err, const char* fmt, ...) {
// show detailed error message
va_list args;
va_start(args, fmt);
mi_vshow_error_message(fmt, args);
va_end(args);
// and call the error handler which may abort (or return normally)
mi_call_error_handler(err);
}
#if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
const char* _mi_debug_fname_base = "mimalloc_fname_base";
#endif
void _mi_page_block_error_message(int err, const mi_page_t* page, const mi_block_t* block, const char* msg) {
#if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
const size_t bsize = mi_page_usable_block_size(page);
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + bsize);
const size_t size = (padding->delta <= bsize ? bsize - padding->delta : bsize);
if (padding->source==0) {
mi_show_error_message("%s: at block %p of size %zu\n", msg, block, size);
}
else if ((padding->source & 1) == 0) {
#ifdef _MSC_VER
const char* hint = " hint: paste the code address into the debugger's disassembly window to find the source location.\n";
#else
const char* hint = "";
#endif
mi_show_error_message("%s: at block %p of size %zu allocated at 0x%p.\n%s", msg, block, size, (void*)(padding->source >> 1), hint);
}
else {
const char* fname = _mi_debug_fname_base + ((int32_t)(padding->source >> 32));
size_t lineno = ((uint32_t)padding->source) >> 1;
mi_show_error_message("%s: at block %p of size %zu allocated at %s:%zu", msg, block, size, fname, lineno);
}
#else
mi_show_error_message("%s: at block %p of size %zu", msg, block, mi_page_usable_block_size(page));
#endif
mi_call_error_handler(err);
}
// --------------------------------------------------------
// Initialize options by checking the environment
// --------------------------------------------------------

View File

@ -774,7 +774,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_SOURCE_PARAM) mi_attr_noexcept
{
mi_assert_internal(heap != NULL);
@ -815,5 +815,5 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
mi_assert_internal(mi_page_block_size(page) >= size);
// and try again, this time succeeding! (i.e. this should never recurse)
return _mi_page_malloc(heap, page, size);
return _mi_page_malloc(heap, page, size MI_SOURCE_ARG);
}

View File

@ -11,6 +11,7 @@ static void double_free1();
static void double_free2();
static void corrupt_free();
static void block_overflow1();
static void dangling_ptr_write();
int main() {
mi_version();
@ -20,6 +21,7 @@ int main() {
// double_free2();
// corrupt_free();
// block_overflow1();
dangling_ptr_write();
void* p1 = malloc(78);
void* p2 = malloc(24);
@ -49,6 +51,14 @@ static void block_overflow1() {
free(p);
}
static void dangling_ptr_write() {
for (int i = 0; i < 1000; i++) {
uint8_t* p = (uint8_t*)mi_malloc(16);
free(p);
p[0] = 0;
}
}
// The double free samples come from ArcHeap [1] by Insu Yun (issue #161)
// [1]: https://arxiv.org/pdf/1903.00503.pdf

View File

@ -23,10 +23,12 @@ public:
~Test() { }
};
static void dangling_ptr_write();
int main() {
mi_stats_reset(); // ignore earlier allocations
atexit(free_p);
dangling_ptr_write();
void* p1 = malloc(78);
void* p2 = mi_malloc_aligned(16,24);
free(p1);
@ -53,6 +55,14 @@ int main() {
return 0;
}
static void dangling_ptr_write() {
for (int i = 0; i < 1000; i++) {
uint8_t* p = new uint8_t[16];
free(p);
p[0] = 0;
}
}
class Static {
private:
void* p;