set extra debug padding per-heap
Commit 9ebb94fe17 (parent ae608cda2f)
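In brief: the extra debug padding that was previously read from the global option mi_option_debug_extra_padding (and threaded through the allocation paths with the MI_EXTRA_PADDING_XPARAM/XARG macros) becomes a per-heap setting. Every mi_heap_t now carries an extra_padding field, the allocation sites read it via mi_extra_padding(heap), and a new experimental API mi_heap_set_extra_padding changes it at runtime. A minimal usage sketch (the 200-byte value mirrors the test change at the bottom of this diff; the padding only takes effect in builds with MI_PADDING enabled):

    #include <mimalloc.h>

    int main(void) {
      // Request 200 extra padding bytes per block on the default heap; in
      // debug builds these bytes improve detection of heap block overflows.
      mi_heap_set_extra_padding(mi_heap_get_default(), 200);

      void* p = mi_malloc(100);  // block is sized for 100 bytes plus the padding
      mi_free(p);                // debug builds re-check the padding canary here
      return 0;
    }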
@@ -32,26 +32,6 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_decl_cache_align
 #endif
 
-/* -----------------------------------------------------------
-  Padding
------------------------------------------------------------ */
-#if (MI_PADDING)
-#define MI_EXTRA_PADDING_XPARAM  , size_t __extra_padding
-#define MI_EXTRA_PADDING_XARG    , __extra_padding
-#define MI_EXTRA_PADDING_ARG     __extra_padding
-static inline size_t mi_extra_padding() {
-  return MI_PADDING_SIZE + mi_option_get(mi_option_debug_extra_padding);
-}
-#else
-#define MI_EXTRA_PADDING_XPARAM
-#define MI_EXTRA_PADDING_XARG
-#define MI_EXTRA_PADDING_ARG     0
-static inline size_t mi_extra_padding() {
-  return 0;
-}
-#endif
-
-
 
 // "options.c"
 void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
@@ -110,7 +90,7 @@ void _mi_abandoned_await_readers(void);
 
 
 // "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_EXTRA_PADDING_XPARAM MI_SOURCE_XPARAM) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_SOURCE_XPARAM) mi_attr_noexcept mi_attr_malloc;
 
 void _mi_page_retire(mi_page_t* page);  // free the page if there are no other pages with many free blocks
 void _mi_page_unfull(mi_page_t* page);
@@ -143,7 +123,7 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start);
 mi_msecs_t _mi_clock_start(void);
 
 // "alloc.c"
-void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size MI_EXTRA_PADDING_XPARAM MI_SOURCE_XPARAM) mi_attr_noexcept;  // called from `_mi_malloc_generic`
+void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size MI_SOURCE_XPARAM) mi_attr_noexcept;  // called from `_mi_malloc_generic`
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);
 void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
@@ -158,6 +138,21 @@ bool _mi_page_is_valid(mi_page_t* page);
 #endif
 
 
+/* -----------------------------------------------------------
+  Padding
+----------------------------------------------------------- */
+#if (MI_PADDING)
+static inline size_t mi_extra_padding(mi_heap_t* const heap) {
+  return (MI_PADDING_SIZE + heap->extra_padding);
+}
+#else
+static inline size_t mi_extra_padding() {
+  return 0;
+}
+#endif
+
+
+
 // ------------------------------------------------------
 // Branches
 // ------------------------------------------------------
@@ -334,6 +334,7 @@ struct mi_heap_s {
   size_t      page_count;        // total number of pages in the `pages` queues.
   size_t      page_retired_min;  // smallest retired index (retired pages are fully free, but still in the page queues)
   size_t      page_retired_max;  // largest retired index into the `pages` array.
+  size_t      extra_padding;     // extra padding bytes in each heap block to better detect heap block overflows
   mi_heap_t*  next;              // list of heaps per thread
   bool        no_reclaim;        // `true` if this heap should not reclaim abandoned pages
 };
@@ -296,11 +296,12 @@ typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_
 
 mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
 
+// Experimental
 mi_decl_export void mi_heap_print_json(mi_heap_t* heap, mi_output_fun* out, void* arg);
 mi_decl_export bool mi_heap_is_empty(mi_heap_t* heap);
 mi_decl_export void mi_heap_check_leak(mi_heap_t* heap, mi_output_fun* out, void* arg);
+mi_decl_export void mi_heap_set_extra_padding(mi_heap_t* heap, size_t extra_padding);
 
-// Experimental
 mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
 mi_decl_nodiscard mi_decl_export bool mi_is_redirected() mi_attr_noexcept;
 
@@ -24,17 +24,17 @@ static mi_decl_restrict void* mi_base_malloc_zero_aligned_at(mi_heap_t* const he
   const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
 
   // try if there is a small block available with just the right alignment
-  const size_t __extra_padding = mi_extra_padding();
-  const size_t padsize = size + __extra_padding;  // safe for overflow as size <= PTRDIFF_MAX
+  const size_t extra_padding = mi_extra_padding(heap);
+  const size_t padsize = size + extra_padding;  // safe for overflow as size <= PTRDIFF_MAX
   if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
     mi_page_t* page = _mi_heap_get_free_small_page(heap,padsize);
     const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
     if (mi_likely(page->free != NULL && is_aligned))
     {
       #if MI_STAT>1
-      mi_heap_stat_increase( heap, malloc, size);
+      mi_heap_stat_increase( heap, malloc, padsize);
       #endif
-      void* p = _mi_page_malloc(heap,page,padsize MI_EXTRA_PADDING_XARG MI_SOURCE_XARG); // TODO: inline _mi_page_malloc
+      void* p = _mi_page_malloc(heap,page,padsize MI_SOURCE_XARG); // TODO: inline _mi_page_malloc
       mi_assert_internal(p != NULL);
       mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
       if (zero) _mi_block_zero_init(page,p,size);
@@ -44,7 +44,7 @@ static mi_decl_restrict void* mi_base_malloc_zero_aligned_at(mi_heap_t* const he
 
   // use regular allocation if it is guaranteed to fit the alignment constraints
   if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
-    void* p = _mi_base_malloc_zero(heap, size, zero MI_SOURCE_XARG);
+    void* p = _mi_base_malloc_zero(heap, size, zero MI_SOURCE_XARG);  // base malloc adds padding again to size
     mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
     return p;
   }
src/alloc.c (24 lines changed)
@@ -28,11 +28,11 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Fast allocation in a page: just pop from the free list.
 // Fall back to generic allocation only if the list is empty.
-extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size MI_EXTRA_PADDING_XPARAM MI_SOURCE_XPARAM) mi_attr_noexcept {
+extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size MI_SOURCE_XPARAM) mi_attr_noexcept {
   mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
   mi_block_t* block = page->free;
   if (mi_unlikely(block == NULL)) {
-    return _mi_malloc_generic(heap, size MI_EXTRA_PADDING_XARG MI_SOURCE_XARG); // slow path
+    return _mi_malloc_generic(heap, size MI_SOURCE_XARG); // slow path
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
   // pop from the free list
@@ -52,9 +52,11 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   }
   #endif
   #if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+  const size_t extra_padding = mi_extra_padding(heap);
+  mi_assert_internal(extra_padding <= size && extra_padding >= MI_PADDING_SIZE);
   mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
-  ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - __extra_padding));
-  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - __extra_padding + delta));
+  ptrdiff_t delta = extra_padding - MI_PADDING_SIZE;
+  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - extra_padding + delta));
   padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
   padding->delta = (uint32_t)(delta);
   padding->source = __mi_source;
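For orientation: the padding block written here sits at the tail of each allocation. A rough layout sketch, assuming MI_PADDING and MI_ENCODE_FREELIST are enabled; the exact mi_padding_t definition is not part of this diff and is assumed from the types header:

    // block                                   block + mi_page_usable_block_size(page)
    // |                                       |
    // [ user data ... | unused fill bytes ... | canary | delta | source ]
    //                                         \------ mi_padding_t -----/
    //
    // canary : (uint32_t)mi_ptr_encode(page, block, page->keys), verified on free
    // delta  : after this commit, extra_padding - MI_PADDING_SIZE
    // source : __mi_source, the allocation's origin (file/line) for diagnostics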
@@ -71,17 +73,17 @@ MI_ALLOC_API1(inline mi_decl_restrict void*, malloc_small, mi_heap_t*, heap, siz
   mi_assert(heap!=NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
   mi_assert(size <= MI_SMALL_SIZE_MAX);
-  const size_t __extra_padding = mi_extra_padding();
+  const size_t extra_padding = mi_extra_padding(heap);
   #if (MI_PADDING)
   if (size == 0) {
     size = sizeof(void*);
   }
-  if ((size + __extra_padding) > MI_SMALL_SIZE_MAX) {
+  if ((size + extra_padding) > MI_SMALL_SIZE_MAX) {
     return MI_SOURCE_ARG(mi_heap_malloc, heap, size);  // call base malloc in case we were invoked directly
   }
   #endif
-  mi_page_t* page = _mi_heap_get_free_small_page(heap,size + __extra_padding);
-  void* p = _mi_page_malloc(heap, page, size + __extra_padding MI_EXTRA_PADDING_XARG MI_SOURCE_XARG);
+  mi_page_t* page = _mi_heap_get_free_small_page(heap,size + extra_padding);
+  void* p = _mi_page_malloc(heap, page, size + extra_padding MI_SOURCE_XARG);
   mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
   #if MI_STAT>1
   if (p != NULL) {
@@ -96,14 +98,14 @@ MI_ALLOC_API1(inline mi_decl_restrict void*, malloc_small, mi_heap_t*, heap, siz
 // The main allocation function
 MI_ALLOC_API1(inline mi_decl_restrict void*, malloc, mi_heap_t*, heap, size_t, size)
 {
-  const size_t __extra_padding = mi_extra_padding();
-  if (mi_likely(size <= MI_SMALL_SIZE_MAX - __extra_padding && __extra_padding < MI_SMALL_SIZE_MAX)) {  // careful for overflow
+  const size_t extra_padding = mi_extra_padding(heap);
+  if (mi_likely(size <= MI_SMALL_SIZE_MAX - extra_padding && extra_padding < MI_SMALL_SIZE_MAX)) {  // careful for overflow
     return mi_base_malloc_small(heap, size MI_SOURCE_XARG);
   }
   else {
     mi_assert(heap!=NULL);
     mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
-    void* const p = _mi_malloc_generic(heap, size + __extra_padding MI_EXTRA_PADDING_XARG MI_SOURCE_XARG);  // note: size + __extra_padding can overflow but it is detected in malloc_generic
+    void* const p = _mi_malloc_generic(heap, size + extra_padding MI_SOURCE_XARG);  // note: size + __extra_padding can overflow but it is detected in malloc_generic
     mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
     #if MI_STAT>1
     if (p != NULL) {
src/heap.c (16 lines changed)
@@ -200,6 +200,7 @@ mi_heap_t* mi_heap_new(void) {
   heap->cookie  = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
   heap->keys[1] = _mi_heap_random_next(heap);
+  heap->extra_padding = bheap->extra_padding;
   heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
   // push on the thread local heaps list
   heap->next = heap->tld->heaps;
@@ -211,6 +212,11 @@ uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
   return _mi_random_next(&heap->random);
 }
 
+void mi_heap_set_extra_padding(mi_heap_t* heap, size_t extra_padding) {
+  if (extra_padding > 1*MiB) extra_padding = 1*MiB;
+  heap->extra_padding = extra_padding;
+}
+
 // zero out the page queues
 static void mi_heap_reset_pages(mi_heap_t* heap) {
   mi_assert_internal(mi_heap_is_initialized(heap));
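Note that the setter silently clamps the value to 1 MiB, and mi_heap_new (above) copies extra_padding from the thread's backing heap, so heaps created after the call inherit it. A small sketch of that behavior, assuming the default heap is the thread's backing heap and a debug build with MI_PADDING:

    mi_heap_set_extra_padding(mi_heap_get_default(), 64);  // values above 1 MiB are clamped
    mi_heap_t* h = mi_heap_new();      // inherits extra_padding == 64 from the backing heap
    void* p = mi_heap_malloc(h, 100);  // each block reserves 64 extra overflow-detection bytes
    mi_free(p);
    mi_heap_delete(h);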
@@ -580,13 +586,17 @@ static bool mi_heap_print_json_visit(const mi_heap_t* heap, const mi_heap_area_t
     _mi_fprintf(varg->out, varg->out_arg, varg->area_count==0 ? " {" : " ]\n}\n,{");
     varg->area_count++;
     varg->block_count = 0;
-    _mi_fprintf(varg->out, varg->out_arg, "\"page\": %zu, \"start\": 0x%p, \"block_size\": %zu, \"used_size\": %zu,\n \"reserved\": %zu, \"committed\": %zu,", varg->area_count, area->blocks, area->block_size, area->used, area->reserved, area->committed);
+    _mi_fprintf(varg->out, varg->out_arg,
+      "\"page\": %zu, \"start\": 0x%p, \"block_size\": %zu, \"used_size\": %zu,\n \"reserved\": %zu, \"committed\": %zu,",
+      varg->area_count, area->blocks, area->block_size, area->used, area->reserved, area->committed);
     _mi_fprintf(varg->out, varg->out_arg, " \"blocks\": [\n");
   }
   else {
     _mi_fprintf(varg->out, varg->out_arg, varg->block_count==0 ? " {" : " ,{");
     varg->block_count++;
-    _mi_fprintf(varg->out, varg->out_arg, "\"block\": 0x%p, \"valid\": %s, \"size\": %zu, \"usable_size\": %zu, \"allocated_size\": %zu,\n ", info->block, info->valid ? "true" : "false", info->size, info->usable_size, info->allocated_size);
+    _mi_fprintf(varg->out, varg->out_arg,
+      "\"block\": 0x%p, \"valid\": %s, \"size\": %zu, \"usable_size\": %zu, \"allocated_size\": %zu,\n ",
+      info->block, info->valid ? "true" : "false", info->size, info->usable_size, info->allocated_size);
     int lineno;
     const char* fname;
     void* ret = mi_source_unpack(info->source, &fname, &lineno);
@@ -600,7 +610,7 @@ static bool mi_heap_print_json_visit(const mi_heap_t* heap, const mi_heap_area_t
 void mi_heap_print_json(mi_heap_t* heap, mi_output_fun* out, void* arg) {
   if (heap==NULL) heap = mi_heap_get_default();
   mi_print_json_t info = { 0, 0, out, arg };
-  _mi_fprintf(info.out, info.out_arg, "{ \"heap\": 0x%p, \"thread_id\": 0x%zx, \"page_count\": %zu, \"block_padding\": %zu", heap, heap->thread_id, heap->page_count, mi_extra_padding() );
+  _mi_fprintf(info.out, info.out_arg, "{ \"heap\": 0x%p, \"thread_id\": 0x%zx, \"page_count\": %zu, \"block_padding\": %zu", heap, heap->thread_id, heap->page_count, mi_extra_padding(heap) );
   _mi_fprintf(info.out, info.out_arg, ", \"pages\": [\n");
   mi_heap_visit_blocks(heap, true, &mi_heap_print_json_visit, &info);
   _mi_fprintf(info.out, info.out_arg, info.area_count==0 ? "]\n" : " ] }\n] }\n");
@@ -98,6 +98,7 @@ const mi_heap_t _mi_heap_empty = {
   { {0}, {0}, 0 },
   0,               // page count
   MI_BIN_FULL, 0,  // page retired min/max
+  0,               // extra padding
   NULL,            // next
   false
 };
@@ -133,6 +134,7 @@ mi_heap_t _mi_heap_main = {
   { {0x846ca68b}, {0}, 0 },  // random
   0,               // page count
   MI_BIN_FULL, 0,  // page retired min/max
+  0,               // extra_padding
   NULL,            // next heap
   false            // can reclaim
 };
@@ -149,6 +151,7 @@ static void mi_heap_main_init(void) {
     _mi_random_init(&_mi_heap_main.random);
     _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
    _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
+    _mi_heap_main.extra_padding = mi_option_get(mi_option_debug_extra_padding);
   }
 }
 
@@ -195,6 +198,7 @@ static bool _mi_heap_init(void) {
     heap->keys[0] = _mi_heap_random_next(heap);
     heap->keys[1] = _mi_heap_random_next(heap);
     heap->tld = tld;
+    heap->extra_padding = _mi_heap_main.extra_padding;
     tld->heap_backing = heap;
     tld->heaps = heap;
     tld->segments.stats = &tld->stats;
src/page.c (10 lines changed)
@@ -792,7 +792,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
 
 // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_EXTRA_PADDING_XPARAM MI_SOURCE_XPARAM) mi_attr_noexcept
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_SOURCE_XPARAM) mi_attr_noexcept
 {
   mi_assert_internal(heap != NULL);
 
@@ -811,8 +811,8 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_EXTRA_PADDING_XPARAM
 
   // huge allocation?
   mi_page_t* page;
-  const size_t req_size = size - MI_EXTRA_PADDING_ARG;  // correct for padding_size in case of an overflow on `size`
-  if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_EXTRA_PADDING_ARG) )) {
+  const size_t req_size = size - mi_extra_padding(heap);  // correct for padding_size in case of an overflow on `size`
+  if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - mi_extra_padding(heap)) )) {
     if (mi_unlikely(req_size > PTRDIFF_MAX)) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
       _mi_error_message(EOVERFLOW, "allocation request is too large (%zu b requested)\n", req_size);
       return NULL;
@@ -823,7 +823,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_EXTRA_PADDING_XPARAM
   }
   else {
     // otherwise find a page with free blocks in our size segregated queues
-    mi_assert_internal(size >= MI_EXTRA_PADDING_ARG);
+    mi_assert_internal(size >= mi_extra_padding(heap));
     page = mi_find_free_page(heap,size);
   }
   if (mi_unlikely(page == NULL)) { // out of memory
@@ -835,5 +835,5 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size MI_EXTRA_PADDING_XPARAM
   mi_assert_internal(mi_page_block_size(page) >= size);
 
   // and try again, this time succeeding! (i.e. this should never recurse)
-  return _mi_page_malloc(heap, page, size MI_EXTRA_PADDING_XARG MI_SOURCE_XARG);
+  return _mi_page_malloc(heap, page, size MI_SOURCE_XARG);
 }
@@ -16,7 +16,7 @@ static void dangling_ptr_write();
 
 int main() {
   mi_version();
+  mi_heap_set_extra_padding(mi_heap_get_default(), 200);
   // detect double frees and heap corruption
   // double_free1();
   // double_free2();
@@ -153,7 +153,10 @@ int main() {
     result = ok;
   });
   CHECK_BODY("malloc-aligned5", {
-    void* p = mi_malloc_aligned(4097,4096); size_t usable = mi_usable_size(p); result = usable >= 4097 && usable < 10000; mi_free(p);
+    void* p = mi_malloc_aligned(4097,4096);
+    size_t usable = mi_usable_size(p);
+    result = (usable >= 4097 && usable < 12000 && ((uintptr_t)p % 4096) == 0);
+    mi_free(p);
   });
   CHECK_BODY("malloc-aligned-at1", {
     void* p = mi_malloc_aligned_at(48,32,0); result = (p != NULL && ((uintptr_t)(p) + 0) % 32 == 0); mi_free(p);