mirror of https://github.com/microsoft/mimalloc.git

commit 5d212d688f (parent 9a532037df)

    add MI_PADDING build option to add padding to each block to detect heap block overflows
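As a rough mental model for the diff below, here is a minimal standalone sketch of the padding-canary technique (illustration only, not mimalloc's implementation; the helper names padded_malloc/padded_free and the canary constant are invented for this sketch): reserve a few extra bytes past the requested size, store a block-specific value there at allocation time, and verify it on free.

/* Standalone sketch of end-of-block padding checks (not mimalloc code). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAD_SIZE sizeof(uintptr_t)   /* assumed padding size for this sketch */

static void* padded_malloc(size_t size) {
  uint8_t* p = (uint8_t*)malloc(size + PAD_SIZE);
  if (p == NULL) return NULL;
  uintptr_t canary = (uintptr_t)p ^ (uintptr_t)0xA5A5A5A5A5A5A5A5ULL;
  memcpy(p + size, &canary, PAD_SIZE);   /* write canary just past the block */
  return p;
}

static void padded_free(void* p, size_t size) {
  uintptr_t canary;
  uintptr_t expected = (uintptr_t)p ^ (uintptr_t)0xA5A5A5A5A5A5A5A5ULL;
  memcpy(&canary, (uint8_t*)p + size, PAD_SIZE);
  if (canary != expected) {
    fprintf(stderr, "buffer overflow detected in heap block %p\n", p);
  }
  free(p);
}

int main(void) {
  char* s = (char*)padded_malloc(16);
  if (s == NULL) return 1;
  memset(s, 'x', 17);    /* one byte too many: lands in the padding */
  padded_free(s, 16);    /* prints the overflow report */
  return 0;
}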
@@ -12,6 +12,10 @@ terms of the MIT license. A copy of the license can be found in the file
 #include <stdint.h>          // uintptr_t, uint16_t, etc
 #include <mimalloc-atomic.h> // _Atomic

+// Minimal alignment necessary. On most platforms 16 bytes are needed
+// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
+#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
+
 // ------------------------------------------------------
 // Variants
 // ------------------------------------------------------
@@ -50,6 +54,16 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_ENCODE_FREELIST 1
 #endif

+// Reserve extra padding at the end of each block; must be a multiple of `sizeof(intptr_t)`!
+// If free lists are encoded, the padding is checked if it was modified on free.
+#if (!defined(MI_PADDING))
+#if (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_PADDING MI_MAX_ALIGN_SIZE
+#else
+#define MI_PADDING 0
+#endif
+#endif
+
 // ------------------------------------------------------
 // Platform specific values
 // ------------------------------------------------------
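Since the block above only picks a default when MI_PADDING is not already defined, a build can override it up front (for example with a -DMI_PADDING=<n> compile definition), as long as the value stays a multiple of sizeof(intptr_t). A hypothetical compile-time guard for such an override might look like this (not part of mimalloc; the chosen value 32 is just for illustration):

/* Hypothetical override sketch (not mimalloc code). */
#include <assert.h>
#include <stdint.h>

#ifndef MI_PADDING
#define MI_PADDING 32   /* assumed custom value, chosen for illustration */
#endif

/* must be a multiple of sizeof(intptr_t), as the comment in the diff requires */
static_assert((MI_PADDING % sizeof(intptr_t)) == 0,
              "MI_PADDING must be a multiple of sizeof(intptr_t)");

int main(void) { return 0; }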
@@ -113,10 +127,6 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
 #define MI_HUGE_OBJ_SIZE_MAX (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE) // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)

-// Minimal alignment necessary. On most platforms 16 bytes are needed
-// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
-#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
-
 // Maximum number of size classes. (spaced exponentially in 12.5% increments)
 #define MI_BIN_HUGE (73U)

src/alloc.c (40 lines changed)
@@ -42,6 +42,11 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
     size_t bin = _mi_bin(size);
     mi_heap_stat_increase(heap,normal[bin], 1);
   }
 #endif
+#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+  mi_assert_internal((MI_PADDING % sizeof(mi_block_t*)) == 0);
+  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + page->xblock_size - MI_PADDING);
+  mi_block_set_nextx(page, padding, block, page->key[0], page->key[1]);
+#endif
   return block;
 }
@@ -54,6 +59,9 @@ extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size
 }

 extern inline mi_decl_allocator void* mi_malloc_small(size_t size) mi_attr_noexcept {
+  #if (MI_PADDING>0)
+  size += MI_PADDING;
+  #endif
   return mi_heap_malloc_small(mi_get_default_heap(), size);
 }

@@ -69,6 +77,9 @@ mi_decl_allocator void* mi_zalloc_small(size_t size) mi_attr_noexcept {
 extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
   mi_assert(heap!=NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+  #if (MI_PADDING>0)
+  size += MI_PADDING;
+  #endif
   void* p;
   if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
     p = mi_heap_malloc_small(heap, size);
@@ -99,11 +110,11 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
   if (page->is_zero) {
     // already zero initialized memory?
     ((mi_block_t*)p)->next = 0; // clear the free list pointer
-    mi_assert_expensive(mi_mem_is_zero(p, mi_page_block_size(page)));
+    mi_assert_expensive(mi_mem_is_zero(p, mi_page_block_size(page) - MI_PADDING));
   }
   else {
     // otherwise memset
-    memset(p, 0, mi_page_block_size(page));
+    memset(p, 0, mi_page_block_size(page) - MI_PADDING);
   }
 }

@@ -171,6 +182,20 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
 }
 #endif

+#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + page->xblock_size - MI_PADDING);
+  mi_block_t* const decoded = mi_block_nextx(page, padding, page->key[0], page->key[1]);
+  if (decoded != block) {
+    _mi_error_message(EINVAL, "buffer overflow in heap block %p: write after %zu bytes\n", block, page->xblock_size);
+  }
+}
+#else
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+  UNUSED(page);
+  UNUSED(block);
+}
+#endif

 // ------------------------------------------------------
 // Free
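To make the check above easier to follow: the allocation path (_mi_page_malloc) stores the block pointer at the padding location, encoded with the page's two keys; on free, mi_check_padding decodes it and compares the result with the block address, so any overflow into the padding breaks the round trip. Below is a simplified standalone model of that round trip, assuming a toy keyed encoding; mimalloc's real mi_block_set_nextx/mi_block_nextx encoding differs, and k0/k1 are stand-ins for page->key[0] and page->key[1].

/* Toy model of the padding round trip (illustration only, not mimalloc code). */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uintptr_t encode_ptr(const void* p, uintptr_t k0, uintptr_t k1) {
  return ((uintptr_t)p ^ k0) + k1;
}
static const void* decode_ptr(uintptr_t v, uintptr_t k0, uintptr_t k1) {
  return (const void*)((v - k1) ^ k0);
}

/* allocation path: write the encoded block pointer into the padding area */
static void write_padding(void* block, size_t block_size, size_t pad,
                          uintptr_t k0, uintptr_t k1) {
  uintptr_t enc = encode_ptr(block, k0, k1);
  memcpy((uint8_t*)block + block_size - pad, &enc, sizeof(enc));
}

/* free path: decode and compare; a mismatch means the padding was clobbered */
static bool padding_ok(const void* block, size_t block_size, size_t pad,
                       uintptr_t k0, uintptr_t k1) {
  uintptr_t enc;
  memcpy(&enc, (const uint8_t*)block + block_size - pad, sizeof(enc));
  return decode_ptr(enc, k0, k1) == block;
}

int main(void) {
  uint8_t block[32];                        /* pretend block of size 32 */
  const size_t pad = 16;                    /* last 16 bytes are padding */
  const uintptr_t k0 = 0x1234, k1 = 0x9876; /* stand-ins for the page keys */
  write_padding(block, sizeof(block), pad, k0, k1);
  printf("intact: %d\n", padding_ok(block, sizeof(block), pad, k0, k1));         /* 1 */
  block[16] ^= 0xFF;  /* simulate an overflow that corrupts the padding */
  printf("after overflow: %d\n", padding_ok(block, sizeof(block), pad, k0, k1)); /* 0 */
  return 0;
}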
@@ -214,6 +239,8 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     return;
   }

+  mi_check_padding(page, block);
+
   mi_thread_free_t tfree;
   mi_thread_free_t tfreex;
   bool use_delayed;
@@ -258,13 +285,14 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
 #if (MI_DEBUG)
-  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+  memset(block, MI_DEBUG_FREED, mi_page_block_size(page) - MI_PADDING);
 #endif

   // and push it on the free list
   if (mi_likely(local)) {
     // owning thread can free a block directly
     if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+    mi_check_padding(page, block);
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
@@ -341,6 +369,7 @@ void mi_free(void* p) mi_attr_noexcept
     // local, and not full or aligned
     mi_block_t* const block = (mi_block_t*)p;
     if (mi_unlikely(mi_check_is_double_free(page,block))) return;
+    mi_check_padding(page, block);
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
@@ -381,8 +410,11 @@ bool _mi_free_delayed_block(mi_block_t* block) {
 size_t mi_usable_size(const void* p) mi_attr_noexcept {
   if (p==NULL) return 0;
   const mi_segment_t* segment = _mi_ptr_segment(p);
-  const mi_page_t* page = _mi_segment_page_of(segment,p);
+  const mi_page_t* page = _mi_segment_page_of(segment, p);
   size_t size = mi_page_block_size(page);
+#if defined(MI_PADDING)
+  size -= MI_PADDING;
+#endif
   if (mi_unlikely(mi_page_has_aligned(page))) {
     ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
     mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
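A small usage sketch of the effect of this change: with a non-zero MI_PADDING, mi_usable_size() reports the block size minus the padding, so the padding bytes are never handed out as usable space. The exact number printed below depends on the size class the request lands in and on whether the build enables padding.

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(100);
  /* usable size is the block size for the 100-byte size class, minus
     MI_PADDING when padding is enabled in this build */
  printf("requested 100, usable: %zu\n", mi_usable_size(p));
  mi_free(p);
  return 0;
}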
@@ -10,6 +10,7 @@
 static void double_free1();
 static void double_free2();
 static void corrupt_free();
+static void block_overflow1();

 int main() {
   mi_version();
@@ -18,6 +19,7 @@ int main() {
   // double_free1();
   // double_free2();
   // corrupt_free();
+  // block_overflow1();

   void* p1 = malloc(78);
   void* p2 = malloc(24);
@@ -41,6 +43,11 @@ int main() {
   return 0;
 }

+static void block_overflow1() {
+  void* p = mi_malloc(16);
+  memset(p, 0, 17);
+  free(p);
+}

 // The double free samples come ArcHeap [1] by Insu Yun (issue #161)
 // [1]: https://arxiv.org/pdf/1903.00503.pdf
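Note on the new test: with MI_PADDING > 0 and MI_ENCODE_FREELIST defined (for example in a debug or secure build), the single byte written past the 16-byte request in block_overflow1 should land in the padding area, so the subsequent free(p) is expected to trigger the "buffer overflow in heap block" error message added in src/alloc.c above.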