add MI_PADDING build option to add padding to each block to detect heap block overflows

commit 5d212d688f (parent 9a532037df)
daan, 2020-01-29 17:10:57 -08:00
3 changed files with 57 additions and 8 deletions
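Below is a minimal, self-contained sketch of the technique this commit implements (illustration only, not mimalloc's actual code; the pad_malloc/pad_free names, the PAD constant, and the fixed XOR keys are invented for the sketch). The idea: reserve a few extra bytes at the end of every block, store an encoded canary there on allocation, and verify it on free. A write past the requested size clobbers the canary, so the overflow is reported when the block is freed. The plain XOR with two fixed keys stands in for mimalloc's per-page key encoding, and the size is passed explicitly since the sketch has no page metadata.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PAD sizeof(uintptr_t)   // trailing canary bytes per block

static const uintptr_t key0 = 0x8badf00d, key1 = 0xdeadbeef;

static void* pad_malloc(size_t size) {
  uint8_t* p = (uint8_t*)malloc(size + PAD);
  if (p == NULL) return NULL;
  const uintptr_t canary = (uintptr_t)p ^ key0 ^ key1;  // encode the block address
  memcpy(p + size, &canary, PAD);                       // store it after the user bytes
  return p;
}

static void pad_free(void* p, size_t size) {
  uintptr_t canary;
  memcpy(&canary, (const uint8_t*)p + size, PAD);       // read the canary back
  if ((canary ^ key0 ^ key1) != (uintptr_t)p) {         // decode and compare
    fprintf(stderr, "buffer overflow in heap block %p: write after %zu bytes\n", p, size);
  }
  free(p);
}

int main(void) {
  char* p = (char*)pad_malloc(16);
  if (p == NULL) return 1;
  memset(p, 0, 17);   // one byte too many: clobbers the canary
  pad_free(p, 16);    // the overflow is detected here
  return 0;
}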

include/mimalloc-types.h

@@ -12,6 +12,10 @@ terms of the MIT license. A copy of the license can be found in the file
 #include <stdint.h>           // uintptr_t, uint16_t, etc
 #include <mimalloc-atomic.h>  // _Atomic
 
+// Minimal alignment necessary. On most platforms 16 bytes are needed
+// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
+#define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
+
 // ------------------------------------------------------
 // Variants
 // ------------------------------------------------------
@@ -50,6 +54,16 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_ENCODE_FREELIST  1
 #endif
 
+// Reserve extra padding at the end of each block; must be a multiple of `sizeof(intptr_t)`!
+// If free lists are encoded, the padding is checked if it was modified on free.
+#if (!defined(MI_PADDING))
+#if (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_PADDING  MI_MAX_ALIGN_SIZE
+#else
+#define MI_PADDING  0
+#endif
+#endif
+
 // ------------------------------------------------------
 // Platform specific values
 // ------------------------------------------------------
@@ -113,10 +127,6 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_LARGE_OBJ_WSIZE_MAX    (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
 #define MI_HUGE_OBJ_SIZE_MAX      (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE)  // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)
 
-// Minimal alignment necessary. On most platforms 16 bytes are needed
-// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
-#define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
-
 // Maximum number of size classes. (spaced exponentially in 12.5% increments)
 #define MI_BIN_HUGE  (73U)
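With these defaults the padding is enabled automatically in debug builds (MI_DEBUG>=1) and at the highest security level (MI_SECURE>=3), and costs nothing otherwise since MI_PADDING is 0. Because the block is guarded by `!defined(MI_PADDING)`, the value can presumably also be set explicitly at build time, e.g. by passing -DMI_PADDING=16 to the compiler; per the comment above it must be a multiple of `sizeof(intptr_t)`.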

src/alloc.c

@@ -42,6 +42,11 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
     size_t bin = _mi_bin(size);
     mi_heap_stat_increase(heap,normal[bin], 1);
   }
+#endif
+#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+  mi_assert_internal((MI_PADDING % sizeof(mi_block_t*)) == 0);
+  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + page->xblock_size - MI_PADDING);
+  mi_block_set_nextx(page, padding, block, page->key[0], page->key[1]);
 #endif
   return block;
 }
@@ -54,6 +59,9 @@ extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
 }
 
 extern inline mi_decl_allocator void* mi_malloc_small(size_t size) mi_attr_noexcept {
+#if (MI_PADDING>0)
+  size += MI_PADDING;
+#endif
   return mi_heap_malloc_small(mi_get_default_heap(), size);
 }
@@ -69,6 +77,9 @@ mi_decl_allocator void* mi_zalloc_small(size_t size) mi_attr_noexcept {
 extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
   mi_assert(heap!=NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+#if (MI_PADDING>0)
+  size += MI_PADDING;
+#endif
   void* p;
   if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
     p = mi_heap_malloc_small(heap, size);
@@ -99,11 +110,11 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
   if (page->is_zero) {
     // already zero initialized memory?
     ((mi_block_t*)p)->next = 0;  // clear the free list pointer
-    mi_assert_expensive(mi_mem_is_zero(p, mi_page_block_size(page)));
+    mi_assert_expensive(mi_mem_is_zero(p, mi_page_block_size(page) - MI_PADDING));
   }
   else {
     // otherwise memset
-    memset(p, 0, mi_page_block_size(page));
+    memset(p, 0, mi_page_block_size(page) - MI_PADDING);
   }
 }
@@ -171,6 +182,20 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
 }
 #endif
 
+#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+  mi_block_t* const padding = (mi_block_t*)((uint8_t*)block + page->xblock_size - MI_PADDING);
+  mi_block_t* const decoded = mi_block_nextx(page, padding, page->key[0], page->key[1]);
+  if (decoded != block) {
+    _mi_error_message(EINVAL, "buffer overflow in heap block %p: write after %zu bytes\n", block, page->xblock_size);
+  }
+}
+#else
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+  UNUSED(page);
+  UNUSED(block);
+}
+#endif
 // ------------------------------------------------------
 // Free
 // ------------------------------------------------------
@@ -214,6 +239,8 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
     return;
   }
 
+  mi_check_padding(page, block);
+
   mi_thread_free_t tfree;
   mi_thread_free_t tfreex;
   bool use_delayed;
@@ -258,13 +285,14 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
 #if (MI_DEBUG)
-  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+  memset(block, MI_DEBUG_FREED, mi_page_block_size(page) - MI_PADDING);
 #endif
 
   // and push it on the free list
   if (mi_likely(local)) {
     // owning thread can free a block directly
     if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+    mi_check_padding(page, block);
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
@@ -341,6 +369,7 @@ void mi_free(void* p) mi_attr_noexcept
     // local, and not full or aligned
     mi_block_t* const block = (mi_block_t*)p;
     if (mi_unlikely(mi_check_is_double_free(page,block))) return;
+    mi_check_padding(page, block);
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
@@ -381,8 +410,11 @@ bool _mi_free_delayed_block(mi_block_t* block) {
 size_t mi_usable_size(const void* p) mi_attr_noexcept {
   if (p==NULL) return 0;
   const mi_segment_t* segment = _mi_ptr_segment(p);
-  const mi_page_t* page = _mi_segment_page_of(segment,p);
+  const mi_page_t* page = _mi_segment_page_of(segment, p);
   size_t size = mi_page_block_size(page);
+#if defined(MI_PADDING)
+  size -= MI_PADDING;
+#endif
   if (mi_unlikely(mi_page_has_aligned(page))) {
     ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
     mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
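Taken together: on allocation, _mi_page_malloc writes the block's own address, encoded with the page's two keys, into the trailing MI_PADDING bytes; each free path (the fast path in mi_free, the local path in _mi_free_block, and the cross-thread path in _mi_free_block_mt) calls mi_check_padding, which decodes that word and reports EINVAL if it no longer points back to the block. Because the canary is key-encoded rather than a fixed byte pattern, an overflowing write cannot trivially reproduce a valid value. mi_usable_size subtracts the padding so callers never observe the extra bytes, and _mi_block_zero_init and the MI_DEBUG_FREED memset stop short of it so the canary survives until the check.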

test/main-override-static.c

@@ -10,6 +10,7 @@
 static void double_free1();
 static void double_free2();
 static void corrupt_free();
+static void block_overflow1();
 
 int main() {
   mi_version();
@@ -18,6 +19,7 @@ int main() {
   // double_free1();
   // double_free2();
   // corrupt_free();
+  // block_overflow1();
 
   void* p1 = malloc(78);
   void* p2 = malloc(24);
@@ -41,6 +43,11 @@ int main() {
   return 0;
 }
 
+static void block_overflow1() {
+  void* p = mi_malloc(16);
+  memset(p, 0, 17);
+  free(p);
+}
 
// The double free samples come from ArcHeap [1] by Insu Yun (issue #161)
// [1]: https://arxiv.org/pdf/1903.00503.pdf
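When the block_overflow1 call in main is uncommented and mimalloc is built with padding enabled (MI_DEBUG>=1 or MI_SECURE>=3), the one-byte overflow should be reported when free(p) runs, via the error message added above ("buffer overflow in heap block %p: write after %zu bytes"). Note the size reported is page->xblock_size, the internal block size including the padding, not the 16 bytes requested.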