Merge branch 'dev' into dev-debug

daan 2020-05-02 22:23:22 -07:00
commit faa67ff30b
4 changed files with 31 additions and 3 deletions

CMakeLists.txt

@@ -212,6 +212,7 @@ endif()
# static library
add_library(mimalloc-static STATIC ${mi_sources})
+set_property(TARGET mimalloc-static PROPERTY POSITION_INDEPENDENT_CODE ON)
target_compile_definitions(mimalloc-static PRIVATE ${mi_defines} MI_STATIC_LIB)
target_compile_options(mimalloc-static PRIVATE ${mi_cflags})
target_link_libraries(mimalloc-static PUBLIC ${mi_libraries})
@@ -248,6 +249,7 @@ endif()
# single object file for more predictable static overriding
add_library(mimalloc-obj OBJECT src/static.c)
+set_property(TARGET mimalloc-obj PROPERTY POSITION_INDEPENDENT_CODE ON)
target_compile_definitions(mimalloc-obj PRIVATE ${mi_defines})
target_compile_options(mimalloc-obj PRIVATE ${mi_cflags})
target_include_directories(mimalloc-obj PUBLIC

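The single object file is meant to be linked directly into a program so that the standard allocation functions resolve to mimalloc. A minimal sketch of that use, assuming the object is built as `mimalloc.o` (the program and link line below are illustrative, not part of this diff):

    /* example.c -- no mimalloc headers are needed for static overriding */
    #include <stdlib.h>
    #include <stdio.h>

    int main(void) {
      void* p = malloc(64);   /* resolves to mimalloc when mimalloc.o is linked first */
      printf("allocated at %p\n", p);
      free(p);
      return 0;
    }

    /* build sketch: cc -o example example.c mimalloc.o -lpthread */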
readme.md

@@ -255,6 +255,32 @@ OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the
[linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5
[windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017
## Secure Mode

_mimalloc_ can be built in secure mode by using the `-DMI_SECURE=ON` flag in `cmake`. This build enables various mitigations
to make mimalloc more robust against exploits. In particular:

- All internal mimalloc pages are surrounded by guard pages, and the heap metadata is behind a guard page as well (so a buffer overflow
  exploit cannot reach into the metadata).
- All free-list pointers are
  [encoded](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396)
  with per-page keys, which both prevents overwrites with a known pointer and helps detect heap corruption (a sketch follows below).
- Double frees are detected (and ignored).
- The free lists are initialized in a random order, and allocation randomly chooses between extension and reuse within a page to
  mitigate attacks that rely on a predictable allocation order. Similarly, the larger heap blocks that mimalloc allocates
  from the OS are also address-randomized.

As always, evaluate with care as part of an overall security strategy: all of the above are mitigations, not guarantees.
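For illustration, here is a minimal sketch of free-list pointer encoding with two per-page random keys; the names and the exact mixing steps are simplified assumptions, see the linked `mimalloc-internal.h` for the real scheme:

    #include <stdint.h>

    /* Hide the raw pointer value so an attacker cannot overwrite a free-list
       entry with a known address; decode inverts the steps in reverse order. */
    static inline void* ptr_encode(const void* p, uintptr_t key1, uintptr_t key2) {
      return (void*)(((uintptr_t)p ^ key1) + key2);
    }
    static inline void* ptr_decode(const void* p, uintptr_t key1, uintptr_t key2) {
      return (void*)(((uintptr_t)p - key2) ^ key1);
    }

Decoding a corrupted entry yields an out-of-range value that the allocator can check against the page bounds, turning a silent overwrite into a detectable error.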
## Debug Mode

When _mimalloc_ is built in debug mode, various checks are done at runtime to catch development errors.

- Statistics are maintained in detail for each object size. They can be shown using `MIMALLOC_SHOW_STATS=1` at runtime.
- All objects have padding at the end to detect (byte-precise) heap block overflows.
- Double frees and frees of invalid heap pointers are detected.
- Corrupted free lists and some forms of use-after-free are detected.
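As a quick illustration, a toy program (hypothetical, not part of this diff) run against a debug build prints the per-size statistics on exit:

    /* stats.c -- run a debug build as:  MIMALLOC_SHOW_STATS=1 ./stats */
    #include <mimalloc.h>

    int main(void) {
      for (int i = 0; i < 1000; i++) {
        void* p = mi_malloc(32);   /* tracked per object size in debug mode */
        mi_free(p);
      }
      return 0;   /* detailed statistics are printed at exit */
    }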
# Overriding Malloc

src/alloc-override.c

@@ -103,7 +103,7 @@ terms of the MIT license. A copy of the license can be found in the file
void operator delete[](void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n);
#endif
-#if (__cplusplus > 201402L || defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5))
+#if (__cplusplus > 201402L && defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5))
void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); };

src/page.c

@@ -381,7 +381,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
}
#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX
-#define MI_RETIRE_CYCLES (16)
+#define MI_RETIRE_CYCLES (8)
// Retire a page with no more used blocks
// Important to not retire too quickly though as new
@@ -406,7 +406,7 @@ void _mi_page_retire(mi_page_t* page) {
if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
-page->retire_expire = MI_RETIRE_CYCLES;
+page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
mi_heap_t* heap = mi_page_heap(page);
mi_assert_internal(pq >= heap->pages);
const size_t index = pq - heap->pages;
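Taken together, the two changes above halve the base retire delay (16 to 8 cycles) and make pages with larger blocks expire four times sooner, the rationale presumably being that small size classes are reused more often. A toy restatement of the new expiration choice (the constants are taken from the diff; the helper itself is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    #define MI_RETIRE_CYCLES (8)

    /* Small pages get the full delay before actually being freed back;
       larger pages wait only MI_RETIRE_CYCLES/4 = 2 cycles. */
    static uint8_t retire_expire_for(size_t xblock_size, size_t small_obj_max) {
      return (uint8_t)(xblock_size <= small_obj_max ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
    }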