merge from dev-win

This commit is contained in:
daan 2019-10-28 12:33:01 -07:00
commit 9d4f57abf3
11 changed files with 147 additions and 64 deletions

View File

@@ -67,7 +67,7 @@ endif()
if(MI_SECURE MATCHES "ON")
message(STATUS "Set secure build (MI_SECURE=ON)")
list(APPEND mi_defines MI_SECURE=2)
list(APPEND mi_defines MI_SECURE=3)
endif()
if(MI_SEE_ASM MATCHES "ON")

View File

@@ -116,7 +116,7 @@
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<PreprocessorDefinitions>MI_DEBUG=1;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<CompileAs>CompileAsCpp</CompileAs>
<SupportJustMyCode>false</SupportJustMyCode>
<LanguageStandard>stdcpp17</LanguageStandard>

View File

@@ -20,6 +20,18 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_trace_message(...)
#endif
#if defined(_MSC_VER)
#define mi_decl_noinline __declspec(noinline)
#define mi_attr_noreturn
#elif defined(__GNUC__) || defined(__clang__)
#define mi_decl_noinline __attribute__((noinline))
#define mi_attr_noreturn __attribute__((noreturn))
#else
#define mi_decl_noinline
#define mi_attr_noreturn
#endif
// "options.c"
void _mi_fputs(mi_output_fun* out, const char* prefix, const char* message);
void _mi_fprintf(mi_output_fun* out, const char* fmt, ...);
@@ -28,12 +40,12 @@ void _mi_warning_message(const char* fmt, ...);
void _mi_verbose_message(const char* fmt, ...);
void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
void _mi_fatal_error(const char* fmt, ...) mi_attr_noreturn;
// "init.c"
extern mi_stats_t _mi_stats_main;
extern const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
uintptr_t _mi_ptr_cookie(const void* p);
uintptr_t _mi_random_shuffle(uintptr_t x);
uintptr_t _mi_random_init(uintptr_t seed /* can be zero */);
bool _mi_preloading(); // true while the C runtime is not ready
@@ -135,13 +147,6 @@ bool _mi_page_is_valid(mi_page_t* page);
#define __has_builtin(x) 0
#endif
#if defined(_MSC_VER)
#define mi_decl_noinline __declspec(noinline)
#elif defined(__GNUC__) || defined(__clang__)
#define mi_decl_noinline __attribute__((noinline))
#else
#define mi_decl_noinline
#endif
/* -----------------------------------------------------------
@@ -254,6 +259,10 @@ static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
return (heap != &_mi_heap_empty);
}
static inline uintptr_t _mi_ptr_cookie(const void* p) {
return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
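For context, a minimal standalone sketch (hypothetical names, not this commit's code) of how such an XOR cookie lets the allocator cheaply validate that a pointer refers to a genuine heap structure:

#include <stdint.h>
#include <stdbool.h>

typedef struct segment_s {
  uintptr_t cookie;   // set at creation to (uintptr_t)self ^ secret
} segment_t;

static uintptr_t secret;   // in mimalloc this comes from _mi_random_init

static uintptr_t ptr_cookie(const void* p) {
  return ((uintptr_t)p ^ secret);
}

static bool segment_is_valid(const segment_t* s) {
  // a stray or forged pointer is unlikely to carry a matching cookie
  return (ptr_cookie(s) == s->cookie);
}

This is the same shape of check that mi_free performs below against segment->cookie.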
/* -----------------------------------------------------------
Pages
----------------------------------------------------------- */
@@ -401,8 +410,12 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
// Encoding/Decoding the free list next pointers
// -------------------------------------------------------------------
static inline mi_block_t* mi_block_nextx( uintptr_t cookie, mi_block_t* block ) {
#if MI_SECURE
static inline bool mi_is_in_same_segment(const void* p, const void* q) {
return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
}
static inline mi_block_t* mi_block_nextx( uintptr_t cookie, const mi_block_t* block ) {
#if MI_SECURE
return (mi_block_t*)(block->next ^ cookie);
#else
UNUSED(cookie);
@@ -410,7 +423,7 @@ static inline mi_block_t* mi_block_nextx( uintptr_t cookie, mi_block_t* block )
#endif
}
static inline void mi_block_set_nextx(uintptr_t cookie, mi_block_t* block, mi_block_t* next) {
static inline void mi_block_set_nextx(uintptr_t cookie, mi_block_t* block, const mi_block_t* next) {
#if MI_SECURE
block->next = (mi_encoded_t)next ^ cookie;
#else
@@ -419,16 +432,25 @@ static inline void mi_block_set_nextx(uintptr_t cookie, mi_block_t* block, mi_bl
#endif
}
static inline mi_block_t* mi_block_next(mi_page_t* page, mi_block_t* block) {
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
#if MI_SECURE
return mi_block_nextx(page->cookie,block);
mi_block_t* next = mi_block_nextx(page->cookie,block);
#if MI_SECURE >= 4
// check if next is at least in our segment range
// TODO: it would be better to check whether it is actually inside our page, but that is
// more expensive to compute. Perhaps with a relative free list this becomes feasible?
if (next!=NULL && !mi_is_in_same_segment(block, next)) {
_mi_fatal_error("corrupted free list entry at %p: %zx\n", block, (uintptr_t)next);
}
#endif
return next;
#else
UNUSED(page);
return mi_block_nextx(0, block);
#endif
}
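As an aside, a runnable sketch (hypothetical names) of the XOR scheme these helpers implement; the encoding round-trips exactly, while a forged link decodes to garbage without the per-page cookie:

#include <stdint.h>
#include <assert.h>

typedef struct block_s { uintptr_t next; } block_t;   // encoded link

static block_t* decode_next(uintptr_t cookie, const block_t* b) {
  return (block_t*)(b->next ^ cookie);
}
static void encode_next(uintptr_t cookie, block_t* b, const block_t* next) {
  b->next = ((uintptr_t)next ^ cookie);
}

int main(void) {
  uintptr_t cookie = (uintptr_t)0x9e3779b9;  // per-page random value in mimalloc
  block_t a, b;
  encode_next(cookie, &a, &b);
  assert(decode_next(cookie, &a) == &b);     // decodes back to the real address
  return 0;
}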
static inline void mi_block_set_next(mi_page_t* page, mi_block_t* block, mi_block_t* next) {
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
#if MI_SECURE
mi_block_set_nextx(page->cookie,block,next);
#else

View File

@@ -22,8 +22,11 @@ terms of the MIT license. A copy of the license can be found in the file
// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1
// Define MI_SECURE as 1 to encode free lists
// #define MI_SECURE 1
// Define MI_SECURE to enable security mitigations
// #define MI_SECURE 1 // guard page around metadata
// #define MI_SECURE 2 // guard page around each mimalloc page
// #define MI_SECURE 3 // encode free lists
// #define MI_SECURE 4 // all security enabled (checks for double free, corrupted free list and invalid pointer free)
#if !defined(MI_SECURE)
#define MI_SECURE 0
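For illustration, a sketch of branching on the graded levels at compile time (the level itself is fixed at build time, e.g. via the CMake option above or a -D flag):

#include <stdio.h>
#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif

int main(void) {
  // build with e.g. -DMI_SECURE=4 to raise the level
#if MI_SECURE >= 4
  printf("level 4: double-free and free-list corruption checks\n");
#elif MI_SECURE >= 3
  printf("level 3: encoded free lists\n");
#elif MI_SECURE >= 1
  printf("level 1-2: guard pages\n");
#else
  printf("level 0: no security mitigations\n");
#endif
  return 0;
}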

View File

@@ -124,10 +124,54 @@ mi_decl_allocator void* mi_zalloc(size_t size) mi_attr_noexcept {
}
// ------------------------------------------------------
// Check for double free in secure mode
// ------------------------------------------------------
#if MI_SECURE>=4
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
while (list != NULL) {
if (elem==list) return true;
list = mi_block_next(page, list);
}
return false;
}
static mi_decl_noinline bool mi_check_double_freex(const mi_page_t* page, const mi_block_t* block, const mi_block_t* n) {
size_t psize;
uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
if (n == NULL || ((uint8_t*)n >= pstart && (uint8_t*)n < (pstart + psize))) {
// Suspicious: the decoded value is in the same page (or NULL).
// Walk the free lists to see if it is already freed
if (mi_list_contains(page, page->free, block) ||
mi_list_contains(page, page->local_free, block) ||
mi_list_contains(page, (const mi_block_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*,&page->thread_free)), block))
{
_mi_fatal_error("double free detected of block %p with size %zu\n", block, page->block_size);
return true;
}
}
return false;
}
static inline bool mi_check_double_free(const mi_page_t* page, const mi_block_t* block) {
mi_block_t* n = (mi_block_t*)(block->next ^ page->cookie);
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check
(n==NULL || mi_is_in_same_segment(block, n)))
{
// Suspicious: the decoded value in the block is in the same segment (or NULL) -- maybe a double free?
return mi_check_double_freex(page, block, n);
}
return false;
}
#endif
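To exercise the new check, a small test one might run against a build with MI_SECURE=4 (illustrative; the message text comes from the _mi_fatal_error call above):

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(32);
  mi_free(p);
  mi_free(p);  // should abort with "double free detected of block ..." via exit(99)
  return 0;
}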
// ------------------------------------------------------
// Free
// ------------------------------------------------------
// multi-threaded free
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
@@ -251,14 +295,16 @@ void mi_free(void* p) mi_attr_noexcept
#if (MI_DEBUG>0)
if (mi_unlikely(!mi_is_in_heap_region(p))) {
_mi_warning_message("possibly trying to mi_free a pointer that does not point to a valid heap region: 0x%p\n"
_mi_warning_message("possibly trying to free a pointer that does not point to a valid heap region: 0x%p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", p);
if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
_mi_warning_message("(yes, the previous pointer 0x%p was valid after all)\n", p);
}
}
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
_mi_error_message("trying to mi_free a pointer that does not point to a valid heap space: %p\n", p);
_mi_error_message("trying to free a pointer that does not point to a valid heap space: %p\n", p);
return;
}
#endif
@@ -278,6 +324,9 @@ void mi_free(void* p) mi_attr_noexcept
if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches, and the page is neither full nor has aligned blocks
// local, and not full or aligned
mi_block_t* block = (mi_block_t*)p;
#if MI_SECURE>=4
if (mi_check_double_free(page,block)) return;
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
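The fast path above is a plain intrusive push onto the owning thread's local free list; a simplified standalone sketch (hypothetical types, with the MI_SECURE encoding left out):

#include <stddef.h>

typedef struct block_s { struct block_s* next; } block_t;
typedef struct page_s  { block_t* local_free; size_t used; } page_t;

static void local_free_push(page_t* page, block_t* block) {
  block->next = page->local_free;  // mi_block_set_next additionally XOR-encodes this link
  page->local_free = block;
  page->used--;
}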

View File

@@ -208,10 +208,6 @@ uintptr_t _mi_random_init(uintptr_t seed /* can be zero */) {
return x;
}
uintptr_t _mi_ptr_cookie(const void* p) {
return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
/* -----------------------------------------------------------
Initialization and freeing of the thread local heaps
----------------------------------------------------------- */

View File

@@ -139,7 +139,7 @@ Commit from a region
// Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
// if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
// (not being able to claim is not considered an error so check for `p != NULL` afterwards).
static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks,
size_t size, bool* commit, bool* allow_large, bool* is_zero, void** p, size_t* id, mi_os_tld_t* tld)
{
size_t mask = mi_region_block_mask(blocks,bitidx);
@@ -149,7 +149,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
// ensure the region is reserved
mi_region_info_t info = mi_atomic_read(&region->info);
if (info == 0)
{
bool region_commit = mi_option_is_enabled(mi_option_eager_region_commit);
bool region_large = *allow_large;
@@ -202,7 +202,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
// Commit the blocks to memory
bool region_is_committed = false;
bool region_is_large = false;
void* start = mi_region_info_read(info,&region_is_large,&region_is_committed);
mi_assert_internal(!(region_is_large && !*allow_large));
mi_assert_internal(start!=NULL);
@@ -215,7 +215,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
if (*commit && !region_is_committed) {
// ensure commit
bool commit_zero = false;
_mi_os_commit(blocks_start, mi_good_commit_size(size), &commit_zero, tld->stats); // only commit needed size (unless using large OS pages)
if (commit_zero) *is_zero = true;
@@ -225,7 +225,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
*commit = true;
}
// and return the allocation
mi_assert_internal(blocks_start != NULL);
*allow_large = region_is_large;
*p = blocks_start;
@@ -271,7 +271,7 @@ static inline size_t mi_bsr(uintptr_t x) {
// Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
// if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
// (not being able to claim is not considered an error so check for `p != NULL` afterwards).
static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size,
bool* commit, bool* allow_large, bool* is_zero, void** p, size_t* id, mi_os_tld_t* tld)
{
mi_assert_internal(p != NULL && id != NULL);
@@ -303,7 +303,7 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
else {
// success, we claimed the bits
// now commit the block memory -- this can still fail
return mi_region_commit_blocks(region, idx, bitidx, blocks,
size, commit, allow_large, is_zero, p, id, tld);
}
}
@@ -327,20 +327,20 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc
// Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
// if the blocks were successfully claimed so ensure they are initialized to NULL/0 before the call.
// (not being able to claim is not considered an error so check for `p != NULL` afterwards).
static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size,
bool* commit, bool* allow_large, bool* is_zero,
void** p, size_t* id, mi_os_tld_t* tld)
{
// check if there are available blocks in the region.
mi_assert_internal(idx < MI_REGION_MAX);
mem_region_t* region = &regions[idx];
uintptr_t m = mi_atomic_read_relaxed(&region->map);
if (m != MI_REGION_MAP_FULL) { // some bits are zero
bool ok = (*commit || *allow_large); // committing or allow-large is always ok
if (!ok) {
// otherwise skip incompatible regions if possible.
// this is not guaranteed due to multiple threads allocating at the same time but
// that's ok. In secure mode, large is never allowed for any thread, so that works out;
// otherwise we might just not be able to reset/decommit individual pages sometimes.
mi_region_info_t info = mi_atomic_read_relaxed(&region->info);
bool is_large;
@@ -361,7 +361,7 @@ static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size,
// Allocate `size` memory aligned at `alignment`. Return non-NULL on success, with a given memory `id`.
// (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero,
size_t* id, mi_os_tld_t* tld)
{
mi_assert_internal(id != NULL && tld != NULL);
@@ -369,7 +369,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
*id = SIZE_MAX;
*is_zero = false;
bool default_large = false;
if (large==NULL) large = &default_large; // ensure `large != NULL`
// use direct OS allocation for huge blocks or alignment (with `id = SIZE_MAX`)
if (size > MI_REGION_MAX_ALLOC_SIZE || alignment > MI_SEGMENT_ALIGN) {
@@ -465,7 +465,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
// _mi_os_decommit(p,size,stats); // if !is_eager_committed (and clear dirty bits)
}
// else { _mi_os_reset(p,size,stats); }
}
if (!is_eager_committed) {
// adjust commit statistics as we commit again when re-using the same slot
_mi_stat_decrease(&stats->committed, mi_good_commit_size(size));
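For reference, the region map bits these routines claim and release form a contiguous run; a standalone sketch of the mask shape (cf. mi_region_block_mask above; assumes the run never spans the entire map word, which the allocator guards against separately):

#include <stddef.h>
#include <stdint.h>

static uintptr_t block_mask(size_t blocks, size_t bitidx) {
  // `blocks` one-bits starting at `bitidx`; e.g. block_mask(3, 4) == 0x70
  return ((((uintptr_t)1 << blocks) - 1) << bitidx);
}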

View File

@@ -285,6 +285,14 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, co
}
#endif
mi_attr_noreturn void _mi_fatal_error(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
mi_vfprintf(NULL, "mimalloc: fatal: ", fmt, args);
va_end(args);
exit(99);
}
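The definition uses the standard va_list forwarding pattern; a generic sketch for readers unfamiliar with it (hypothetical `fatal`, writing to stderr instead of mimalloc's output handler):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static void fatal(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);          // bind the variadic arguments
  vfprintf(stderr, fmt, args);  // forward them exactly once
  va_end(args);
  exit(99);                     // same distinctive exit code as _mi_fatal_error
}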
// --------------------------------------------------------
// Initialize options by checking the environment
// --------------------------------------------------------

View File

@@ -95,7 +95,7 @@ typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*
static PVirtualAlloc2 pVirtualAlloc2 = NULL;
static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
static bool mi_win_enable_large_os_pages()
{
if (large_os_page_size > 0) return true;
@@ -149,7 +149,7 @@ void _mi_os_init(void) {
if (hDll != NULL) {
pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
FreeLibrary(hDll);
}
if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
mi_win_enable_large_os_pages();
}
@@ -189,7 +189,7 @@ static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats
#else
err = (munmap(addr, size) == -1);
#endif
if (was_committed) _mi_stat_decrease(&stats->committed, size);
_mi_stat_decrease(&stats->reserved, size);
if (err) {
#pragma warning(suppress:4996)
@@ -233,14 +233,14 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
}
}
#endif
#if (MI_INTPTR_SIZE >= 8)
// on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations
void* hint;
if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) {
return VirtualAlloc(hint, size, flags, PAGE_READWRITE);
}
#endif
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
// on modern Windows, try to use VirtualAlloc2 for aligned allocation
if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
MEM_ADDRESS_REQUIREMENTS reqs = { 0 };
@@ -258,7 +258,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
mi_assert_internal(!(large_only && !allow_large));
static volatile _Atomic(uintptr_t) large_page_try_ok; // = 0;
void* p = NULL;
if ((large_only || use_large_os_page(size, try_alignment))
&& allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
if (!large_only && try_ok > 0) {
@@ -282,7 +282,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
p = mi_win_virtual_allocx(addr, size, try_alignment, flags);
}
if (p == NULL) {
_mi_warning_message("unable to alloc mem error: err: %i size: 0x%x \n", GetLastError(), size);
_mi_warning_message("unable to allocate memory: error code: %i, addr: %p, size: 0x%x, large only: %d, allow_large: %d\n", GetLastError(), addr, size, large_only, allow_large);
}
return p;
}
@@ -398,7 +398,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
}
if (p == NULL) {
*is_large = false;
p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
#if defined(MADV_HUGEPAGE)
// Many Linux systems don't allow MAP_HUGETLB but they support instead
// transparent huge pages (THP). It is not required to call `madvise` with MADV_HUGE
@@ -417,7 +417,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
}
#endif
// On 64-bit systems, we can do efficient aligned allocation by using
// the 4TiB to 30TiB area to allocate them.
#if (MI_INTPTR_SIZE >= 8) && (defined(_WIN32) || (defined(MI_OS_USE_MMAP) && !defined(MAP_ALIGNED)))
static volatile _Atomic(intptr_t) aligned_base;
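The hint machinery referenced here hands out addresses from the area above 4TiB by bumping a shared cursor; a deliberately simplified sketch (hypothetical: the real mi_os_get_aligned_hint also bounds the hinted sizes and wraps the cursor):

#include <stddef.h>
#include <stdint.h>
#include <stdatomic.h>

#define HINT_AREA_BASE ((uintptr_t)4 << 40)   // start hinting at 4TiB

static _Atomic uintptr_t hint_next = ATOMIC_VAR_INIT(HINT_AREA_BASE);

static void* aligned_hint(size_t size, size_t alignment) {
  size = (size + alignment - 1) & ~(alignment - 1);  // keep successive hints aligned
  return (void*)atomic_fetch_add(&hint_next, size);
}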
@@ -824,7 +824,7 @@ typedef struct mi_huge_info_s {
static mi_huge_info_t os_huge_reserved = { NULL, 0, ATOMIC_VAR_INIT(0) };
bool _mi_os_is_huge_reserved(void* p) {
return (mi_atomic_read_ptr(&os_huge_reserved.start) != NULL &&
p >= mi_atomic_read_ptr(&os_huge_reserved.start) &&
(uint8_t*)p < (uint8_t*)mi_atomic_read_ptr(&os_huge_reserved.start) + mi_atomic_read(&os_huge_reserved.reserved));
}
@@ -833,14 +833,14 @@ void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment)
{
// only allow large aligned allocations (e.g. regions)
if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL;
if (try_alignment > MI_SEGMENT_SIZE) return NULL;
if (mi_atomic_read_ptr(&os_huge_reserved.start)==NULL) return NULL;
if (mi_atomic_read(&os_huge_reserved.used) >= mi_atomic_read(&os_huge_reserved.reserved)) return NULL; // already full
// always aligned
mi_assert_internal(mi_atomic_read(&os_huge_reserved.used) % MI_SEGMENT_SIZE == 0 );
mi_assert_internal( (uintptr_t)mi_atomic_read_ptr(&os_huge_reserved.start) % MI_SEGMENT_SIZE == 0 );
// try to reserve space
size_t base = mi_atomic_addu( &os_huge_reserved.used, size );
if ((base + size) > os_huge_reserved.reserved) {
@@ -871,13 +871,13 @@ static void mi_os_free_huge_reserved() {
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
UNUSED(pages); UNUSED(max_secs);
if (pages_reserved != NULL) *pages_reserved = 0;
return ENOMEM;
}
#else
int mi_reserve_huge_os_pages( size_t pages, double max_secs, size_t* pages_reserved ) mi_attr_noexcept
{
if (pages_reserved != NULL) *pages_reserved = 0;
if (max_secs==0) return ETIMEDOUT; // timeout
if (pages==0) return 0; // ok
if (!mi_atomic_cas_ptr_strong(&os_huge_reserved.start,(void*)1,NULL)) return ETIMEDOUT; // already reserved
@@ -894,22 +894,22 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs, size_t* pages_reser
uint8_t* addr = start; // current top of the allocations
for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE ) {
// allocate a page
void* p = NULL;
bool is_large = true;
#ifdef _WIN32
if (page==0) { mi_win_enable_large_os_pages(); }
p = mi_win_virtual_alloc(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE, true, true, &is_large);
#elif defined(MI_OS_USE_MMAP)
p = mi_unix_mmap(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_READ | PROT_WRITE, true, true, &is_large);
#else
#else
// always fail
#endif
// Did we succeed at a contiguous address?
if (p != addr) {
// no success, issue a warning and return with an error
if (p != NULL) {
_mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr);
_mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr);
_mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main );
}
else {
@@ -920,7 +920,7 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs, size_t* pages_reser
#endif
_mi_warning_message("could not allocate huge page %zu at 0x%p, error: %i\n", page, addr, err);
}
return ENOMEM;
}
// success, record it
if (page==0) {
@@ -930,20 +930,19 @@ int mi_reserve_huge_os_pages( size_t pages, double max_secs, size_t* pages_reser
else {
mi_atomic_addu(&os_huge_reserved.reserved,MI_HUGE_OS_PAGE_SIZE);
}
_mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
_mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
if (pages_reserved != NULL) { *pages_reserved = page + 1; }
// check for timeout
double elapsed = _mi_clock_end(start_t);
if (elapsed > max_secs) return ETIMEDOUT;
if (page >= 1) {
double estimate = ((elapsed / (double)(page+1)) * (double)pages);
if (estimate > 1.5*max_secs) return ETIMEDOUT; // seems like we are going to timeout
}
}
_mi_verbose_message("reserved %zu huge pages\n", pages);
return 0;
}
#endif
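The early-timeout logic above extrapolates linearly from the pages reserved so far; a worked sketch of the arithmetic:

#include <stddef.h>

static double projected_total(double elapsed, size_t page, size_t pages) {
  // time so far, averaged per page, scaled to the full request
  return (elapsed / (double)(page + 1)) * (double)pages;
}
// e.g. projected_total(2.0, 99, 1000) == 20.0; with max_secs == 10 the loop
// returns ETIMEDOUT early since 20.0 > 1.5 * 10.0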

View File

@@ -2,6 +2,7 @@
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdint.h>
#include <mimalloc.h>
#include <mimalloc-override.h> // redefines malloc etc.
@@ -172,6 +173,7 @@ void mi_bins() {
int main() {
mi_version();
mi_bins();
void* p1 = malloc(78);
void* p2 = malloc(24);
free(p1);
@@ -194,3 +196,4 @@ int main() {
mi_stats_print(NULL);
return 0;
}

View File

@@ -2,6 +2,7 @@
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdint.h>
#include <mimalloc.h>
#include <new>
@@ -66,3 +67,5 @@ public:
};
static Static s = Static();