Mirror of https://github.com/microsoft/mimalloc.git (synced 2024-12-25 20:14:12 +08:00)
Merge branch 'dev' into dev-slice
Commit 1daa4ea627
@@ -328,7 +328,7 @@ typedef enum mi_option_e {
   mi_option_allow_large_os_pages,        // allow large (2 or 4 MiB) OS pages, implies eager commit. If false, also disables THP for the process.
   mi_option_reserve_huge_os_pages,       // reserve N huge OS pages (1GiB pages) at startup
   mi_option_reserve_huge_os_pages_at,    // reserve huge OS pages at a specific NUMA node
-  mi_option_reserve_os_memory,           // reserve specified amount of OS memory in an arena at startup
+  mi_option_reserve_os_memory,           // reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use `mi_option_get_size`)
   mi_option_deprecated_segment_cache,
   mi_option_deprecated_page_reset,
   mi_option_abandoned_page_purge,        // immediately purge delayed purges on thread termination

@@ -342,11 +342,12 @@ typedef enum mi_option_e {
   mi_option_max_warnings,                // issue at most N warning messages
   mi_option_max_segment_reclaim,         // max. percentage of the abandoned segments can be reclaimed per try (=10%)
   mi_option_destroy_on_exit,             // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe
-  mi_option_arena_reserve,               // initial memory size in KiB for arena reservation (= 1 GiB on 64-bit)
+  mi_option_arena_reserve,               // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`)
   mi_option_arena_purge_mult,            // multiplier for `purge_delay` for the purging delay for arenas (=10)
   mi_option_purge_extend_delay,
   mi_option_abandoned_reclaim_on_free,   // allow to reclaim an abandoned segment on a free (=1)
   mi_option_disallow_arena_alloc,        // 1 = do not use arena's for allocation (except if using specific arena id's)
+  mi_option_retry_on_oom,                // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
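Note (not part of the diff): since `mi_option_reserve_os_memory` and `mi_option_arena_reserve` are stored internally in KiB, they should be read through `mi_option_get_size` rather than `mi_option_get`. A minimal sketch of the difference, using only the public mimalloc API:

// sketch.c -- illustration only; assumes mimalloc is linked and <mimalloc.h> is on the include path.
// Note: reserve_os_memory is normally set via the environment before startup; setting it here
// is only to show the KiB-vs-bytes difference between the two getters.
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_option_set(mi_option_reserve_os_memory, 256 * 1024);          // raw value is in KiB (= 256 MiB)
  long   kib   = mi_option_get(mi_option_reserve_os_memory);       // 262144 (KiB, the raw value)
  size_t bytes = mi_option_get_size(mi_option_reserve_os_memory);  // 268435456 (scaled to bytes)
  printf("raw = %ld KiB, size = %zu bytes\n", kib, bytes);
  return 0;
}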
@@ -379,10 +379,10 @@ static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
 }
 #else /* __builtin_umul_overflow is unavailable */
 static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
-  #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
+  #define MI_MUL_COULD_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
   *total = count * size;
   // note: gcc/clang optimize this to directly check the overflow flag
-  return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
+  return ((size >= MI_MUL_COULD_OVERFLOW || count >= MI_MUL_COULD_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
 }
 #endif
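The renamed `MI_MUL_COULD_OVERFLOW` constant is sqrt(SIZE_MAX): if both factors are below it the product cannot wrap, so the division check only runs when at least one factor is large. A standalone sketch of this fallback check (hypothetical test harness, not mimalloc code):

// overflow_check.c -- standalone sketch of the portable multiply-overflow test shown above.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MUL_COULD_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)

static bool mul_overflow(size_t count, size_t size, size_t* total) {
  *total = count * size;  // wraps on overflow; the check below detects that case
  return ((size >= MUL_COULD_OVERFLOW || count >= MUL_COULD_OVERFLOW)
          && size > 0 && (SIZE_MAX / size) < count);
}

int main(void) {
  size_t total;
  printf("%d\n", mul_overflow(1000, 1000, &total));       // 0: both factors small, no overflow
  printf("%d\n", mul_overflow(SIZE_MAX / 2, 3, &total));   // 1: overflow detected
  return 0;
}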
@@ -51,12 +51,13 @@ typedef struct mi_arena_s {
   bool exclusive;                        // only allow allocations if specifically for this arena
   bool is_large;                         // memory area consists of large- or huge OS pages (always committed)
   _Atomic(size_t) search_idx;            // optimization to start the search for free blocks
   _Atomic(mi_msecs_t) purge_expire;      // expiration time when blocks should be decommitted from `blocks_decommit`.
   mi_bitmap_field_t* blocks_dirty;       // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed;   // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t* blocks_purge;       // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
+  mi_bitmap_field_t* blocks_abandoned;   // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here)
   mi_bitmap_field_t  blocks_inuse[1];    // in-place bitmap of in-use blocks (of size `field_count`)
   // do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields.
 } mi_arena_t;
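The `blocks_inuse[1]` field together with the closing comment implies that all bitmaps are carved out of a single trailing allocation behind the struct, which is why no fields may be added after it. A hedged sketch of that layout pattern (hypothetical names and allocation code, simplified from how the actual arena metadata is set up):

// arena_layout_sketch.c -- illustration of a struct with several bitmaps stored after its last field.
#include <stdlib.h>

typedef size_t bitmap_field_t;

typedef struct arena_meta_s {
  size_t          field_count;       // number of bitmap fields per bitmap
  bitmap_field_t* blocks_dirty;      // points into the trailing area
  bitmap_field_t* blocks_committed;
  bitmap_field_t* blocks_purge;
  bitmap_field_t* blocks_abandoned;
  bitmap_field_t  blocks_inuse[1];   // first of the trailing bitmaps (in-place)
} arena_meta_t;

static arena_meta_t* arena_meta_alloc(size_t field_count) {
  const size_t bitmaps = 5;  // inuse, dirty, committed, purge, abandoned
  // one allocation: the struct itself plus (bitmaps*field_count - 1) extra bitmap fields
  arena_meta_t* a = calloc(1, sizeof(arena_meta_t)
                              + (bitmaps * field_count - 1) * sizeof(bitmap_field_t));
  if (a == NULL) return NULL;
  a->field_count      = field_count;
  a->blocks_dirty     = &a->blocks_inuse[1 * field_count];
  a->blocks_committed = &a->blocks_inuse[2 * field_count];
  a->blocks_purge     = &a->blocks_inuse[3 * field_count];
  a->blocks_abandoned = &a->blocks_inuse[4 * field_count];
  return a;
}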
@@ -65,7 +65,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) },  // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },          // per 1GiB huge pages
   {-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) },       // reserve huge pages at node N
-  { 0, UNINIT, MI_OPTION(reserve_os_memory) },              // reserve OS memory in advance
+  { 0, UNINIT, MI_OPTION(reserve_os_memory) },              // reserve N KiB OS memory in advance (use `option_get_size`)
   { 0, UNINIT, MI_OPTION(deprecated_segment_cache) },       // cache N segments per thread
   { 0, UNINIT, MI_OPTION(deprecated_page_reset) },          // reset page memory on free
   { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) },  // reset free page memory when a thread terminates

@@ -79,19 +79,20 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },                 // 0 = use available numa nodes, otherwise use at most N nodes.
   { 0, UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) },  // 1 = do not use OS memory for allocation (but only reserved arenas)
   { 100, UNINIT, MI_OPTION(os_tag) },                       // only apple specific for now but might serve more or less related purpose
-  { 16, UNINIT, MI_OPTION(max_errors) },                    // maximum errors that are output
-  { 16, UNINIT, MI_OPTION(max_warnings) },                  // maximum warnings that are output
-  { 10, UNINIT, MI_OPTION(max_segment_reclaim)},            // max. percentage of the abandoned segments per try.
+  { 32, UNINIT, MI_OPTION(max_errors) },                    // maximum errors that are output
+  { 32, UNINIT, MI_OPTION(max_warnings) },                  // maximum warnings that are output
+  { 10, UNINIT, MI_OPTION(max_segment_reclaim)},            // max. percentage of the abandoned segments to be reclaimed per try.
   { 0, UNINIT, MI_OPTION(destroy_on_exit)},                 // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
   #if (MI_INTPTR_SIZE>4)
-  { 1024L * 1024L, UNINIT, MI_OPTION(arena_reserve) },      // reserve memory N KiB at a time
+  { 1024L*1024L, UNINIT, MI_OPTION(arena_reserve) },        // reserve memory N KiB at a time (=1GiB) (use `option_get_size`)
   #else
-  { 128L * 1024L, UNINIT, MI_OPTION(arena_reserve) },
+  { 128L*1024L, UNINIT, MI_OPTION(arena_reserve) },         // =128MiB on 32-bit
   #endif
   { 10, UNINIT, MI_OPTION(arena_purge_mult) },              // purge delay multiplier for arena's
   { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
   { 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },      // reclaim an abandoned segment on a free
   { 0, UNINIT, MI_OPTION(disallow_arena_alloc) },           // 1 = do not use arena's for allocation (except if using specific arena id's)
+  { 400, UNINIT, MI_OPTION(retry_on_oom) },                 // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
 };
 
 static void mi_option_init(mi_option_desc_t* desc);
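Note (not part of the diff): the defaults in this table can be overridden through the environment (the option name upper-cased with a `MIMALLOC_` prefix) or programmatically before the allocator is used. A brief sketch:

// Hedged usage sketch: override defaults either via the environment before the process starts, e.g.
//   MIMALLOC_MAX_WARNINGS=64  MIMALLOC_ARENA_RESERVE=2GiB  ./myprogram
// or programmatically, ideally before the first allocation:
#include <mimalloc.h>

static void configure_mimalloc(void) {
  mi_option_set(mi_option_max_warnings, 64);        // plain count
  mi_option_set(mi_option_arena_reserve, 2097152);  // KiB-based option: 2 GiB expressed in KiB
  mi_option_set(mi_option_retry_on_oom, 0);         // disable the Windows OOM retry added later in this diff
}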
@@ -135,8 +136,12 @@ mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long max)
 
 mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) {
-  mi_assert_internal(mi_option_has_size_in_kib(option));
-  long x = mi_option_get(option);
-  return (x < 0 ? 0 : (size_t)x * MI_KiB);
+  const long x = mi_option_get(option);
+  size_t size = (x < 0 ? 0 : (size_t)x);
+  if (mi_option_has_size_in_kib(option)) {
+    size *= MI_KiB;
+  }
+  return size;
 }
 
 void mi_option_set(mi_option_t option, long value) {
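A worked example of the new scaling (a sketch assuming the 64-bit defaults from the table above and no environment overrides): KiB-based options are multiplied by `MI_KiB`, while plain counts are now returned unscaled instead of tripping the removed assertion.

// option_size_sketch.c -- illustration only.
#include <mimalloc.h>
#include <assert.h>

static void check_option_sizes(void) {
  // arena_reserve is KiB-based: default 1024L*1024L KiB == 1 GiB in bytes
  assert(mi_option_get_size(mi_option_arena_reserve) == (size_t)1024*1024*1024);
  // max_warnings is a plain count: returned unscaled (the old code asserted on this case)
  assert(mi_option_get_size(mi_option_max_warnings) == 32);
}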
@@ -479,14 +484,20 @@ static void mi_option_init(mi_option_desc_t* desc) {
   else {
     char* end = buf;
     long value = strtol(buf, &end, 10);
-    if (desc->option == mi_option_reserve_os_memory || desc->option == mi_option_arena_reserve) {
-      // this option is interpreted in KiB to prevent overflow of `long`
+    if (mi_option_has_size_in_kib(desc->option)) {
+      // this option is interpreted in KiB to prevent overflow of `long` for large allocations
+      // (long is 32-bit on 64-bit windows, which allows for 4TiB max.)
+      size_t size = (value < 0 ? 0 : (size_t)value);
+      bool overflow = false;
       if (*end == 'K') { end++; }
-      else if (*end == 'M') { value *= MI_KiB; end++; }
-      else if (*end == 'G') { value *= MI_MiB; end++; }
-      else { value = (value + MI_KiB - 1) / MI_KiB; }
-      if (end[0] == 'I' && end[1] == 'B') { end += 2; }
-      else if (*end == 'B') { end++; }
+      else if (*end == 'M') { overflow = mi_mul_overflow(size,MI_KiB,&size); end++; }
+      else if (*end == 'G') { overflow = mi_mul_overflow(size,MI_MiB,&size); end++; }
+      else if (*end == 'T') { overflow = mi_mul_overflow(size,MI_GiB,&size); end++; }
+      else { size = (size + MI_KiB - 1) / MI_KiB; }
+      if (end[0] == 'I' && end[1] == 'B') { end += 2; }  // KiB, MiB, GiB, TiB
+      else if (*end == 'B') { end++; }                   // Kb, Mb, Gb, Tb
+      if (overflow || size > MI_MAX_ALLOC_SIZE) { size = (MI_MAX_ALLOC_SIZE / MI_KiB); }
+      value = (size > LONG_MAX ? LONG_MAX : (long)size);
     }
     if (*end == 0) {
       desc->value = value;
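A standalone sketch of the size-suffix parsing above (hypothetical helper; it omits the `MI_MAX_ALLOC_SIZE` clamp and assumes the input was already upper-cased, as `mi_option_init` does with the environment value):

// parse_size_sketch.c -- returns the parsed value in KiB, as the option code stores it.
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define KiB ((size_t)1024)
#define MiB (KiB*KiB)
#define GiB (MiB*KiB)

static long parse_size_as_kib(const char* buf) {
  char* end = (char*)buf;
  long value = strtol(buf, &end, 10);
  size_t size = (value < 0 ? 0 : (size_t)value);
  if (*end == 'K') { end++; }                        // already in KiB
  else if (*end == 'M') { size *= KiB; end++; }      // MiB -> KiB
  else if (*end == 'G') { size *= MiB; end++; }      // GiB -> KiB
  else if (*end == 'T') { size *= GiB; end++; }      // TiB -> KiB
  else { size = (size + KiB - 1) / KiB; }            // plain bytes, round up to KiB
  if (end[0] == 'I' && end[1] == 'B') { end += 2; }  // accept KIB/MIB/GIB/TIB
  else if (*end == 'B') { end++; }                   // accept KB/MB/GB/TB
  return (size > LONG_MAX ? LONG_MAX : (long)size);
}

int main(void) {
  printf("%ld\n", parse_size_as_kib("2GIB"));  // 2097152 (KiB)
  printf("%ld\n", parse_size_as_kib("512M"));  // 524288  (KiB)
  printf("%ld\n", parse_size_as_kib("4096"));  // 4       (KiB, bytes rounded up)
  return 0;
}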
@@ -178,7 +178,7 @@ int _mi_prim_free(void* addr, size_t size ) {
 // VirtualAlloc
 //---------------------------------------------
 
-static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+static void* win_virtual_alloc_prim_once(void* addr, size_t size, size_t try_alignment, DWORD flags) {
   #if (MI_INTPTR_SIZE >= 8)
   // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations
   if (addr == NULL) {
@@ -200,13 +200,53 @@ static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
     param.Arg.Pointer = &reqs;
     void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
     if (p != NULL) return p;
-    _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
+    _mi_warning_message("unable to allocate aligned OS memory (0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
     // fall through on error
   }
   // last resort
   return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
 }
 
+static bool win_is_out_of_memory_error(DWORD err) {
+  switch (err) {
+    case ERROR_COMMITMENT_MINIMUM:
+    case ERROR_COMMITMENT_LIMIT:
+    case ERROR_PAGEFILE_QUOTA:
+    case ERROR_NOT_ENOUGH_MEMORY:
+      return true;
+    default:
+      return false;
+  }
+}
+
+static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+  long max_retry_msecs = mi_option_get_clamp(mi_option_retry_on_oom, 0, 2000);  // at most 2 seconds
+  if (max_retry_msecs == 1) { max_retry_msecs = 100; }  // if one sets the option to "true"
+  for (long tries = 1; tries <= 10; tries++) {  // try at most 10 times (=2200ms)
+    void* p = win_virtual_alloc_prim_once(addr, size, try_alignment, flags);
+    if (p != NULL) {
+      // success, return the address
+      return p;
+    }
+    else if (max_retry_msecs > 0 && (try_alignment <= 2*MI_SEGMENT_ALIGN) &&
+             (flags&MEM_COMMIT) != 0 && (flags&MEM_LARGE_PAGES) == 0 &&
+             win_is_out_of_memory_error(GetLastError())) {
+      // if committing regular memory and being out-of-memory,
+      // keep trying for a bit in case memory frees up after all. See issue #894
+      _mi_warning_message("out-of-memory on OS allocation, try again... (attempt %lu, 0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", tries, size, GetLastError(), addr, try_alignment, flags);
+      long sleep_msecs = tries*40;  // increasing waits
+      if (sleep_msecs > max_retry_msecs) { sleep_msecs = max_retry_msecs; }
+      max_retry_msecs -= sleep_msecs;
+      Sleep(sleep_msecs);
+    }
+    else {
+      // otherwise return with an error
+      break;
+    }
+  }
+  return NULL;
+}
+
 static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
   mi_assert_internal(!(large_only && !allow_large));
   static _Atomic(size_t) large_page_try_ok; // = 0;
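Usage note (not part of the diff): the retry loop above is driven by the new `mi_option_retry_on_oom` option, so it can be tuned or disabled per process, e.g. with `MIMALLOC_RETRY_ON_OOM=0` in the environment, or programmatically:

// retry_config_sketch.c -- illustration only.
#include <mimalloc.h>

static void disable_oom_retry(void) {
  mi_option_set(mi_option_retry_on_oom, 0);  // 0 disables retries (default is 400 ms, Windows only)
}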