merge from dev-abandon

daanx 2024-03-01 15:15:24 -08:00
commit f6320bd3be
6 changed files with 25 additions and 14 deletions

View File: CMakeLists.txt

@@ -1,6 +1,6 @@
set(mi_version_major 2)
set(mi_version_minor 1)
-set(mi_version_patch 2)
+set(mi_version_patch 4)
set(mi_version ${mi_version_major}.${mi_version_minor})
set(PACKAGE_VERSION ${mi_version})

View File: include/mimalloc.h

@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
-#define MI_MALLOC_VERSION 212 // major + 2 digits minor
+#define MI_MALLOC_VERSION 214 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
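Per the comment, 214 decodes as major 2 plus the two minor digits 14, tracking release 2.1.4 from the CMake bump above. A minimal hedged sketch of checking the linked library against the compile-time header (mi_version() is part of the public API; the message text is illustrative only):

```cpp
#include <mimalloc.h>
#include <cstdio>

int main() {
  // mi_version() reports the loaded library; MI_MALLOC_VERSION is what
  // this translation unit was compiled against (214 = release 2.1.4)
  if (mi_version() != MI_MALLOC_VERSION) {
    std::printf("mimalloc: compiled against %d, running %d\n",
                MI_MALLOC_VERSION, mi_version());
  }
  return 0;
}
```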
@@ -332,18 +332,18 @@ typedef enum mi_option_e {
mi_option_deprecated_segment_cache,
mi_option_deprecated_page_reset,
mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay,
mi_option_purge_delay, // memory purging is delayed by N milliseconds; use 0 for immediate purging or -1 for no purging at all.
mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
mi_option_os_tag, // tag used for OS logging (macOS only for now)
mi_option_max_errors, // issue at most N error messages
mi_option_max_warnings, // issue at most N warning messages
mi_option_max_segment_reclaim,
mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit)
mi_option_arena_purge_mult,
mi_option_purge_extend_delay,
mi_option_abandoned_reclaim_on_free, // reclaim abandoned segments on a free
_mi_option_last,
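These options are runtime-tunable through the public option API (or the matching MIMALLOC_-prefixed environment variables). A small illustrative sketch, e.g. for the newly added mi_option_abandoned_reclaim_on_free:

```cpp
#include <mimalloc.h>
#include <cstdio>

int main() {
  // boolean options can be toggled with mi_option_enable/mi_option_disable ...
  mi_option_enable(mi_option_abandoned_reclaim_on_free);
  // ... and numeric ones read/written with mi_option_get/mi_option_set
  std::printf("purge delay: %ld ms\n", mi_option_get(mi_option_purge_delay));
  return 0;
}
```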
@@ -514,7 +514,7 @@ template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : publi
protected:
std::shared_ptr<mi_heap_t> heap;
template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
_mi_heap_stl_allocator_common() {
mi_heap_t* hp = mi_heap_new();
this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
@@ -531,7 +531,7 @@ private:
template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
using typename _mi_heap_stl_allocator_common<T, false>::size_type;
mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { } // creates fresh heap that is deleted when the destructor is called
mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { } // no delete nor destroy on the passed in heap
template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }
mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
@@ -548,7 +548,7 @@ template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x,
template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
using typename _mi_heap_stl_allocator_common<T, true>::size_type;
mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }
mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
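Usage note on the two flavors in these hunks: mi_heap_stl_allocator deletes its fresh heap on teardown (individual frees stay valid), while mi_heap_destroy_stl_allocator destroys the heap wholesale, releasing all of its blocks at once. A short sketch:

```cpp
#include <mimalloc.h>
#include <vector>

int main() {
  // fresh heap, deleted when the last allocator copy goes out of scope
  std::vector<int, mi_heap_stl_allocator<int>> v;
  v.push_back(42);

  // fresh heap that is destroyed on teardown: all its blocks are freed at
  // once, so no pointer into it may outlive the allocator
  std::vector<int, mi_heap_destroy_stl_allocator<int>> w;
  w.push_back(7);
  return 0;
}
```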

View File: include/mimalloc/internal.h

@@ -127,6 +127,7 @@ void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
bool _mi_arena_segment_clear_abandoned(mi_memid_t memid);
void _mi_arena_segment_mark_abandoned(mi_memid_t memid);
+size_t _mi_arena_segment_abandoned_count(void);
typedef struct mi_arena_field_cursor_s { // abstract
mi_arena_id_t start;
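The cursor declared here drives the reclaim loop in src/segment.c (last hunk below). Pieced together from the calls visible in this commit, the internal iteration pattern looks roughly like this (a sketch, not buildable outside the mimalloc sources):

```cpp
// internal-API sketch: visit abandoned segments starting at this heap's arena
static void visit_abandoned(mi_heap_t* heap) {
  mi_arena_field_cursor_t current;
  _mi_arena_field_cursor_init(heap, &current);
  mi_segment_t* segment;
  while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
    // the caller now owns `segment`: reclaim it, or put it back with
    // _mi_arena_segment_mark_abandoned(segment->memid)
  }
}
```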

View File: src/arena.c

@@ -736,14 +736,18 @@ bool _mi_arena_contains(const void* p) {
This is used to atomically abandon/reclaim segments
(and crosses the arena API but it is convenient to have here).
Abandoned segments still have live blocks; they get reclaimed
-when a thread frees in it, or when a thread needs a fresh
+when a thread frees a block in it, or when a thread needs a fresh
segment; these threads scan the abandoned segments through
the arena bitmaps.
----------------------------------------------------------- */
-// Maintain these for debug purposes
+// Maintain a count of all abandoned segments
static mi_decl_cache_align _Atomic(size_t) abandoned_count;
+size_t _mi_arena_segment_abandoned_count(void) {
+  return mi_atomic_load_relaxed(&abandoned_count);
+}
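The increments themselves are not shown in this hunk; presumably the mark/clear functions adjust the counter when they flip a segment's bit in blocks_abandoned. A hedged sketch of that pattern using mimalloc's relaxed atomic helpers (the exact call sites are an assumption):

```cpp
// assumption: the mark path bumps the counter next to the bitmap update
void _mi_arena_segment_mark_abandoned(mi_memid_t memid) {
  // ... locate the arena and set the segment's bit in blocks_abandoned ...
  mi_atomic_increment_relaxed(&abandoned_count);
}
```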
// reclaim a specific abandoned segment; returns `true` on success.
bool _mi_arena_segment_clear_abandoned(mi_memid_t memid )
{
@@ -888,7 +892,7 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int
// consecutive bitmaps
arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap
-arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandonde bitmap
+arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap
arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]); // just after committed bitmap
// initialize committed bitmap?
if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
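For orientation, the five bitmaps share one consecutive allocation, `fields` bitmap words apiece. A hypothetical helper making the offset arithmetic explicit (mi_arena_t and mi_bitmap_field_t are internal types; this helper does not exist in the sources):

```cpp
// hypothetical: which = 0 inuse, 1 dirty, 2 abandoned, 3 committed, 4 purge
static inline mi_bitmap_field_t* arena_bitmap(mi_arena_t* arena,
                                              size_t which, size_t fields) {
  return &arena->blocks_inuse[which * fields];
}
```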

View File: src/options.c

@@ -81,7 +81,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 100, UNINIT, MI_OPTION(os_tag) }, // only Apple specific for now, but might serve a more general purpose
{ 16, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
{ 16, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
-{ 16, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. number of segment reclaims from the abandoned segments per try.
+{ 10, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments to reclaim per try.
{ 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
#if (MI_INTPTR_SIZE>4)
{ 1024L * 1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time
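Note the semantic change: the old default of 16 was an absolute reclaim count, while the new 10 is a percentage. Tuning it uses the standard option API (25 is just an example value; it can also be set via the MIMALLOC_MAX_SEGMENT_RECLAIM environment variable):

```cpp
#include <mimalloc.h>

int main() {
  // allow reclaiming up to 25% of the abandoned segments per try
  mi_option_set(mi_option_max_segment_reclaim, 25);
  return 0;
}
```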

View File: src/segment.c

@@ -1242,7 +1242,13 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
*reclaimed = false;
mi_segment_t* segment;
mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap,&current);
-long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 1024); // limit the work to bound allocation times
+// limit the tries to 10% (default) of the abandoned segments, with at least 8 and at most 1024 tries
+const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
+if (perc <= 0) return NULL;
+const size_t abandoned_count = _mi_arena_segment_abandoned_count();
+const size_t relative_count = (abandoned_count > 10000 ? (abandoned_count / 100) * perc : (abandoned_count * perc) / 100); // avoid overflow
+long max_tries = (long)(relative_count < 8 ? 8 : (relative_count > 1024 ? 1024 : relative_count));
while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
{
segment->abandoned_visits++;
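Working the new bound through by hand (illustrative values): 50 abandoned segments at the default 10% give relative_count = 5, so the floor of 8 tries applies; 20000 abandoned segments take the overflow-safe branch, (20000/100)*10 = 2000, which the ceiling clamps to 1024. A standalone restatement of the clamp:

```cpp
#include <cstdio>
#include <cstddef>

// standalone restatement of the max_tries computation in the hunk above
static long reclaim_tries(size_t abandoned_count, size_t perc) {
  if (perc == 0) return 0;
  const size_t relative_count =
      (abandoned_count > 10000 ? (abandoned_count / 100) * perc
                               : (abandoned_count * perc) / 100); // avoid overflow
  return (long)(relative_count < 8 ? 8
                                   : (relative_count > 1024 ? 1024 : relative_count));
}

int main() {
  std::printf("%ld\n", reclaim_tries(50, 10));    // 8    (floor)
  std::printf("%ld\n", reclaim_tries(20000, 10)); // 1024 (ceiling)
  return 0;
}
```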