increase thread data cache to 16

daanx 2023-04-15 19:49:14 -07:00
parent 8027fcfecd
commit 0174d19af3
4 changed files with 29 additions and 22 deletions

include/mimalloc/internal.h

@@ -80,10 +80,11 @@ extern mi_decl_cache_align mi_stats_t _mi_stats_main;
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
size_t _mi_current_thread_count(void);
bool _mi_preloading(void); // true while the C runtime is not ready
bool _mi_preloading(void); // true while the C runtime is not initialized yet
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
void _mi_thread_done(mi_heap_t* heap);
void _mi_thread_data_collect(void);
// os.c
void _mi_os_init(void); // called from process init
@@ -119,8 +120,9 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_o
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);
bool _mi_arena_contains(const void* p);
void _mi_arena_collect(bool force_purge, mi_stats_t* stats);
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
// "segment-map.c"
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
@@ -168,8 +170,8 @@ uint8_t _mi_bin(size_t size); // for stats
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
void _mi_heap_destroy_all(void);
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
void _mi_heap_unsafe_destroy_all(void);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);

src/arena.c

@@ -703,7 +703,7 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
static void mi_arenas_destroy(void) {
static void mi_arenas_unsafe_destroy(void) {
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
size_t new_max_arena = 0;
for (size_t i = 0; i < max_arena; i++) {
@@ -730,15 +730,19 @@ static void mi_arenas_destroy(void) {
mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
}
void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats) {
if (free_arenas) {
mi_arenas_destroy();
}
mi_arenas_try_purge(force_decommit, true, stats);
// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
mi_arenas_try_purge(force_purge, true /* visit all */, stats);
}
// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
// for dynamic libraries that are unloaded and need to release all their allocated memory.
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
mi_arenas_unsafe_destroy();
_mi_arena_collect(true /* force purge */, stats); // purge non-owned arenas
}
// Is a pointer inside any of our arenas?
bool _mi_arena_contains(const void* p) {
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
for (size_t i = 0; i < max_arena; i++) {

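The `mi_option_destroy_on_exit` path referenced in the comments above is opt-in. As a rough usage sketch (the tiny program is illustrative; it relies on mimalloc's public `mi_option_enable` API and, in the comment, on the usual `MIMALLOC_<OPTION>` environment-variable convention), a program or shared library that wants all owned memory released on unload could enable it early:

#include <mimalloc.h>

int main(void) {
  // Enable early, before allocating: at process (or library) exit mimalloc
  // will then forcefully destroy owned heaps and arenas, as wired up in
  // mi_process_done further below.
  mi_option_enable(mi_option_destroy_on_exit);
  // Equivalent (assumed naming convention): set MIMALLOC_DESTROY_ON_EXIT=1 in the environment.

  void* p = mi_malloc(256);
  mi_free(p);
  return 0;
}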
src/heap.c

@@ -151,14 +151,15 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
// collect segment caches
// collect segment and thread caches
if (collect >= MI_FORCE) {
_mi_segment_thread_collect(&heap->tld->segments);
}
// collect regions on program-exit (or shared library unload)
// collect arenas on program-exit (or shared library unload)
if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
_mi_arena_collect(false /* destroy arenas */, true /* force purge */, &heap->tld->stats);
_mi_thread_data_collect(); // collect thread data cache
_mi_arena_collect(true /* force purge */, &heap->tld->stats);
}
}
@@ -354,7 +355,8 @@ void mi_heap_destroy(mi_heap_t* heap) {
}
}
void _mi_heap_destroy_all(void) {
// forcefully destroy all heaps in the current thread
void _mi_heap_unsafe_destroy_all(void) {
mi_heap_t* bheap = mi_heap_get_backing();
mi_heap_t* curr = bheap->tld->heaps;
while (curr != NULL) {

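With the change above, a forced collection on the main thread's backing heap now also drains the thread metadata cache (via `_mi_thread_data_collect`) in addition to purging arenas. A minimal sketch of triggering this from application code through the public `mi_collect` API (the example program itself is illustrative):

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(1024);
  mi_free(p);
  // force == true: collects segment and thread caches; on the main thread's
  // backing heap it additionally purges arenas and the thread data cache.
  mi_collect(true);
  return 0;
}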
src/init.c

@@ -177,7 +177,6 @@ mi_heap_t* _mi_heap_main_get(void) {
typedef struct mi_thread_data_s {
mi_heap_t heap; // must come first due to cast in `_mi_heap_done`
mi_tld_t tld;
mi_memid_t memid;
} mi_thread_data_t;
@@ -186,7 +185,7 @@ typedef struct mi_thread_data_s {
// destroy many OS threads, this may causes too much overhead
// per thread so we maintain a small cache of recently freed metadata.
#define TD_CACHE_SIZE (8)
#define TD_CACHE_SIZE (16)
static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];
static mi_thread_data_t* mi_thread_data_zalloc(void) {
@@ -238,7 +237,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
_mi_os_free(tdfree, sizeof(mi_thread_data_t), &_mi_stats_main);
}
static void mi_thread_data_collect(void) {
void _mi_thread_data_collect(void) {
// free all thread metadata from the cache
for (int i = 0; i < TD_CACHE_SIZE; i++) {
mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
@@ -323,7 +322,6 @@ static bool _mi_heap_done(mi_heap_t* heap) {
mi_thread_data_free((mi_thread_data_t*)heap);
}
else {
mi_thread_data_collect(); // free cached thread metadata
#if 0
// never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
// there may still be delete/free calls after the mi_fls_done is called. Issue #207
@@ -589,7 +587,7 @@ static void mi_cdecl mi_process_done(void) {
_mi_prim_thread_done_auto_done();
#ifndef MI_SKIP_COLLECT_ON_EXIT
#if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB)
#if (MI_DEBUG || !defined(MI_SHARED_LIB))
// free all memory if possible on process exit. This is not needed for a stand-alone process
// but should be done if mimalloc is statically linked into another shared library which
// is repeatedly loaded/unloaded, see issue #281.
@@ -601,8 +599,9 @@ static void mi_cdecl mi_process_done(void) {
// since after process_done there might still be other code running that calls `free` (like at_exit routines,
// or C-runtime termination code.
if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
_mi_heap_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!)
_mi_arena_collect(true /* destroy (owned) arenas */, true /* purge the rest */, &_mi_heap_main_get()->tld->stats);
mi_collect(true /* force */);
_mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!)
_mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats);
}
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
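For context on the `td_cache` change above: it is a small fixed-size array of atomic slots, freed `mi_thread_data_t` blocks are parked in an empty slot instead of going back to the OS, and new threads try to reuse a parked block first. Below is a stand-alone sketch of that pattern using C11 atomics; the names (`CACHE_SIZE`, `block_t`, `slot_cache`, `block_alloc`, `block_free`, `block_cache_collect`) are illustrative only, and the real code goes through mimalloc's `mi_atomic_*` wrappers, allocates from the OS layer, and accounts the memory in `_mi_stats_main`.

#include <stdatomic.h>
#include <stdlib.h>

#define CACHE_SIZE 16                             // mirrors the new TD_CACHE_SIZE

typedef struct block_s { int data; } block_t;     // stand-in for mi_thread_data_t

static _Atomic(block_t*) slot_cache[CACHE_SIZE];

// Try to reuse a cached block; fall back to a fresh allocation.
static block_t* block_alloc(void) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    block_t* b = atomic_exchange(&slot_cache[i], (block_t*)NULL);  // claim slot i
    if (b != NULL) return b;
  }
  return (block_t*)calloc(1, sizeof(block_t));
}

// Park a freed block in an empty slot, or release it if the cache is full.
static void block_free(block_t* b) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    block_t* expected = NULL;
    if (atomic_compare_exchange_strong(&slot_cache[i], &expected, b)) return;
  }
  free(b);
}

// Drain the cache, e.g. on a forced collect or at process exit.
static void block_cache_collect(void) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    block_t* b = atomic_exchange(&slot_cache[i], (block_t*)NULL);
    if (b != NULL) free(b);
  }
}

int main(void) {
  block_t* b = block_alloc();
  block_free(b);            // parked in the cache, not released yet
  block_cache_collect();    // now actually freed
  return 0;
}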