mirror of https://github.com/microsoft/mimalloc.git
synced 2024-12-26 21:04:27 +08:00

commit 36da7e91c5: Merge branch 'dev' into dev-slice
src/init.c (23 changed lines)
@@ -226,7 +226,7 @@ static bool _mi_heap_init(void) {
     tld->segments.stats = &tld->stats;
     tld->segments.os = &tld->os;
     tld->os.stats = &tld->stats;
     _mi_heap_set_default_direct(heap);
   }
   return false;
 }
@@ -260,9 +260,8 @@ static bool _mi_heap_done(mi_heap_t* heap) {
     _mi_heap_collect_abandon(heap);
   }
-

   // merge stats
   _mi_stats_done(&heap->tld->stats);

   // free if not the main thread
   if (heap != &_mi_heap_main) {
@@ -362,18 +361,13 @@ void mi_thread_init(void) mi_attr_noexcept
 {
   // ensure our process has started already
   mi_process_init();

   // initialize the thread local default heap
   // (this will call `_mi_heap_set_default_direct` and thus set the
   //  fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
   if (_mi_heap_init()) return;  // returns true if already initialized

-  // don't further initialize for the main thread
-  if (_mi_is_main_thread()) return;
-
-  mi_heap_t* const heap = mi_get_default_heap();
-  if (mi_heap_is_initialized(heap)) { _mi_stat_increase(&heap->tld->stats.threads, 1); }
-
+  _mi_stat_increase(&_mi_stats_main.threads, 1);
   //_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id());
 }

@@ -382,14 +376,11 @@ void mi_thread_done(void) mi_attr_noexcept {
 }

 static void _mi_thread_done(mi_heap_t* heap) {
+  _mi_stat_decrease(&_mi_stats_main.threads, 1);
+
   // check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
   if (heap->thread_id != _mi_thread_id()) return;

-  // stats
-  if (!_mi_is_main_thread() && mi_heap_is_initialized(heap)) {
-    _mi_stat_decrease(&heap->tld->stats.threads, 1);
-  }
-
   // abandon the thread local heap
   if (_mi_heap_done(heap)) return;  // returns true if already ran
 }
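The two hunks above move per-thread bookkeeping into the single global `_mi_stats_main.threads` counter: mi_thread_init now increments it unconditionally, and _mi_thread_done decrements it before the thread-id check, so the count stays balanced even when a heap is torn down on behalf of another thread. A minimal standalone sketch of this pattern, with hypothetical names (my_thread_init, my_thread_done) rather than mimalloc's real code, using C11 atomics and pthreads:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_long g_thread_count = 0;   // plays the role of _mi_stats_main.threads

static void my_thread_init(void) {       // analogue of the new mi_thread_init path
  atomic_fetch_add_explicit(&g_thread_count, 1, memory_order_relaxed);
}

static void my_thread_done(void) {       // analogue of the new _mi_thread_done path
  atomic_fetch_sub_explicit(&g_thread_count, 1, memory_order_relaxed);
}

static void* worker(void* arg) {
  (void)arg;
  my_thread_init();
  // ... per-thread work would go here ...
  my_thread_done();
  return NULL;
}

int main(void) {
  pthread_t t[4];
  for (int i = 0; i < 4; i++) pthread_create(&t[i], NULL, worker, NULL);
  for (int i = 0; i < 4; i++) pthread_join(t[i], NULL);
  printf("live threads: %ld\n", atomic_load(&g_thread_count));  // prints 0
  return 0;
}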
src/os.c (34 changed lines)
@@ -601,14 +601,18 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
    OS API: alloc, free, alloc_aligned
 ----------------------------------------------------------- */

-void* _mi_os_alloc(size_t size, mi_stats_t* stats) {
+void* _mi_os_alloc(size_t size, mi_stats_t* tld_stats) {
+  UNUSED(tld_stats);
+  mi_stats_t* stats = &_mi_stats_main;
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
   bool is_large = false;
   return mi_os_mem_alloc(size, 0, true, false, &is_large, stats);
 }

-void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats) {
+void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* tld_stats) {
+  UNUSED(tld_stats);
+  mi_stats_t* stats = &_mi_stats_main;
   if (size == 0 || p == NULL) return;
   size = _mi_os_good_alloc_size(size);
   mi_os_mem_free(p, size, was_committed, stats);
@@ -628,7 +632,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* lar
     allow_large = *large;
     *large = false;
   }
-  return mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, (large!=NULL?large:&allow_large), tld->stats);
+  return mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, (large!=NULL?large:&allow_large), &_mi_stats_main /*tld->stats*/ );
 }


@@ -685,11 +689,11 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
   if (csize == 0) return true;  // || _mi_os_is_huge_reserved(addr))
   int err = 0;
   if (commit) {
-    _mi_stat_increase(&stats->committed, csize);
+    _mi_stat_increase(&stats->committed, size);  // use size for precise commit vs. decommit
     _mi_stat_counter_increase(&stats->commit_calls, 1);
   }
   else {
-    _mi_stat_decrease(&stats->committed, csize);
+    _mi_stat_decrease(&stats->committed, size);
   }

   #if defined(_WIN32)
@@ -728,16 +732,20 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
   return (err == 0);
 }

-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
+  UNUSED(tld_stats);
+  mi_stats_t* stats = &_mi_stats_main;
   return mi_os_commitx(addr, size, true, false /* liberal */, is_zero, stats);
 }

-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) {
+bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
+  UNUSED(tld_stats);
+  mi_stats_t* stats = &_mi_stats_main;
   bool is_zero;
   return mi_os_commitx(addr, size, false, true /* conservative */, &is_zero, stats);
 }

-bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
+static bool mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
   return mi_os_commitx(addr, size, true, true /* conservative */, is_zero, stats);
 }

@@ -797,7 +805,9 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
 // but may be used later again. This will release physical memory
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
+bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) {
+  UNUSED(tld_stats);
+  mi_stats_t* stats = &_mi_stats_main;
   if (mi_option_is_enabled(mi_option_reset_decommits)) {
     return _mi_os_decommit(addr, size, stats);
   }
@@ -806,9 +816,11 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
   }
 }

-bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
+bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
+  UNUSED(tld_stats);
+  mi_stats_t* stats = &_mi_stats_main;
   if (mi_option_is_enabled(mi_option_reset_decommits)) {
-    return _mi_os_commit_unreset(addr, size, is_zero, stats);  // re-commit it (conservatively!)
+    return mi_os_commit_unreset(addr, size, is_zero, stats);  // re-commit it (conservatively!)
   }
   else {
     *is_zero = false;
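All of the _mi_os_* hunks above apply one pattern: each entry point keeps its `mi_stats_t*` parameter (renamed to `tld_stats` and explicitly marked UNUSED) so that call sites do not change, but the statistics themselves are recorded in the global `_mi_stats_main`, keeping OS commit/decommit accounting out of thread-local stat structs. A minimal sketch of the pattern; `my_os_commit`, `my_stats_t`, and `g_stats_main` are stand-in names, not mimalloc's definitions:

#include <stddef.h>
#include <stdatomic.h>

#define UNUSED(x) (void)(x)              // same idiom the diff relies on

typedef struct my_stats_s {              // stand-in for mi_stats_t
  atomic_long committed;
} my_stats_t;

static my_stats_t g_stats_main;          // stand-in for _mi_stats_main

// The signature keeps the thread-local stats parameter so callers need not
// change, but the charge is recorded in the single global object.
static void my_os_commit(void* addr, size_t size, my_stats_t* tld_stats) {
  UNUSED(tld_stats);
  UNUSED(addr);
  my_stats_t* stats = &g_stats_main;     // redirect, as the hunks do
  atomic_fetch_add_explicit(&stats->committed, (long)size, memory_order_relaxed);
  // ... the actual mmap/VirtualAlloc commit would happen here ...
}

int main(void) {
  my_os_commit(NULL, 4096, NULL);        // the tld_stats argument is ignored
  return atomic_load(&g_stats_main.committed) == 4096 ? 0 : 1;
}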
src/segment.c

@@ -897,6 +897,10 @@ static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // =
 // The abandoned page list (tagged as it supports pop)
 static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL

+// Maintain these for debug purposes (these counts may be a bit off)
+static mi_decl_cache_align _Atomic(uintptr_t) abandoned_count;
+static mi_decl_cache_align _Atomic(uintptr_t) abandoned_visited_count;
+
 // We also maintain a count of current readers of the abandoned list
 // in order to prevent resetting/decommitting segment memory if it might
 // still be read.
@@ -912,6 +916,7 @@ static void mi_abandoned_visited_push(mi_segment_t* segment) {
   do {
     mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, anext);
   } while (!mi_atomic_cas_ptr_weak_release(mi_segment_t, &abandoned_visited, &anext, segment));
+  mi_atomic_increment_relaxed(&abandoned_visited_count);
 }

 // Move the visited list to the abandoned list.
@@ -928,8 +933,13 @@ static bool mi_abandoned_visited_revisit(void)
   mi_tagged_segment_t afirst;
   mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
   if (mi_tagged_segment_ptr(ts)==NULL) {
+    uintptr_t count = mi_atomic_load_relaxed(&abandoned_visited_count);
     afirst = mi_tagged_segment(first, ts);
-    if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) return true;
+    if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) {
+      mi_atomic_add_relaxed(&abandoned_count, count);
+      mi_atomic_sub_relaxed(&abandoned_visited_count, count);
+      return true;
+    }
   }

   // find the last element of the visited list: O(n)
@@ -942,10 +952,14 @@ static bool mi_abandoned_visited_revisit(void)
   // and atomically prepend to the abandoned list
   // (no need to increase the readers as we don't access the abandoned segments)
   mi_tagged_segment_t anext = mi_atomic_load_relaxed(&abandoned);
+  uintptr_t count;
   do {
+    count = mi_atomic_load_relaxed(&abandoned_visited_count);
     mi_atomic_store_ptr_release(mi_segment_t, &last->abandoned_next, mi_tagged_segment_ptr(anext));
     afirst = mi_tagged_segment(first, anext);
   } while (!mi_atomic_cas_weak_release(&abandoned, &anext, afirst));
+  mi_atomic_add_relaxed(&abandoned_count, count);
+  mi_atomic_sub_relaxed(&abandoned_visited_count, count);
   return true;
 }

@@ -961,6 +975,7 @@ static void mi_abandoned_push(mi_segment_t* segment) {
     mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, mi_tagged_segment_ptr(ts));
     next = mi_tagged_segment(segment, ts);
   } while (!mi_atomic_cas_weak_release(&abandoned, &ts, next));
+  mi_atomic_increment_relaxed(&abandoned_count);
 }

 // Wait until there are no more pending reads on segments that used to be in the abandoned list
@@ -1002,6 +1017,7 @@ static mi_segment_t* mi_abandoned_pop(void) {
   mi_atomic_decrement_relaxed(&abandoned_readers);  // release reader lock
   if (segment != NULL) {
     mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
+    mi_atomic_decrement_relaxed(&abandoned_count);
  }
  return segment;
 }
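These hunks add two relaxed atomic counters, `abandoned_count` and `abandoned_visited_count`, tracking the lengths of the lock-free abandoned lists for debugging; as the added comment says, the counts "may be a bit off" because each counter is updated after its CAS succeeds rather than atomically with it. A simplified sketch of the idea with a hypothetical `node_t` stack; note it omits the ABA-protection tag that mimalloc's `mi_tagged_segment_t` provides for pop:

#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s {
  struct node_s* _Atomic next;
} node_t;

static node_t* _Atomic g_list = NULL;    // plays the role of `abandoned`
static atomic_uintptr_t g_count = 0;     // approximate, debug-only length

static void list_push(node_t* n) {
  node_t* head = atomic_load_explicit(&g_list, memory_order_relaxed);
  do {
    atomic_store_explicit(&n->next, head, memory_order_release);
  } while (!atomic_compare_exchange_weak_explicit(
             &g_list, &head, n, memory_order_release, memory_order_relaxed));
  // updated after the CAS, so momentarily out of sync with the real length
  atomic_fetch_add_explicit(&g_count, 1, memory_order_relaxed);
}

static node_t* list_pop(void) {          // simplified: real code needs an ABA tag
  node_t* head = atomic_load_explicit(&g_list, memory_order_acquire);
  while (head != NULL) {
    node_t* next = atomic_load_explicit(&head->next, memory_order_relaxed);
    if (atomic_compare_exchange_weak_explicit(
          &g_list, &head, next, memory_order_acq_rel, memory_order_acquire)) {
      atomic_fetch_sub_explicit(&g_count, 1, memory_order_relaxed);
      break;
    }
  }
  return head;
}

int main(void) {
  static node_t n1, n2;
  list_push(&n1); list_push(&n2);
  list_pop(); list_pop();
  return (int)atomic_load(&g_count);     // 0 once pushes and pops balance
}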
src/stats.c

@@ -77,7 +77,7 @@ static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64
   mi_atomic_addi64_relaxed( &stat->allocated, src->allocated * unit);
   mi_atomic_addi64_relaxed( &stat->current, src->current * unit);
   mi_atomic_addi64_relaxed( &stat->freed, src->freed * unit);
   // peak scores do not work across threads..
   mi_atomic_addi64_relaxed( &stat->peak, src->peak * unit);
 }

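The comment kept in this hunk, "peak scores do not work across threads", reflects that mi_stat_add merges per-thread peaks by summation, and per-thread peaks need not coincide in time. A tiny illustration with made-up numbers:

#include <stdio.h>

int main(void) {
  // thread A usage over time: 10 then 0  -> per-thread peak 10
  // thread B usage over time:  0 then 10 -> per-thread peak 10
  long peak_a = 10, peak_b = 10;
  long merged = peak_a + peak_b;  // what summing the peaks reports: 20
  long truth  = 10;               // combined usage never exceeded 10
  printf("merged peak=%ld, true peak=%ld\n", merged, truth);
  return 0;
}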
test/test-stress.c

@@ -272,7 +272,7 @@ static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) {
   DWORD* tids = (DWORD*)custom_calloc(nthreads,sizeof(DWORD));
   HANDLE* thandles = (HANDLE*)custom_calloc(nthreads,sizeof(HANDLE));
   for (uintptr_t i = 0; i < nthreads; i++) {
-    thandles[i] = CreateThread(0, 4096, &thread_entry, (void*)(i), 0, &tids[i]);
+    thandles[i] = CreateThread(0, 8*1024, &thread_entry, (void*)(i), 0, &tids[i]);
   }
   for (size_t i = 0; i < nthreads; i++) {
     WaitForSingleObject(thandles[i], INFINITE);
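The stress test now asks CreateThread for an 8 KiB initial stack instead of 4 KiB. The second argument (dwStackSize) is the initially committed stack size unless STACK_SIZE_PARAM_IS_A_RESERVATION is passed, and the system rounds it up to page granularity. A minimal usage sketch with a hypothetical toy thread body:

#include <windows.h>
#include <stdio.h>

static DWORD WINAPI demo_entry(LPVOID arg) {   // hypothetical thread body
  printf("thread %u running\n", (unsigned)(UINT_PTR)arg);
  return 0;
}

int main(void) {
  DWORD tid;
  // 8*1024 is rounded up to page granularity and committed up front;
  // the reserve size still comes from the executable header.
  HANDLE h = CreateThread(NULL, 8*1024, &demo_entry, (LPVOID)(UINT_PTR)1, 0, &tid);
  if (h == NULL) return 1;
  WaitForSingleObject(h, INFINITE);
  CloseHandle(h);
  return 0;
}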