mirror of https://github.com/microsoft/mimalloc.git (synced 2024-12-26 21:04:27 +08:00)

commit a655c28b66 (parent d48b988c40)
cleanup
include/mimalloc/internal.h

@@ -88,8 +88,10 @@ void _mi_thread_data_collect(void);
 // os.c
 void  _mi_os_init(void);                                            // called from process init
-void* _mi_os_alloc(size_t size, bool* is_zero, mi_stats_t* stats);  // to allocate thread local data
-void  _mi_os_free(void* p, size_t size, mi_stats_t* stats);         // to free thread local data
+void* _mi_os_alloc(size_t size, bool* is_zero, mi_stats_t* stats);
+void  _mi_os_free(void* p, size_t size, mi_stats_t* stats);
+void  _mi_os_free_ex(void* p, size_t size, bool is_committed, mi_stats_t* stats);
 
 size_t _mi_os_page_size(void);
 size_t _mi_os_good_alloc_size(size_t size);
 bool   _mi_os_has_overcommit(void);
@@ -106,13 +108,13 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* s
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* stats);
 void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats);
 void  _mi_os_free_aligned_at_offset(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
 
 void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
 bool  _mi_os_use_large_page(size_t size, size_t alignment);
 size_t _mi_os_large_page_size(void);
 
-void  _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
 void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, bool* is_zero);
-void  _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
+void  _mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats);
 
 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
@@ -838,6 +840,8 @@ static inline void _mi_memzero(void* dst, size_t n) {
 }
 #endif
 
+// initialize a local variable to zero; use memset as compilers optimize constant sized memset's
+#define _mi_memzero_var(x)  memset(&x,0,sizeof(x))
 
 // -------------------------------------------------------------------------------
 // The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
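Note (illustrative only, not part of the commit): the new `_mi_memzero_var` is meant for zeroing a whole local variable; since sizeof(x) is a compile-time constant, compilers typically lower the memset to a few inline stores. A minimal sketch, using a made-up `my_info_t` type:

    typedef struct my_info_s { size_t count; void* start; } my_info_t;

    void example(void) {
      my_info_t info;
      _mi_memzero_var(info);   // expands to memset(&info, 0, sizeof(info))
      info.count = 1;          // set individual fields after zero-initialization
    }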
src/arena.c  (28 changed lines)
@@ -96,22 +96,19 @@ static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclus
   memory id's
 ----------------------------------------------------------- */
 
-static mi_memid_t mi_memid_none(void) {
-  mi_memid_t memid;
-  _mi_memzero(&memid, sizeof(memid));
-  memid.memkind = MI_MEM_NONE;
-  return memid;
-}
-
 static mi_memid_t mi_memid_create(mi_memkind_t memkind) {
-  mi_memid_t memid = mi_memid_none();
+  mi_memid_t memid;
+  _mi_memzero_var(memid);
   memid.memkind = memkind;
   return memid;
 }
 
+static mi_memid_t mi_memid_none(void) {
+  return mi_memid_create(MI_MEM_NONE);
+}
+
 static mi_memid_t mi_memid_create_os(bool committed) {
-  mi_memid_t memid = mi_memid_none();
-  memid.memkind = MI_MEM_OS;
+  mi_memid_t memid = mi_memid_create(MI_MEM_OS);
   memid.was_committed = committed;
   return memid;
 }
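Note (illustrative only, not part of the commit): after this reordering, `mi_memid_create` is the single constructor that zero-initializes every field, and the specialized helpers only set what differs. A hedged usage sketch:

    mi_memid_t os_id = mi_memid_create_os(true);   // zeroed, memkind == MI_MEM_OS, was_committed == true
    mi_memid_t no_id = mi_memid_none();            // zeroed, memkind == MI_MEM_NONE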
@@ -163,11 +160,10 @@ static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bit
 
 /* -----------------------------------------------------------
   Special static area for mimalloc internal structures
-  to avoid OS calls (for example, for the arena and thread
-  metadata)
+  to avoid OS calls (for example, for the arena metadata)
 ----------------------------------------------------------- */
 
-#define MI_ARENA_STATIC_MAX  (MI_INTPTR_SIZE*8*MI_KiB)  // 64 KiB on 64-bit
+#define MI_ARENA_STATIC_MAX  (MI_INTPTR_SIZE*MI_KiB)    // 8 KiB on 64-bit
 
 static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
 static _Atomic(size_t) mi_arena_static_top;
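Note (illustrative only, not the commit's code): the static area above is handed out with a simple atomic bump pointer, and allocation falls back to `_mi_os_alloc` once it is exhausted (see the next hunk). A rough, self-contained approximation using C11 atomics; the real code uses mimalloc's atomic wrappers and also handles alignment:

    #include <stdatomic.h>
    #include <stddef.h>

    #define STATIC_MAX  (8*1024)                     // mirrors the new 8 KiB MI_ARENA_STATIC_MAX
    static unsigned char   static_area[STATIC_MAX];
    static _Atomic(size_t) static_top;

    static void* static_area_zalloc(size_t size) {
      size_t oldtop = atomic_fetch_add(&static_top, size);
      if (oldtop + size > STATIC_MAX) return NULL;   // exhausted: caller falls back to the OS
      return &static_area[oldtop];                   // already zero (static storage duration)
    }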
@@ -209,7 +205,7 @@ static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* st
   p = _mi_os_alloc(size, &is_zero, stats);
   if (p != NULL) {
     *memid = mi_memid_create_os(true);
-    if (!is_zero) { _mi_memzero(p, size); }
+    if (!is_zero) { _mi_memzero_aligned(p, size); }
     return p;
   }
@@ -724,7 +720,7 @@ static void mi_arenas_unsafe_destroy(void) {
     if (arena->owned && arena->start != NULL) {
       mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
       if (arena->is_huge_alloc) {
-        _mi_os_free_huge_pages(arena->start, mi_arena_size(arena), &_mi_stats_main);
+        _mi_os_free_huge_os_pages(arena->start, mi_arena_size(arena), &_mi_stats_main);
       }
       else {
         _mi_os_free(arena->start, mi_arena_size(arena), &_mi_stats_main);
@@ -938,7 +934,7 @@ int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_m
   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
 
   if (!mi_manage_os_memory_ex2(p, hsize, true, true, true, is_zero, numa_node, exclusive, true /* owned */, arena_id)) {
-    _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
+    _mi_os_free_huge_os_pages(p, hsize, &_mi_stats_main);
     return ENOMEM;
   }
   return 0;
src/init.c

@@ -217,7 +217,7 @@ static mi_thread_data_t* mi_thread_data_zalloc(void) {
   }
 
   if (td != NULL && !is_zero) {
-    _mi_memzero(td, sizeof(*td));
+    _mi_memzero_aligned(td, sizeof(*td));
   }
   return td;
 }
src/os.c  (2 changed lines)
@@ -599,7 +599,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
 
 // free every huge page in a range individually (as we allocated per page)
 // note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
-void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats) {
+void _mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) {
   if (p==NULL || size==0) return;
   uint8_t* base = (uint8_t*)p;
   while (size >= MI_HUGE_OS_PAGE_SIZE) {
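Note (hedged sketch, not verbatim from os.c): the hunk above is cut off inside the loop; a per-page free of this kind typically completes along these lines, releasing one MI_HUGE_OS_PAGE_SIZE page at a time:

    while (size >= MI_HUGE_OS_PAGE_SIZE) {
      _mi_os_free(base, MI_HUGE_OS_PAGE_SIZE, stats);   // free a single huge OS page
      size -= MI_HUGE_OS_PAGE_SIZE;
      base += MI_HUGE_OS_PAGE_SIZE;
    }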
src/stats.c

@@ -444,7 +444,7 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start) {
 mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept
 {
   mi_process_info_t pinfo;
-  _mi_memzero(&pinfo,sizeof(pinfo));
+  _mi_memzero_var(pinfo);
   pinfo.elapsed = _mi_clock_end(mi_process_start);
   pinfo.current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current));
   pinfo.peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak));