mimalloc (mirror of https://github.com/microsoft/mimalloc.git)

implement arena exclusive heap allocation for dev

commit 461df1e878 (parent 4fc597d4f4)
include/mimalloc.h

@@ -281,7 +281,7 @@ mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node,
 mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
 mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;

-#if MI_MALLOC_VERSION >= 200
+#if MI_MALLOC_VERSION >= 182
 // Create a heap that only allocates in the specified arena
 mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
 #endif
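The hunk above is the public surface of this commit: an exclusive arena (reserved with `exclusive=true`) plus `mi_heap_new_in_arena` gives a heap whose memory all comes from that arena. A minimal usage sketch (not part of the commit; error handling is elided and the 64 MiB size is illustrative):

    #include <mimalloc.h>

    int main(void) {
      // reserve an OS memory region as an *exclusive* arena: only heaps
      // created for this arena id may allocate from it
      mi_arena_id_t arena_id;
      if (mi_reserve_os_memory_ex(64 * 1024 * 1024, /*commit*/ false,
                                  /*allow_large*/ false, /*exclusive*/ true,
                                  &arena_id) != 0) return 1;

      // a heap that only allocates inside that arena
      mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
      void* p = mi_heap_malloc(heap, 128);
      mi_free(p);
      mi_heap_delete(heap);
      return 0;
    }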
include/mimalloc-internal.h

@@ -179,6 +179,7 @@ void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
 void _mi_heap_destroy_all(void);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);

 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
include/mimalloc-types.h

@@ -410,6 +410,7 @@ struct mi_heap_s {
   mi_page_queue_t pages[MI_BIN_FULL + 1];  // queue of pages for each size class (or "bin")
   _Atomic(mi_block_t*) thread_delayed_free;
   mi_threadid_t thread_id;                 // thread this heap belongs to
+  mi_arena_id_t arena_id;                  // arena id if the heap belongs to a specific arena (or 0)
   uintptr_t cookie;                        // random cookie to verify pointers (see `_mi_ptr_cookie`)
   uintptr_t keys[2];                       // two random keys used to encode the `thread_delayed_free` list
   mi_random_ctx_t random;                  // random number context used for secure allocation
src/heap.c (15 changes)
@@ -198,15 +198,16 @@ mi_heap_t* mi_heap_get_backing(void) {
   return bheap;
 }

-mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
+mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
-  if (heap==NULL) return NULL;
+  if (heap == NULL) return NULL;
   _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
   heap->tld = bheap->tld;
   heap->thread_id = _mi_thread_id();
+  heap->arena_id = arena_id;
   _mi_random_split(&bheap->random, &heap->random);
   heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
   heap->keys[1] = _mi_heap_random_next(heap);
   heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
@@ -216,6 +217,14 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
   return heap;
 }

+mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
+  return mi_heap_new_in_arena(_mi_arena_id_none());
+}
+
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid) {
+  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
+}
+
 uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
   return _mi_random_next(&heap->random);
 }
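`mi_heap_new` is now a thin wrapper over `mi_heap_new_in_arena` with `_mi_arena_id_none()`, so default heaps stay unrestricted while arena heaps record the id that `_mi_heap_memid_is_suitable` later checks. A hypothetical smoke test of that split (not in the commit; `arena_id` comes from an earlier `mi_reserve_os_memory_ex` call):

    #include <mimalloc.h>
    #include <assert.h>

    void smoke_test(mi_arena_id_t arena_id) {
      mi_heap_t* h_any   = mi_heap_new();                  // arena id is "none": any memory is suitable
      mi_heap_t* h_arena = mi_heap_new_in_arena(arena_id); // only memory from `arena_id` is suitable

      void* p = mi_heap_malloc(h_arena, 64);
      assert(p == NULL || mi_heap_check_owned(h_arena, p));

      mi_free(p);
      mi_heap_delete(h_arena);
      mi_heap_delete(h_any);
    }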
src/init.c

@@ -96,6 +96,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   MI_ATOMIC_VAR_INIT(NULL),
   0,                // tid
   0,                // cookie
+  0,                // arena id
   { 0, 0 },         // keys
   { {0}, {0}, 0, true },  // random
   0,                // page count
@@ -132,6 +133,7 @@ mi_heap_t _mi_heap_main = {
   MI_ATOMIC_VAR_INIT(NULL),
   0,                // thread id
   0,                // initial cookie
+  0,                // arena id
   { 0, 0 },         // the key of the main heap can be fixed (unlike page keys that need to be secure!)
   { {0x846ca68b}, {0}, 0, true },  // random
   0,                // page count
src/segment.c

@@ -54,9 +54,11 @@ static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, const mi_
 }
 #endif

+/*
 static bool mi_segment_queue_is_empty(const mi_segment_queue_t* queue) {
   return (queue->first == NULL);
 }
+*/

 static void mi_segment_queue_remove(mi_segment_queue_t* queue, mi_segment_t* segment) {
   mi_assert_expensive(mi_segment_queue_contains(queue, segment));
@@ -500,7 +502,8 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
   Segment allocation
----------------------------------------------------------- */

-static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignment, size_t pre_size, size_t info_size,
+static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignment, mi_arena_id_t req_arena_id,
+                                         size_t pre_size, size_t info_size,
                                          size_t* segment_size, bool* is_zero, bool* commit, mi_segments_tld_t* tld, mi_os_tld_t* tld_os)
 {
   size_t memid;
@@ -515,7 +518,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
   }

   // mi_segment_t* segment = (mi_segment_t*)_mi_mem_alloc_aligned(*segment_size, alignment, align_offset, commit, &mem_large, &is_pinned, is_zero, &memid, tld_os);
-  mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(*segment_size, alignment, align_offset, commit, &mem_large, &is_pinned, is_zero, _mi_arena_id_none(), &memid, tld_os);
+  mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(*segment_size, alignment, align_offset, commit, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, tld_os);
   if (segment == NULL) return NULL;  // failed to allocate
   if (!(*commit)) {
     // ensure the initial info is committed
@@ -541,7 +544,8 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
 }

 // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`.
-static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, size_t page_alignment,
+                                      mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
   // required is only > 0 for huge page allocations
   mi_assert_internal((required > 0 && page_kind > MI_PAGE_LARGE) || (required==0 && page_kind <= MI_PAGE_LARGE));
@@ -574,7 +578,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   bool is_zero = false;

   // Allocate the segment from the OS (segment_size can change due to alignment)
-  mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, pre_size, info_size, &segment_size, &is_zero, &commit, tld, os_tld);
+  mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, req_arena_id, pre_size, info_size, &segment_size, &is_zero, &commit, tld, os_tld);
   if (segment == NULL) return NULL;
   mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
   mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
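The two hunks above thread the requested arena id through segment allocation instead of hard-coding `_mi_arena_id_none()` at the bottom. The resulting call chain, with names taken from this diff and signatures abbreviated:

    // how a heap's arena id reaches the arena allocator after this commit
    //
    //   _mi_segment_page_alloc(heap, ...)                        // uses heap->arena_id
    //     -> mi_segment_reclaim_or_alloc(heap, ...)
    //       -> mi_segment_alloc(0, kind, shift, 0, heap->arena_id, tld, os_tld)
    //         -> mi_segment_os_alloc(..., req_arena_id, ...)
    //           -> _mi_arena_alloc_aligned(..., req_arena_id, &memid, tld_os)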
@@ -1094,6 +1098,9 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
   long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 8, 1024);  // limit the work to bound allocation times
   while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
     segment->abandoned_visits++;
+    // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments
+    // and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way?
+    bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
     bool all_pages_free;
     bool has_page = mi_segment_check_free(segment, block_size, &all_pages_free);  // try to free up pages (due to concurrent frees)
     if (all_pages_free) {
@@ -1104,18 +1111,19 @@
       // freeing but that would violate some invariants temporarily)
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
-    else if (has_page && segment->page_kind == page_kind) {
+    else if (has_page && segment->page_kind == page_kind && is_suitable) {
       // found a free page of the right kind, or page of the right block_size with free space
       // we return the result of reclaim (which is usually `segment`) as it might free
       // the segment due to concurrent frees (in which case `NULL` is returned).
       return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
     }
-    else if (segment->abandoned_visits >= 3) {
+    else if (segment->abandoned_visits >= 3 && is_suitable) {
       // always reclaim on 3rd visit to limit the list length.
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
     else {
       // otherwise, push on the visited list so it does not get looked at again too quickly
       // todo: reset delayed pages in the segment?
       mi_abandoned_visited_push(segment);
     }
   }
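Net effect on reclaiming: a popped abandoned segment is only reclaimed into the requesting heap when its memory id is suitable for that heap's arena, while an unsuitable segment goes back onto the visited list, which is exactly the wasted work the new todo comment flags. The decision logic, condensed (a sketch of the hunk above, not a verbatim excerpt; `right_kind` abbreviates `segment->page_kind == page_kind`):

    // per popped abandoned segment:
    bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
    if (all_pages_free)                                     { /* segment gets freed */ }
    else if (has_page && right_kind && is_suitable)         { /* reclaim into this heap */ }
    else if (segment->abandoned_visits >= 3 && is_suitable) { /* reclaim to bound the list */ }
    else                                                    { mi_abandoned_visited_push(segment); }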
@@ -1135,6 +1143,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
   // 1. try to reclaim an abandoned segment
   bool reclaimed;
   mi_segment_t* segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
+  mi_assert_internal(segment == NULL || _mi_arena_memid_is_suitable(segment->memid, heap->arena_id));
   if (reclaimed) {
     // reclaimed the right page right into the heap
     mi_assert_internal(segment != NULL && segment->page_kind == page_kind && page_kind <= MI_PAGE_LARGE);
@@ -1145,7 +1154,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
     return segment;
   }
   // 2. otherwise allocate a fresh segment
-  return mi_segment_alloc(0, page_kind, page_shift, 0, tld, os_tld);
+  return mi_segment_alloc(0, page_kind, page_shift, 0, heap->arena_id, tld, os_tld);
 }

@@ -1155,7 +1164,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s

 static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_assert_internal(mi_segment_has_free(segment));
   mi_assert_expensive(mi_segment_is_valid(segment, tld));
   for (size_t i = 0; i < segment->capacity; i++) {  // TODO: use a bitmap instead of search?
     mi_page_t* page = &segment->pages[i];
     if (!page->segment_in_use) {
@@ -1173,24 +1182,34 @@ static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tl
   return mi_segment_find_free(segment, tld);
 }

-static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
+static mi_page_t* mi_segment_page_try_alloc_in_queue(mi_heap_t* heap, mi_page_kind_t kind, mi_segments_tld_t* tld) {
   // find an available segment in the segment free queue
   mi_segment_queue_t* const free_queue = mi_segment_free_queue_of_kind(kind, tld);
-  if (mi_segment_queue_is_empty(free_queue)) {
+  for (mi_segment_t* segment = free_queue->first; segment != NULL; segment = segment->next) {
+    if (_mi_arena_memid_is_suitable(segment->memid, heap->arena_id) && mi_segment_has_free(segment)) {
+      return mi_segment_page_alloc_in(segment, tld);
+    }
+  }
+  return NULL;
+}
+
+static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
+  mi_page_t* page = mi_segment_page_try_alloc_in_queue(heap, kind, tld);
+  if (page == NULL) {
     // possibly allocate or reclaim a fresh segment
     mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld);
     if (segment == NULL) return NULL;  // return NULL if out-of-memory (or reclaimed)
-    mi_assert_internal(free_queue->first == segment);
     mi_assert_internal(segment->page_kind==kind);
     mi_assert_internal(segment->used < segment->capacity);
+    mi_assert_internal(_mi_arena_memid_is_suitable(segment->memid, heap->arena_id));
+    page = mi_segment_page_try_alloc_in_queue(heap, kind, tld);  // this should now succeed
   }
-  mi_assert_internal(free_queue->first != NULL);
-  mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld);
   mi_assert_internal(page != NULL);
   #if MI_DEBUG>=2 && !MI_TRACK_ENABLED // && !MI_TSAN
   // verify it is committed
   _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
   #endif
   return page;
 }
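Page allocation is now two-phase: `mi_segment_page_try_alloc_in_queue` scans the free queue for a segment that is both suitable for the heap's arena and has a free page (the old code just took `free_queue->first`, which could hand an arena-exclusive heap memory from anywhere), and `mi_segment_page_alloc` retries the scan after reclaiming or allocating a segment that the new assertion guarantees is suitable. The retry pattern in miniature (a sketch, not the verbatim code):

    mi_page_t* page = mi_segment_page_try_alloc_in_queue(heap, kind, tld);  // 1: suitable queued segment?
    if (page == NULL) {
      // 2: obtain a suitable segment (reclaim abandoned or allocate fresh);
      // the repeated queue scan should then succeed
      if (mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld) == NULL) return NULL;
      page = mi_segment_page_try_alloc_in_queue(heap, kind, tld);
    }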
@@ -1217,9 +1236,9 @@ static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size
   return page;
 }

-static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
-  mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT + 1, page_alignment, tld, os_tld);
+  mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT + 1, page_alignment, req_arena_id, tld, os_tld);
   if (segment == NULL) return NULL;
   mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size);
 #if MI_HUGE_PAGE_ABANDON
@@ -1303,7 +1322,7 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t pag
     mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
     //mi_assert_internal((MI_SEGMENT_SIZE % page_alignment) == 0);
     if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
-    page = mi_segment_huge_page_alloc(block_size, page_alignment, tld, os_tld);
+    page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld, os_tld);
   }
   else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
     page = mi_segment_small_page_alloc(heap, block_size, tld, os_tld);
@@ -1315,7 +1334,7 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t pag
     page = mi_segment_large_page_alloc(heap, block_size, tld, os_tld);
   }
   else {
-    page = mi_segment_huge_page_alloc(block_size, page_alignment, tld, os_tld);
+    page = mi_segment_huge_page_alloc(block_size, page_alignment, heap->arena_id, tld, os_tld);
   }
   mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page), tld));
   mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
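With `mi_segment_huge_page_alloc` also taking the heap's arena id, every path out of `_mi_segment_page_alloc` (small, medium, large, and huge) now respects arena exclusivity. A hypothetical end-to-end check (not in the commit; the 32 MiB size is illustrative and assumes a sufficiently large arena):

    #include <mimalloc.h>
    #include <assert.h>

    void huge_from_arena(mi_arena_id_t arena_id) {
      mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
      // a block above the large-object threshold takes the
      // mi_segment_huge_page_alloc path, now with heap->arena_id
      void* big = mi_heap_malloc(heap, 32u * 1024 * 1024);
      assert(big == NULL || mi_heap_check_owned(heap, big));
      mi_free(big);
      mi_heap_delete(heap);
    }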