initial api for heaps restricted to a certain arena
parent e961ef705e
commit 9f36808a7f
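In short: this commit adds a first API for creating a heap that only allocates from one (exclusive) arena. A minimal usage sketch, based on the test added in this commit (error handling elided; the 100 MiB size is just the value the test happens to use):

  #include <mimalloc.h>

  // reserve a 100 MiB exclusive arena and create a heap bound to it
  mi_arena_id_t arena_id;
  int err = mi_reserve_os_memory_ex(100 * 1024 * 1024, false /* commit */,
                                    false /* allow large */, true /* exclusive */, &arena_id);
  if (err) abort();

  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);  // heap->arena_id = arena_id
  void* p = mi_heap_malloc(heap, 1024);              // served from the reserved arena
  // once the arena is exhausted, allocation from this heap fails (no OS fallback,
  // see the req_arena_id check added in _mi_arena_alloc_aligned below)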
@@ -93,9 +93,10 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit,
 void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld);
 mi_arena_id_t _mi_arena_id_none(void);
+bool _mi_arena_memid_is_suitable(size_t memid, mi_arena_id_t req_arena_id);
 
 // "segment-cache.c"
-void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
 void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
 void _mi_segment_map_allocated_at(const mi_segment_t* segment);
@@ -142,6 +143,7 @@ uint8_t _mi_bin(size_t size); // for stats
 void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);
 
 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
@@ -438,6 +438,7 @@ struct mi_heap_s {
   mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
   _Atomic(mi_block_t*) thread_delayed_free;
   mi_threadid_t thread_id; // thread this heap belongs too
+  mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
   uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
   uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
   mi_random_ctx_t random; // random number context used for secure allocation
@@ -287,7 +287,7 @@ mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow
 mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
 
 #if MI_MALLOC_VERSION >= 200
-mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id, bool exclusive);
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
 #endif
 
 // deprecated
src/arena.c: 16 lines changed
@@ -97,8 +97,9 @@ mi_arena_id_t _mi_arena_id_none(void) {
   return 0;
 }
 
-static bool mi_arena_id_suitable(mi_arena_id_t arena_id, bool exclusive, mi_arena_id_t req_arena_id) {
-  return (!exclusive || arena_id == req_arena_id);
+static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
+  return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
+          (arena_id == req_arena_id));
 }
 
 
@@ -117,18 +118,16 @@ static size_t mi_arena_memid_create(mi_arena_id_t id, bool exclusive, mi_bitmap_
 }
 
 static bool mi_arena_memid_indices(size_t arena_memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
-  mi_assert_internal(arena_memid != MI_MEMID_OS);
   *bitmap_index = (arena_memid >> 8);
   mi_arena_id_t id = (int)(arena_memid & 0x7F);
   *arena_index = mi_arena_id_index(id);
   return ((arena_memid & 0x80) != 0);
 }
 
-bool _mi_arena_memid_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) {
-  mi_assert_internal(arena_memid != MI_MEMID_OS);
+bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) {
   mi_arena_id_t id = (int)(arena_memid & 0x7F);
   bool exclusive = ((arena_memid & 0x80) != 0);
-  return mi_arena_id_suitable(id, exclusive, request_arena_id);
+  return mi_arena_id_is_suitable(id, exclusive, request_arena_id);
 }
 
 static size_t mi_block_count_of_size(size_t size) {
@@ -159,7 +158,7 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
 {
   MI_UNUSED(arena_index);
   mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
-  if (!mi_arena_id_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
+  if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
 
   mi_bitmap_index_t bitmap_index;
   if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
@@ -266,7 +265,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
   }
 
   // finally, fall back to the OS
-  if (mi_option_is_enabled(mi_option_limit_os_alloc)) {
+  if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
     errno = ENOMEM;
     return NULL;
   }
@@ -282,6 +281,7 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, b
   return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
 }
 
+
 /* -----------------------------------------------------------
   Arena free
 ----------------------------------------------------------- */
src/bitmap.c: 19 lines changed
@@ -108,6 +108,25 @@ bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fiel
   return false;
 }
 
+// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled
+bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
+            const size_t start_field_idx, const size_t count,
+            mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
+            mi_bitmap_index_t* bitmap_idx) {
+  size_t idx = start_field_idx;
+  for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
+    if (idx >= bitmap_fields) idx = 0; // wrap
+    if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
+      if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) {
+        return true;
+      }
+      // predicate returned false, unclaim and look further
+      _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx);
+    }
+  }
+  return false;
+}
+
 /*
 // Find `count` bits of 0 and set them to 1 atomically; returns `true` on success.
 // For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never span fields.
@@ -72,6 +72,10 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
 // For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
 bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
 
+// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled
+typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
+bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);
+
 // Set `count` bits at `bitmap_idx` to 0 atomically
 // Returns `true` if all `count` bits were 1 previously.
 bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
src/heap.c: 11 lines changed
@@ -200,13 +200,14 @@ mi_heap_t* mi_heap_get_backing(void) {
   return bheap;
 }
 
-mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
+mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena( mi_arena_id_t arena_id ) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
   if (heap==NULL) return NULL;
   _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
   heap->tld = bheap->tld;
   heap->thread_id = _mi_thread_id();
+  heap->arena_id = arena_id;
   _mi_random_split(&bheap->random, &heap->random);
   heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
@@ -218,6 +219,14 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
   return heap;
 }
 
+mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
+  return mi_heap_new_in_arena(_mi_arena_id_none());
+}
+
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid) {
+  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
+}
+
 uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
   return _mi_random_next(&heap->random);
 }
@@ -109,6 +109,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   MI_ATOMIC_VAR_INIT(NULL),
   0, // tid
   0, // cookie
+  0, // arena id
   { 0, 0 }, // keys
   { {0}, {0}, 0 },
   0, // page count
@@ -149,6 +150,7 @@ mi_heap_t _mi_heap_main = {
   MI_ATOMIC_VAR_INIT(NULL),
   0, // thread id
   0, // initial cookie
+  0, // arena id
   { 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)
   { {0x846ca68b}, {0}, 0 }, // random
   0, // page count
@@ -39,8 +39,13 @@ static mi_decl_cache_align mi_bitmap_field_t cache_available[MI_CACHE_FIELDS] =
 static mi_decl_cache_align mi_bitmap_field_t cache_available_large[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET };
 static mi_decl_cache_align mi_bitmap_field_t cache_inuse[MI_CACHE_FIELDS]; // zero bit = free
 
+static bool mi_cdecl mi_segment_cache_is_suitable(mi_bitmap_index_t bitidx, void* arg) {
+  mi_arena_id_t req_arena_id = *((mi_arena_id_t*)arg);
+  mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)];
+  return _mi_arena_memid_is_suitable(slot->memid, req_arena_id);
+}
 
-mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
 #ifdef MI_CACHE_DISABLE
   return NULL;
@@ -60,12 +65,15 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
   // find an available slot
   mi_bitmap_index_t bitidx = 0;
   bool claimed = false;
+  mi_arena_id_t req_arena_id = _req_arena_id;
+  mi_bitmap_pred_fun_t pred_fun = &mi_segment_cache_is_suitable; // cannot pass NULL as the arena may be exclusive itself; todo: do not put exclusive arenas in the cache?
+
   if (*large) { // large allowed?
-    claimed = _mi_bitmap_try_find_from_claim(cache_available_large, MI_CACHE_FIELDS, start_field, 1, &bitidx);
+    claimed = _mi_bitmap_try_find_from_claim_pred(cache_available_large, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
     if (claimed) *large = true;
   }
   if (!claimed) {
-    claimed = _mi_bitmap_try_find_from_claim(cache_available, MI_CACHE_FIELDS, start_field, 1, &bitidx);
+    claimed = _mi_bitmap_try_find_from_claim_pred (cache_available, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
     if (claimed) *large = false;
   }
 
@@ -721,7 +721,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   return page;
 }
 
-static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_segments_tld_t* tld) {
+static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) {
   mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX);
   // search from best fit up
   mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld);
@@ -730,19 +730,23 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_segm
   for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) {
     if (slice->slice_count >= slice_count) {
       // found one
-      mi_span_queue_delete(sq, slice);
       mi_segment_t* segment = _mi_ptr_segment(slice);
-      if (slice->slice_count > slice_count) {
-        mi_segment_slice_split(segment, slice, slice_count, tld);
+      if (_mi_arena_memid_is_suitable(segment->memid, req_arena_id)) {
+        // found a suitable page span
+        mi_span_queue_delete(sq, slice);
+
+        if (slice->slice_count > slice_count) {
+          mi_segment_slice_split(segment, slice, slice_count, tld);
+        }
+        mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->xblock_size > 0);
+        mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld);
+        if (page == NULL) {
+          // commit failed; return NULL but first restore the slice
+          mi_segment_span_free_coalesce(slice, tld);
+          return NULL;
+        }
+        return page;
       }
-      mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->xblock_size > 0);
-      mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld);
-      if (page == NULL) {
-        // commit failed; return NULL but first restore the slice
-        mi_segment_span_free_coalesce(slice, tld);
-        return NULL;
-      }
-      return page;
     }
   }
   sq++;
@@ -757,7 +761,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_segm
 ----------------------------------------------------------- */
 
 // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
-static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
+static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
 {
   mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
   mi_assert_internal((segment==NULL) || (segment!=NULL && required==0));
@@ -793,9 +797,9 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
   bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
   bool is_pinned = false;
   size_t memid = 0;
-  segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
+  segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
   if (segment==NULL) {
-    segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, _mi_arena_id_none(), &memid, os_tld);
+    segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
     if (segment == NULL) return NULL; // failed to allocate
     if (commit) {
       mi_commit_mask_create_full(&commit_mask);
@@ -908,8 +912,8 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
 
 
 // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
-static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) {
-  return mi_segment_init(NULL, required, tld, os_tld, huge_page);
+static mi_segment_t* mi_segment_alloc(size_t required, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) {
+  return mi_segment_init(NULL, required, req_arena_id, tld, os_tld, huge_page);
 }
 
 
@@ -1368,6 +1372,9 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
   long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 8, 1024); // limit the work to bound allocation times
   while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
     segment->abandoned_visits++;
+    // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments
+    // and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way?
+    bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
     bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees)
     if (segment->used == 0) {
       // free the segment (by forced reclaim) to make it available to other threads.
@@ -1377,13 +1384,13 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
       // freeing but that would violate some invariants temporarily)
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
-    else if (has_page) {
+    else if (has_page && is_suitable) {
       // found a large enough free span, or a page of the right block_size with free space
       // we return the result of reclaim (which is usually `segment`) as it might free
       // the segment due to concurrent frees (in which case `NULL` is returned).
       return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
     }
-    else if (segment->abandoned_visits > 3) {
+    else if (segment->abandoned_visits > 3 && is_suitable) {
       // always reclaim on 3rd visit to limit the abandoned queue length.
       mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
@@ -1425,7 +1432,7 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
   Reclaim or allocate
 ----------------------------------------------------------- */
 
 static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
   mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
@@ -1443,7 +1450,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_
     return segment;
   }
   // 2. otherwise allocate a fresh segment
-  return mi_segment_alloc(0, tld, os_tld, NULL);
+  return mi_segment_alloc(0, heap->arena_id, tld, os_tld, NULL);
 }
 
 
@@ -1459,7 +1466,7 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki
   size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE));
   size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE;
   mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size);
-  mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
+  mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
   if (page==NULL) {
     // no free page, allocate a new segment and try again
     if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) {
@@ -1483,10 +1490,10 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki
   Huge page allocation
 ----------------------------------------------------------- */
 
-static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
   mi_page_t* page = NULL;
-  mi_segment_t* segment = mi_segment_alloc(size,tld,os_tld,&page);
+  mi_segment_t* segment = mi_segment_alloc(size,req_arena_id,tld,os_tld,&page);
   if (segment == NULL || page==NULL) return NULL;
   mi_assert_internal(segment->used==1);
   mi_assert_internal(mi_page_block_size(page) >= size);
@@ -1536,8 +1543,9 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segment
     page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld);
   }
   else {
-    page = mi_segment_huge_page_alloc(block_size,tld,os_tld);
+    page = mi_segment_huge_page_alloc(block_size,heap->arena_id,tld,os_tld);
   }
+  mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid));
   mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
   return page;
 }
@@ -19,6 +19,7 @@ static void test_reserved(void);
 static void negative_stat(void);
 static void alloc_huge(void);
 static void test_heap_walk(void);
+static void test_heap_arena(void);
 
 int main() {
   mi_version();
@@ -33,7 +34,8 @@ int main() {
   // test_reserved();
   // negative_stat();
   // alloc_huge();
-  test_heap_walk();
+  // test_heap_walk();
+  test_heap_arena();
 
   void* p1 = malloc(78);
   void* p2 = malloc(24);
@@ -212,6 +214,20 @@ static void test_heap_walk(void) {
   mi_heap_visit_blocks(heap, true, &test_visit, NULL);
 }
 
+static void test_heap_arena(void) {
+  mi_arena_id_t arena_id;
+  int err = mi_reserve_os_memory_ex(100 * 1024 * 1024, false /* commit */, false /* allow large */, true /* exclusive */, &arena_id);
+  if (err) abort();
+  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
+  for (int i = 0; i < 500000; i++) {
+    void* p = mi_heap_malloc(heap, 1024);
+    if (p == NULL) {
+      printf("out of memory after %d kb (expecting about 100_000kb)\n", i);
+      break;
+    }
+  }
+}
+
 // ----------------------------
 // bin size experiments
 // ------------------------------