mirror of https://github.com/microsoft/mimalloc.git
clean up arena function names
parent cb0369452d
commit 4c4f2f4084
src/arena.c (42 changed lines)
@@ -133,7 +133,9 @@ static size_t mi_arena_block_size(size_t bcount) {
 /* -----------------------------------------------------------
   Thread safe allocation in an arena
 ----------------------------------------------------------- */
-static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
+
+// claim the `blocks_inuse` bits
+static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
 {
   size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
   if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
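The new name makes the role of this helper explicit: it does not hand out memory itself, it only tries to claim a contiguous run of bits in the arena's `blocks_inuse` bitmap. Below is a minimal, self-contained sketch of that claiming idea for a single 64-bit bitmap field; the name `claim_bits` is hypothetical, and mimalloc's real `_mi_bitmap_try_find_from_claim_across` (as its name suggests) additionally searches for a free run and can span multiple fields.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Try to atomically claim `count` consecutive bits starting at `bit_idx`
// within one 64-bit bitmap field; fails if any bit in the run is already set.
// Assumes bit_idx + count <= 64.
static bool claim_bits(_Atomic uint64_t* field, unsigned bit_idx, unsigned count) {
  const uint64_t run  = (count >= 64 ? ~UINT64_C(0) : ((UINT64_C(1) << count) - 1));
  const uint64_t mask = run << bit_idx;
  uint64_t expected = atomic_load_explicit(field, memory_order_relaxed);
  do {
    if ((expected & mask) != 0) return false;   // some block in the run is already in use
  } while (!atomic_compare_exchange_weak_explicit(field, &expected, expected | mask,
                                                  memory_order_acq_rel, memory_order_relaxed));
  return true;                                  // claimed it
}

int main(void) {
  _Atomic uint64_t field = 0;
  bool a = claim_bits(&field, 0, 4);   // claims bits 0..3 -> true
  bool b = claim_bits(&field, 2, 4);   // overlaps bits 2..3 -> false
  return (a && !b) ? 0 : 1;
}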
@@ -148,7 +150,7 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
   Arena Allocation
 ----------------------------------------------------------- */

-static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
+static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
                                                  bool* commit, bool* large, bool* is_pinned, bool* is_zero,
                                                  mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
@@ -157,7 +159,7 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
   if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;

   mi_bitmap_index_t bitmap_index;
-  if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
+  if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;

   // claimed it!
   void* p = arena->start + mi_arena_block_size(mi_bitmap_index_bit(bitmap_index));
@@ -199,9 +201,9 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
 }

 // allocate in a speficic arena
-static void* mi_arena_alloc_in(mi_arena_id_t arena_id, int numa_node, size_t size, size_t alignment,
-                               bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                               mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
+static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t size, size_t alignment,
+                                  bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+                                  mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED_RELEASE(alignment);
   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -216,12 +218,12 @@ static void* mi_arena_alloc_in(mi_arena_id_t arena_id, int numa_node, size_t siz
   if (arena == NULL) return NULL;
   if (arena->numa_node >= 0 && arena->numa_node != numa_node) return NULL;
   if (!(*large) && arena->is_large) return NULL;
-  return mi_arena_alloc_from(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+  return mi_arena_alloc_at(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
 }


 // allocate from an arena with fallback to the OS
-static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
+static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
                                                 bool* is_pinned, bool* is_zero,
                                                 mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
 {
@@ -233,14 +235,14 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
   mi_assert_internal(size <= mi_arena_block_size(bcount));

   size_t arena_index = mi_arena_id_index(req_arena_id);
-  if (arena_index < MI_MAX_ARENAS) {
+  if (arena_index < MI_MAX_ARENAS && arena_index < max_arena) {
     // try a specific arena if requested
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]);
     if ((arena != NULL) &&
-        (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
-        (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
+        // (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
+        (*large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_from(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -251,9 +253,9 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
     if (arena == NULL) break; // end reached
     if ((arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
-        (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
+        (*large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -264,9 +266,9 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
     if (arena == NULL) break; // end reached
     if ((arena->numa_node >= 0 && arena->numa_node != numa_node) && // not numa local!
-        (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
+        (*large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -276,7 +278,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
 }

 // try to reserve a fresh arena
-static bool mi_arena_reserve(size_t size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
+static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
 {
   if (_mi_preloading()) return false;
   if (req_arena_id != _mi_arena_id_none()) return false;
@@ -294,7 +296,7 @@ static bool mi_arena_reserve(size_t size, bool allow_large, mi_arena_id_t req_ar
   if (arena_count >= 8 && arena_count <= 128) {
     arena_reserve = (1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially
   }
-  if (arena_reserve < size) return false;
+  if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size

   // commit eagerly?
   bool arena_commit = false;
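For a sense of what the exponential scaling in this hunk does in practice, here is a small standalone sketch. The helper name `scaled_reserve` and the 64 MiB base reserve are assumptions for illustration only, not mimalloc's configured defaults; only the scaling rule itself is taken from the diff above.

#include <stddef.h>
#include <stdio.h>

// For 8..128 existing arenas, the reserve size is multiplied by 2^(arena_count/8).
static size_t scaled_reserve(size_t arena_count) {
  size_t arena_reserve = (size_t)64 * 1024 * 1024;   // assumed base reserve (example value)
  if (arena_count >= 8 && arena_count <= 128) {
    arena_reserve = ((size_t)1 << (arena_count / 8)) * arena_reserve;
  }
  return arena_reserve;
}

int main(void) {
  // 0..7 arenas -> 64 MiB, 8..15 -> 128 MiB, 16..23 -> 256 MiB, 24..31 -> 512 MiB, ...
  for (size_t n = 0; n <= 32; n += 8) {
    printf("arenas=%zu reserve=%zu MiB\n", n, scaled_reserve(n) >> 20);
  }
  return 0;
}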
@@ -319,14 +321,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset

   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
-    void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+    void* p = mi_arenas_alloc(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
     if (p != NULL) return p;

     // otherwise, try to first eagerly reserve a new arena
     mi_arena_id_t arena_id = 0;
     if (mi_arena_reserve(size,*large,req_arena_id,&arena_id)) {
       // and try allocate in there
-      p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      p = mi_arena_alloc_at_id(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
       if (p != NULL) return p;
     }
   }
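Taken together, the renamed helpers make the strategy in this hunk easier to read: first try the existing arenas, otherwise reserve a fresh arena and try to allocate in there, and finally fall back to the OS. The toy program below mirrors only that control flow; every `toy_*` name, the sizes, and the bump-allocator internals are invented for illustration and are not mimalloc code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

// A toy arena: a bump allocator over one malloc'd region.
typedef struct toy_arena_s { size_t used; size_t capacity; char* mem; } toy_arena_t;

#define TOY_MAX_ARENAS 8
static toy_arena_t toy_arenas[TOY_MAX_ARENAS];
static size_t toy_arena_count = 0;

// allocate inside one specific arena (cf. mi_arena_alloc_at / mi_arena_alloc_at_id)
static void* toy_arena_alloc_at(size_t arena_id, size_t size) {
  toy_arena_t* arena = &toy_arenas[arena_id];
  if (arena->used + size > arena->capacity) return NULL;
  void* p = arena->mem + arena->used;
  arena->used += size;
  return p;
}

// try all existing arenas in order (cf. mi_arenas_alloc)
static void* toy_arenas_alloc(size_t size) {
  for (size_t i = 0; i < toy_arena_count; i++) {
    void* p = toy_arena_alloc_at(i, size);
    if (p != NULL) return p;
  }
  return NULL;
}

// reserve a fresh arena (cf. mi_arena_reserve)
static bool toy_arena_reserve(size_t capacity, size_t* arena_id) {
  if (toy_arena_count >= TOY_MAX_ARENAS) return false;
  char* mem = malloc(capacity);
  if (mem == NULL) return false;
  toy_arenas[toy_arena_count] = (toy_arena_t){ .used = 0, .capacity = capacity, .mem = mem };
  *arena_id = toy_arena_count++;
  return true;
}

// overall fallback flow: existing arenas -> reserve a new arena -> plain malloc
static void* toy_alloc(size_t size) {
  void* p = toy_arenas_alloc(size);
  if (p != NULL) return p;
  size_t arena_id;
  if (toy_arena_reserve((size_t)1 << 20, &arena_id)) {  // reserve 1 MiB and try allocate in there
    p = toy_arena_alloc_at(arena_id, size);
    if (p != NULL) return p;
  }
  return malloc(size);                                  // final fallback (stands in for the OS path)
}

int main(void) {
  void* a = toy_alloc(1000);
  void* b = toy_alloc(2000);
  printf("a=%p b=%p arenas=%zu\n", a, b, toy_arena_count);
  return 0;
}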
@@ -575,7 +575,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   const bool eager_delayed = (page_kind <= MI_PAGE_MEDIUM &&          // don't delay for large objects
                               // !_mi_os_has_overcommit() &&          // never delay on overcommit systems
                               _mi_current_thread_count() > 1 &&       // do not delay for the first N threads
-                              tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
+                              tld->peak_count < (size_t)mi_option_get(mi_option_eager_commit_delay));
   const bool eager = !eager_delayed && mi_option_is_enabled(mi_option_eager_commit);
   bool commit = eager; // || (page_kind >= MI_PAGE_LARGE);
   bool is_zero = false;
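The last hunk swaps `tld->count` for `tld->peak_count` in the eager-commit-delay test. A condensed restatement of that predicate, with hypothetical helper and parameter names (`delay_threshold` stands in for `mi_option_get(mi_option_eager_commit_delay)`):

#include <stdbool.h>
#include <stddef.h>

// Delay eager commit only for small/medium pages, only once more than one
// thread is running, and only while the thread's peak count is still below
// the configured delay threshold (the change above uses the peak rather than
// the current count).
static bool eager_commit_delayed(bool is_small_or_medium_page, size_t thread_count,
                                 size_t peak_count, size_t delay_threshold) {
  return is_small_or_medium_page
      && thread_count > 1
      && peak_count < delay_threshold;
}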