mirror of https://github.com/microsoft/mimalloc.git
commit fe2d5aa8ff (parent 65eb5c65ec)
initial work on heaps restricted to specific arena's
@@ -273,6 +273,16 @@ mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size
 mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
 mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;
 
+// Experimental: heaps associated with specific memory arena's
+typedef int mi_arena_id_t;
+mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+
+#if MI_MALLOC_VERSION >= 200
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id, bool exclusive);
+#endif
+
 // deprecated
 mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
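For orientation, here is a minimal usage sketch of the new experimental API declared above. It assumes a v2.x build (so that mi_heap_new_in_arena is declared) and that the rest of this commit wires the arena id through the heap; since this is initial work, treat it as illustrative rather than a stable interface:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Reserve 256 MiB of OS memory as an arena that only serves heaps which
  // explicitly request it (exclusive = true), and get its id back.
  mi_arena_id_t arena_id;
  if (mi_reserve_os_memory_ex(256 * 1024 * 1024, true /* commit */, false /* allow_large */,
                              true /* exclusive */, &arena_id) != 0) {
    fprintf(stderr, "failed to reserve the arena\n");
    return 1;
  }
  // Create a heap whose allocations should come from that arena only.
  mi_heap_t* heap = mi_heap_new_in_arena(arena_id, true /* exclusive */);
  if (heap == NULL) return 1;
  void* p = mi_heap_malloc(heap, 1024);
  mi_free(p);
  mi_heap_destroy(heap);
  return 0;
}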
src/arena.c (137 changed lines)
@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2019-2021, Microsoft Research, Daan Leijen
+Copyright (c) 2019-2022, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -18,12 +18,7 @@ which is sometimes needed for embedded devices or shared memory for example.
 (We can also employ this with WASI or `sbrk` systems to reserve large arenas
 on demand and be able to reuse them efficiently).
 
-The arena allocation needs to be thread safe and we use an atomic
-bitmap to allocate. The current implementation of the bitmap can
-only do this within a field (`size_t`) so we can allocate at most
-blocks of 2GiB (64*32MiB) and no object can cross the boundary. This
-can lead to fragmentation but fortunately most objects will be regions
-of 256MiB in practice.
+The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
 -----------------------------------------------------------------------------*/
 #include "mimalloc.h"
 #include "mimalloc-internal.h"
@@ -53,10 +48,12 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
 #define MI_SEGMENT_ALIGN      MI_SEGMENT_SIZE
 #define MI_ARENA_BLOCK_SIZE   (4*MI_SEGMENT_ALIGN)     // 32MiB
 #define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 16MiB
-#define MI_MAX_ARENAS         (64)                     // not more than 256 (since we use 8 bits in the memid)
+#define MI_MAX_ARENAS         (64)                     // not more than 126 (since we use 8 bits in the memid and a signed 8-bit arena_id + 1)
 
 // A memory arena descriptor
 typedef struct mi_arena_s {
+  mi_arena_id_t id;                       // arena id; 0 for non-specific
+  bool exclusive;                         // only allow allocations if specifically for this arena
   _Atomic(uint8_t*) start;                // the start of the memory area
   size_t block_count;                     // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
   size_t field_count;                     // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
@@ -76,24 +73,60 @@ static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
 static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0
 
 
+/* -----------------------------------------------------------
+  Arena id's
+  0 is used for non-arena's (like OS memory)
+  id = arena_index + 1
+----------------------------------------------------------- */
+
+static size_t mi_arena_id_index(mi_arena_id_t id) {
+  return (size_t)(id < 0 ? MI_MAX_ARENAS : id - 1);
+}
+
+static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
+  mi_assert_internal(arena_index >= 0 && arena_index < MI_MAX_ARENAS);
+  mi_assert_internal(MI_MAX_ARENAS <= 126);
+  int id = (int)arena_index + 1;
+  mi_assert_internal(id >= 1 && id <= 127);
+  return id;
+}
+
+static mi_arena_id_t mi_arena_id_none(void) {
+  return 0;
+}
+
+static bool mi_arena_id_suitable(mi_arena_id_t arena_id, bool exclusive, mi_arena_id_t req_arena_id) {
+  return (!exclusive || arena_id == req_arena_id);
+}
+
+
 /* -----------------------------------------------------------
   Arena allocations get a memory id where the lower 8 bits are
-  the arena index +1, and the upper bits the block index.
+  the arena id, and the upper bits the block index.
 ----------------------------------------------------------- */
 
 // Use `0` as a special id for direct OS allocated memory.
 #define MI_MEMID_OS   0
 
-static size_t mi_arena_id_create(size_t arena_index, mi_bitmap_index_t bitmap_index) {
-  mi_assert_internal(arena_index < 0xFE);
+static size_t mi_arena_memid_create(mi_arena_id_t id, bool exclusive, mi_bitmap_index_t bitmap_index) {
   mi_assert_internal(((bitmap_index << 8) >> 8) == bitmap_index); // no overflow?
-  return ((bitmap_index << 8) | ((arena_index+1) & 0xFF));
+  mi_assert_internal(id >= 0 && id <= 0x7F);
+  return ((bitmap_index << 8) | ((uint8_t)id & 0x7F) | (exclusive ? 0x80 : 0));
 }
 
-static void mi_arena_id_indices(size_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
-  mi_assert_internal(memid != MI_MEMID_OS);
-  *arena_index = (memid & 0xFF) - 1;
-  *bitmap_index = (memid >> 8);
+static bool mi_arena_memid_indices(size_t arena_memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+  mi_assert_internal(arena_memid != MI_MEMID_OS);
+  *bitmap_index = (arena_memid >> 8);
+  mi_arena_id_t id = (int)(arena_memid & 0x7F);
+  *arena_index = mi_arena_id_index(id);
+  return ((arena_memid & 0x80) != 0);
 }
 
+bool _mi_arena_memid_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) {
+  mi_assert_internal(arena_memid != MI_MEMID_OS);
+  mi_arena_id_t id = (int)(arena_memid & 0x7F);
+  bool exclusive = ((arena_memid & 0x80) != 0);
+  return mi_arena_id_suitable(id, exclusive, request_arena_id);
+}
+
 static size_t mi_block_count_of_size(size_t size) {
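To make the new memid layout concrete: the low byte now carries the arena id in bits 0-6 and the exclusive flag in bit 7, while the bitmap (block) index lives in the upper bits; arena ids themselves are simply arena_index + 1, with 0 reserved for plain OS memory. The following standalone sketch re-creates that round trip outside of mimalloc (the demo_ names are illustrative, not part of the library):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative re-creation of mi_arena_memid_create / mi_arena_memid_indices.
static size_t demo_memid_create(int arena_id, bool exclusive, size_t bitmap_index) {
  return (bitmap_index << 8) | ((uint8_t)arena_id & 0x7F) | (exclusive ? 0x80 : 0);
}

static bool demo_memid_decode(size_t memid, int* arena_id, size_t* bitmap_index) {
  *arena_id = (int)(memid & 0x7F);      // arena id in the low 7 bits
  *bitmap_index = (memid >> 8);         // block index in the upper bits
  return ((memid & 0x80) != 0);         // exclusive flag in bit 7
}

int main(void) {
  int id; size_t bit_idx;
  size_t memid = demo_memid_create(3, true, 42);
  bool exclusive = demo_memid_decode(memid, &id, &bit_idx);
  assert(id == 3 && bit_idx == 42 && exclusive);
  return 0;
}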
@@ -119,14 +152,19 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
 ----------------------------------------------------------- */
 
 static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
-                                 bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+                                 bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+                                 mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
   MI_UNUSED(arena_index);
+  mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
+  if (!mi_arena_id_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
+
   mi_bitmap_index_t bitmap_index;
   if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
 
   // claimed it! set the dirty bits (todo: no need for an atomic op here?)
   void* p = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE);
-  *memid = mi_arena_id_create(arena_index, bitmap_index);
+  *memid = mi_arena_memid_create(arena->id, arena->exclusive, bitmap_index);
   *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   *large = arena->is_large;
   *is_pinned = (arena->is_large || !arena->allow_decommit);
@@ -152,7 +190,7 @@ static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t n
 }
 
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                              size_t* memid, mi_os_tld_t* tld)
+                              mi_arena_id_t arena_id, size_t* memid, mi_os_tld_t* tld)
 {
   mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);
@@ -168,8 +206,22 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
 {
   const size_t bcount = mi_block_count_of_size(size);
   const int numa_node = _mi_os_numa_node(tld); // current numa node
 
   mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);
+
+  // try specific arena if so requested
+  size_t arena_index = mi_arena_id_index(arena_id);
+  if (arena_index < MI_MAX_ARENAS) {
+    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]);
+    if ((arena != NULL) &&
+        (arena->numa_node < 0 || arena->numa_node == numa_node) &&  // numa local?
+        (*large || !arena->is_large))  // large OS pages allowed, or arena is not large OS pages
+    {
+      void* p = mi_arena_alloc_from(arena, arena_index, bcount, commit, large, is_pinned, is_zero, arena_id, memid, tld);
+      mi_assert_internal((uintptr_t)p % alignment == 0);
+      if (p != NULL) return p;
+    }
+  }
 
   // try numa affine allocation
   for (size_t i = 0; i < MI_MAX_ARENAS; i++) {
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
@@ -177,7 +229,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
     if ((arena->numa_node<0 || arena->numa_node==numa_node) &&  // numa local?
         (*large || !arena->is_large))  // large OS pages allowed, or arena is not large OS pages
     {
-      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
+      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -189,7 +241,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
     if ((arena->numa_node>=0 && arena->numa_node!=numa_node) &&  // not numa local!
         (*large || !arena->is_large))  // large OS pages allowed, or arena is not large OS pages
     {
-      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
+      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -208,9 +260,9 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
   return p;
 }
 
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t arena_id, size_t* memid, mi_os_tld_t* tld)
 {
-  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, memid, tld);
+  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, arena_id, memid, tld);
 }
 
 /* -----------------------------------------------------------
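All of the allocation paths above now thread the requested arena id down to mi_arena_alloc_from, which rejects an unsuitable arena via mi_arena_id_suitable before touching its bitmap: the requested arena is tried first, then NUMA-local arenas, then the remaining ones. The predicate itself is small enough to illustrate standalone (the demo_ names are illustrative re-creations, not mimalloc symbols): a non-exclusive arena serves any request, while an exclusive arena only serves requests that name its id.

#include <assert.h>
#include <stdbool.h>

typedef int demo_arena_id_t;

// Re-creation of the mi_arena_id_suitable predicate from this commit, for illustration only.
static bool demo_arena_id_suitable(demo_arena_id_t arena_id, bool exclusive, demo_arena_id_t req_arena_id) {
  return (!exclusive || arena_id == req_arena_id);
}

int main(void) {
  assert( demo_arena_id_suitable(3, false, 0));  // shared arena: serves requests with no specific arena
  assert( demo_arena_id_suitable(3, true,  3));  // exclusive arena: serves a request that names it
  assert(!demo_arena_id_suitable(3, true,  5));  // exclusive arena: rejects requests for other arenas
  return 0;
}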
@@ -229,7 +281,7 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
     // allocated in an arena
     size_t arena_idx;
     size_t bitmap_idx;
-    mi_arena_id_indices(memid, &arena_idx, &bitmap_idx);
+    mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
     mi_assert_internal(arena_idx < MI_MAX_ARENAS);
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]);
     mi_assert_internal(arena != NULL);
@@ -267,10 +319,11 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
   Add an arena.
 ----------------------------------------------------------- */
 
-static bool mi_arena_add(mi_arena_t* arena) {
+static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) {
   mi_assert_internal(arena != NULL);
   mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
   mi_assert_internal(arena->block_count > 0);
+  if (arena_id != NULL) *arena_id = -1;
 
   size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
   if (i >= MI_MAX_ARENAS) {
@@ -278,11 +331,14 @@ static bool mi_arena_add(mi_arena_t* arena) {
     return false;
   }
   mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
+  arena->id = mi_arena_id_create(i);
+  if (arena_id != NULL) *arena_id = arena->id;
   return true;
 }
 
-bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept
+bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
 {
+  if (arena_id != NULL) *arena_id = mi_arena_id_none();
   if (size < MI_ARENA_BLOCK_SIZE) return false;
 
   if (is_large) {
@@ -297,6 +353,8 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
   mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
   if (arena == NULL) return false;
 
+  arena->id = mi_arena_id_none();
+  arena->exclusive = exclusive;
   arena->block_count = bcount;
   arena->field_count = fields;
   arena->start = (uint8_t*)start;
@@ -321,18 +379,19 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
     _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
   }
 
-  mi_arena_add(arena);
-  return true;
+  return mi_arena_add(arena, arena_id);
+
 }
 
 // Reserve a range of regular OS memory
-int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept
+int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
 {
+  if (arena_id != NULL) *arena_id = mi_arena_id_none();
   size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
   bool large = allow_large;
   void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &_mi_stats_main);
   if (start==NULL) return ENOMEM;
-  if (!mi_manage_os_memory(start, size, (large || commit), large, true, -1)) {
+  if (!mi_manage_os_memory_ex(start, size, (large || commit), large, true, -1, exclusive, arena_id)) {
     _mi_os_free_ex(start, size, commit, &_mi_stats_main);
     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
     return ENOMEM;
@@ -341,12 +400,21 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
   return 0;
 }
 
+bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
+  return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false, NULL);
+}
+
+int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
+  return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
+}
+
+
 /* -----------------------------------------------------------
   Reserve a huge page arena.
 ----------------------------------------------------------- */
 // reserve at a specific numa node
-int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+  if (arena_id != NULL) *arena_id = -1;
   if (pages==0) return 0;
   if (numa_node < -1) numa_node = -1;
   if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
@@ -359,13 +427,16 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
   }
   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
 
-  if (!mi_manage_os_memory(p, hsize, true, true, true, numa_node)) {
+  if (!mi_manage_os_memory_ex(p, hsize, true, true, true, numa_node, exclusive, arena_id)) {
     _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
     return ENOMEM;
   }
   return 0;
 }
 
+int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+  return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
+}
+
 // reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
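As with the regular reservation call, the huge-page variant can now hand back an arena id. A hedged usage sketch (the page count, NUMA node, and timeout below are illustrative; each reserved huge page is 1 GiB):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_arena_id_t arena_id;
  // Try to reserve 4 x 1 GiB huge pages on NUMA node 0, waiting at most 10 seconds,
  // and keep the resulting arena exclusive to heaps that explicitly request it.
  if (mi_reserve_huge_os_pages_at_ex(4, 0, 10000, true /* exclusive */, &arena_id) != 0) {
    fprintf(stderr, "could not reserve huge pages\n");
    return 1;
  }
  printf("reserved huge-page arena with id %d\n", arena_id);
  return 0;
}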