make memids abstract for safety

daanx 2023-04-13 12:17:52 -07:00
parent 290443aee6
commit c90a2bbd0a
5 changed files with 90 additions and 44 deletions

include/mimalloc/internal.h

@@ -115,11 +115,12 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, size_t committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id);
-bool _mi_arena_is_os_allocated(size_t arena_memid);
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed, mi_stats_t* stats);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
+bool _mi_arena_memid_is_os_allocated(mi_memid_t memid);
+bool _mi_arena_memid_is_static_allocated(mi_memid_t memid);
 void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);
 bool _mi_arena_contains(const void* p);
@@ -170,7 +171,7 @@ void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
 void _mi_heap_destroy_all(void);
-bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);

 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
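
The safety motivation is visible in these prototypes: a memid used to be a bare size_t, so any integer, or a pair of swapped arguments, would compile silently. As a distinct struct type it no longer converts implicitly. A minimal standalone sketch of the difference, using simplified stand-in types rather than the actual mimalloc headers:

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

typedef int mi_arena_id_t;
typedef struct mi_memid_s { mi_arena_id_t arena_id; } mi_memid_t;  // reduced to one field here

// old style: a memid is just a size_t, so mixed-up arguments compile fine
static void free_old(void* p, size_t size, size_t memid) {
  (void)p; printf("old: size=%zu memid=%zu\n", size, memid);
}

// new style: a size_t no longer converts silently into a mi_memid_t
static void free_new(void* p, size_t size, mi_memid_t memid) {
  (void)p; printf("new: size=%zu arena_id=%d\n", size, memid.arena_id);
}

int main(void) {
  size_t size = 64, raw_memid = 42;
  free_old(NULL, raw_memid, size);    // arguments swapped: accepted, silently wrong
  mi_memid_t memid = { .arena_id = 1 };
  free_new(NULL, size, memid);        // ok
  // free_new(NULL, size, raw_memid); // rejected: incompatible type for 'memid'
  return 0;
}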

include/mimalloc/types.h

@@ -319,12 +319,29 @@ typedef enum mi_page_kind_e {
   MI_PAGE_HUGE    // huge blocks (>512KiB) are put into a single page in a segment of the exact size (but still 2MiB aligned)
 } mi_page_kind_t;

+// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
+typedef enum mi_memkind_e {
+  MI_MEM_NONE,
+  MI_MEM_OS,
+  MI_MEM_STATIC,
+  MI_MEM_ARENA
+} mi_memkind_t;
+
+typedef struct mi_memid_s {
+  size_t        arena_idx;
+  mi_arena_id_t arena_id;
+  bool          arena_is_exclusive;
+  mi_memkind_t  memkind;
+} mi_memid_t;
+

 // Segments are large allocated memory blocks (2MiB on 64 bit) from
 // the OS. Inside segments we allocated fixed size _pages_ that
 // contain blocks.
 typedef struct mi_segment_s {
   // memory fields
-  size_t      memid;             // id for the os-level memory manager
+  mi_memid_t  memid;             // id for the os-level memory manager
   bool        mem_is_pinned;     // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
   bool        mem_is_large;      // `true` if the memory is in OS large or huge pages. (`is_pinned` will be true)
   bool        mem_is_committed;  // `true` if the whole segment is eagerly committed
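
To see the new type in action, the standalone sketch below reuses the definitions from this hunk (with mi_arena_id_t reduced to the int it is in mimalloc.h) and shows the kind of dispatch the memkind tag enables. The example_owner helper is hypothetical; the real dispatch lives in _mi_arena_free in arena.c:

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

typedef int mi_arena_id_t;  // as in mimalloc.h

typedef enum mi_memkind_e {
  MI_MEM_NONE,
  MI_MEM_OS,
  MI_MEM_STATIC,
  MI_MEM_ARENA
} mi_memkind_t;

typedef struct mi_memid_s {
  size_t        arena_idx;
  mi_arena_id_t arena_id;
  bool          arena_is_exclusive;
  mi_memkind_t  memkind;
} mi_memid_t;

// hypothetical: report how memory with this memid would be released
static const char* example_owner(mi_memid_t memid) {
  switch (memid.memkind) {
    case MI_MEM_OS:     return "return it to the OS";
    case MI_MEM_ARENA:  return "clear its blocks in the owning arena";
    case MI_MEM_STATIC: return "statically allocated, nothing to free";
    default:            return "no memory attached";
  }
}

int main(void) {
  mi_memid_t memid = { .arena_idx = 42, .arena_id = 1, .arena_is_exclusive = false, .memkind = MI_MEM_ARENA };
  printf("%s\n", example_owner(memid));
  return 0;
}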

src/arena.c

@@ -70,7 +70,6 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_committed,
 /* -----------------------------------------------------------
   Arena id's
-  0 is used for non-arena's (like OS memory)
   id = arena_index + 1
 ----------------------------------------------------------- */
@@ -80,10 +79,7 @@ static size_t mi_arena_id_index(mi_arena_id_t id) {
 static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
   mi_assert_internal(arena_index < MI_MAX_ARENAS);
-  mi_assert_internal(MI_MAX_ARENAS <= 126);
-  int id = (int)arena_index + 1;
-  mi_assert_internal(id >= 1 && id <= 127);
-  return id;
+  return (int)arena_index + 1;
 }

 mi_arena_id_t _mi_arena_id_none(void) {
@@ -95,36 +91,67 @@ static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclus
           (arena_id == req_arena_id));
 }

 /* -----------------------------------------------------------
-  Arena allocations get a memory id where the lower 8 bits are
-  the arena id, and the upper bits the block index.
+  memory id's
 ----------------------------------------------------------- */

-// Use `0` as a special id for direct OS allocated memory.
-#define MI_MEMID_OS   0
-
-static size_t mi_arena_memid_create(mi_arena_id_t id, bool exclusive, mi_bitmap_index_t bitmap_index) {
-  mi_assert_internal(((bitmap_index << 8) >> 8) == bitmap_index); // no overflow?
-  mi_assert_internal(id >= 0 && id <= 0x7F);
-  return ((bitmap_index << 8) | ((uint8_t)id & 0x7F) | (exclusive ? 0x80 : 0));
-}
-
-static bool mi_arena_memid_indices(size_t arena_memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
-  *bitmap_index = (arena_memid >> 8);
-  mi_arena_id_t id = (int)(arena_memid & 0x7F);
-  *arena_index = mi_arena_id_index(id);
-  return ((arena_memid & 0x80) != 0);
-}
-
-bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) {
-  mi_arena_id_t id = (int)(arena_memid & 0x7F);
-  bool exclusive = ((arena_memid & 0x80) != 0);
-  return mi_arena_id_is_suitable(id, exclusive, request_arena_id);
-}
-
-bool _mi_arena_is_os_allocated(size_t arena_memid) {
-  return (arena_memid == MI_MEMID_OS);
-}
+static mi_memid_t mi_arena_memid_none(void) {
+  mi_memid_t memid;
+  memid.memkind = MI_MEM_NONE;
+  memid.arena_id = 0;
+  memid.arena_idx = 0;
+  memid.arena_is_exclusive = false;
+  return memid;
+}
+
+static mi_memid_t mi_arena_memid_os(void) {
+  mi_memid_t memid = mi_arena_memid_none();
+  memid.memkind = MI_MEM_OS;
+  return memid;
+}
+
+/*
+static mi_memid_t mi_arena_memid_static(void) {
+  mi_memid_t memid = mi_arena_memid_none();
+  memid.memkind = MI_MEM_STATIC;
+  return memid;
+}
+*/
+
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
+  // note: works also for OS and STATIC memory with a zero arena_id.
+  return mi_arena_id_is_suitable(memid.arena_id, memid.arena_is_exclusive, request_arena_id);
+}
+
+bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
+  return (memid.memkind == MI_MEM_OS);
+}
+
+bool _mi_arena_is_static_allocated(mi_memid_t memid) {
+  return (memid.memkind == MI_MEM_STATIC);
+}
+
+/* -----------------------------------------------------------
+  Arena allocations get a (currently) 16-bit memory id where the
+  lower 8 bits are the arena id, and the upper bits the block index.
+----------------------------------------------------------- */
+
+static mi_memid_t mi_arena_memid_create(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
+  mi_memid_t memid;
+  memid.memkind = MI_MEM_ARENA;
+  memid.arena_id = id;
+  memid.arena_idx = bitmap_index;
+  memid.arena_is_exclusive = is_exclusive;
+  return memid;
+}
+
+static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
+  *arena_index = mi_arena_id_index(memid.arena_id);
+  *bitmap_index = memid.arena_idx;
+  return memid.arena_is_exclusive;
+}

 static size_t mi_block_count_of_size(size_t size) {
@@ -163,7 +190,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
 static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
                                                 bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                                                mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
+                                                mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
   MI_UNUSED(arena_index);
   mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
@@ -214,7 +241,7 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
 // allocate in a speficic arena
 static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t size, size_t alignment,
                                   bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                                  mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
+                                  mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED_RELEASE(alignment);
   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -236,7 +263,7 @@ static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t
 // allocate from an arena with fallback to the OS
 static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
                                               bool* is_pinned, bool* is_zero,
-                                              mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
+                                              mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED(alignment);
   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -317,12 +344,13 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
   return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
 }

 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                              mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
+                              mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
   mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);
-  *memid = MI_MEMID_OS;
+  *memid = mi_arena_memid_none();
   *is_zero = false;
   *is_pinned = false;
@@ -350,13 +378,13 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
     return NULL;
   }

-  *memid = MI_MEMID_OS;
+  *memid = mi_arena_memid_os();
   void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, *commit, large, is_zero, tld->stats);
   if (p != NULL) { *is_pinned = *large; }
   return p;
 }

-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
   return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
 }
@@ -538,7 +566,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
   Arena free
 ----------------------------------------------------------- */

-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, size_t committed_size, mi_stats_t* stats) {
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed_size, mi_stats_t* stats) {
   mi_assert_internal(size > 0 && stats != NULL);
   mi_assert_internal(committed_size <= size);
   if (p==NULL) return;
@@ -546,7 +574,7 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
   const bool all_committed = (committed_size == size);

-  if (memid == MI_MEMID_OS) {
+  if (_mi_arena_memid_is_os_allocated(memid)) {
     // was a direct OS allocation, pass through
     if (!all_committed && committed_size > 0) {
       // if partially committed, adjust the committed stats
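
For contrast, this standalone sketch re-implements the retired bit-packed encoding that the struct replaces. It shows why the old code needed the overflow assertion and the 0x7F mask (hence the old 127-arena ceiling); with a field per value those constraints disappear:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

typedef size_t mi_bitmap_index_t;

// the retired scheme: exclusive flag in bit 7, arena id in bits 0..6,
// bitmap index in the remaining bits of a single size_t
static size_t memid_encode_old(int id, bool exclusive, mi_bitmap_index_t bitmap_index) {
  assert(((bitmap_index << 8) >> 8) == bitmap_index);  // would be truncated silently otherwise
  return ((bitmap_index << 8) | ((uint8_t)id & 0x7F) | (exclusive ? 0x80 : 0));
}

int main(void) {
  size_t m = memid_encode_old(3, true, 1000);
  assert((int)(m & 0x7F) == 3);  // arena id recovered by masking
  assert((m & 0x80) != 0);       // exclusive flag recovered from bit 7
  assert((m >> 8) == 1000);      // bitmap index recovered by shifting
  // mi_memid_t stores each of these as a named field instead: no masks,
  // no shifts, no arena-count limit, and nothing for a caller to decode.
  return 0;
}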

src/heap.c

@@ -221,7 +221,7 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
   return mi_heap_new_in_arena(_mi_arena_id_none());
 }

-bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid) {
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
   return _mi_arena_memid_is_suitable(memid, heap->arena_id);
 }

src/segment.c

@@ -513,7 +513,7 @@ static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignme
                                          size_t pre_size, size_t info_size,
                                          size_t* segment_size, bool* is_zero, bool* commit, mi_segments_tld_t* tld, mi_os_tld_t* tld_os)
 {
-  size_t memid;
+  mi_memid_t memid;
   bool   mem_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
   bool   is_pinned = false;
   size_t align_offset = 0;
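
The rest of mi_segment_os_alloc (not shown in this hunk) passes &memid to _mi_arena_alloc_aligned and records the result in the segment, so the same value flows back to _mi_arena_free unmodified. A minimal standalone sketch of that round trip, with simplified stand-in types and malloc/free in place of the real allocators:

#include <stddef.h>
#include <stdlib.h>

typedef enum mi_memkind_e { MI_MEM_NONE, MI_MEM_OS, MI_MEM_STATIC, MI_MEM_ARENA } mi_memkind_t;
typedef struct mi_memid_s { mi_memkind_t memkind; } mi_memid_t;  // reduced to the tag here
typedef struct segment_s  { mi_memid_t memid; void* start; } segment_t;

// stand-in for _mi_arena_alloc_aligned: the allocator fills in the memid
static void* alloc_with_memid(size_t size, mi_memid_t* memid) {
  memid->memkind = MI_MEM_OS;  // pretend the memory came straight from the OS
  return malloc(size);
}

// stand-in for _mi_arena_free: dispatches on the recorded kind, no decoding
static void free_with_memid(void* p, mi_memid_t memid) {
  if (memid.memkind == MI_MEM_OS) free(p);
}

int main(void) {
  segment_t seg;
  seg.start = alloc_with_memid(4096, &seg.memid);  // memid filled by the allocator
  free_with_memid(seg.start, seg.memid);           // value handed back as-is
  return 0;
}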