merge from dev-reset

daanx 2023-04-13 13:37:56 -07:00
commit 991d04b2b1
8 changed files with 180 additions and 71 deletions

View File

@ -116,17 +116,19 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
// arena.c
mi_arena_id_t _mi_arena_id_none(void);
void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, size_t committed, mi_stats_t* stats);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id);
bool _mi_arena_is_os_allocated(size_t arena_memid);
void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed, mi_stats_t* stats);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
bool _mi_arena_memid_is_os_allocated(mi_memid_t memid);
void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);
bool _mi_arena_contains(const void* p);
void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void _mi_arena_meta_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
// "segment-cache.c"
void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* purge_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* purge_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* purge_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
bool _mi_segment_cache_push(void* start, size_t size, mi_memid_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* purge_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
void _mi_segment_cache_free_all(mi_os_tld_t* tld);
@ -178,9 +180,8 @@ uint8_t _mi_bin(size_t size); // for stats
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);
void _mi_heap_destroy_all(void);
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);

View File

@ -367,11 +367,27 @@ typedef mi_page_t mi_slice_t;
typedef int64_t mi_msecs_t;
// Memory can reside in an arena, be allocated directly from the OS, or be statically allocated. The memid keeps track of this.
typedef enum mi_memkind_e {
MI_MEM_NONE,
MI_MEM_OS,
MI_MEM_STATIC,
MI_MEM_ARENA
} mi_memkind_t;
typedef struct mi_memid_s {
size_t arena_idx;
mi_arena_id_t arena_id;
bool arena_is_exclusive;
mi_memkind_t memkind;
} mi_memid_t;
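For orientation, here is a minimal standalone sketch (not part of the commit) of how callers can branch on the new memkind when releasing memory, mirroring what _mi_arena_free does later in this diff; the types are re-declared locally so the sketch compiles on its own, and the helper name is hypothetical:

// standalone mirror of the definitions above, for illustration only
#include <stdbool.h>
#include <stddef.h>

typedef int mi_arena_id_t;
typedef enum { MI_MEM_NONE, MI_MEM_OS, MI_MEM_STATIC, MI_MEM_ARENA } mi_memkind_t;
typedef struct {
  size_t        arena_idx;           // bitmap index within the arena (MI_MEM_ARENA only)
  mi_arena_id_t arena_id;            // owning arena, 0 for OS or static memory
  bool          arena_is_exclusive;  // whether the arena is exclusive to one heap
  mi_memkind_t  memkind;
} mi_memid_t;

// hypothetical release helper: dispatch on the memory kind
static void example_release(void* p, size_t size, mi_memid_t memid) {
  switch (memid.memkind) {
    case MI_MEM_STATIC: break;   // static area: nothing to free
    case MI_MEM_OS:     break;   // direct OS allocation: return it to the OS
    case MI_MEM_ARENA:  break;   // arena block: clear its bits at memid.arena_idx
    default:            break;   // MI_MEM_NONE: nothing was allocated
  }
  (void)p; (void)size;
}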
// Segments are large allocated memory blocks (8MiB on 64-bit) from
// the OS. Inside segments we allocate fixed size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
size_t memid; // memory id for arena allocation
mi_memid_t memid; // memory id for arena allocation
bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
bool mem_is_large; // in large/huge os pages?
bool mem_is_committed; // `true` if the whole segment is eagerly committed

View File

@ -47,6 +47,7 @@ typedef struct mi_arena_s {
size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
size_t meta_size; // size of the arena structure itself including the bitmaps
mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation)
int numa_node; // associated NUMA node
bool is_zero_init; // is the arena zero initialized?
bool allow_decommit; // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
@ -70,7 +71,6 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_committed,
/* -----------------------------------------------------------
Arena id's
0 is used for non-arena's (like OS memory)
id = arena_index + 1
----------------------------------------------------------- */
@ -80,10 +80,7 @@ static size_t mi_arena_id_index(mi_arena_id_t id) {
static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
mi_assert_internal(arena_index < MI_MAX_ARENAS);
mi_assert_internal(MI_MAX_ARENAS <= 126);
int id = (int)arena_index + 1;
mi_assert_internal(id >= 1 && id <= 127);
return id;
return (int)arena_index + 1;
}
mi_arena_id_t _mi_arena_id_none(void) {
@ -95,36 +92,59 @@ static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclus
(arena_id == req_arena_id));
}
/* -----------------------------------------------------------
Arena allocations get a memory id where the lower 8 bits are
the arena id, and the upper bits the block index.
memory id's
----------------------------------------------------------- */
// Use `0` as a special id for direct OS allocated memory.
#define MI_MEMID_OS 0
static size_t mi_arena_memid_create(mi_arena_id_t id, bool exclusive, mi_bitmap_index_t bitmap_index) {
mi_assert_internal(((bitmap_index << 8) >> 8) == bitmap_index); // no overflow?
mi_assert_internal(id >= 0 && id <= 0x7F);
return ((bitmap_index << 8) | ((uint8_t)id & 0x7F) | (exclusive ? 0x80 : 0));
static mi_memid_t mi_arena_memid_none(void) {
mi_memid_t memid;
memid.memkind = MI_MEM_NONE;
memid.arena_id = 0;
memid.arena_idx = 0;
memid.arena_is_exclusive = false;
return memid;
}
static bool mi_arena_memid_indices(size_t arena_memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
*bitmap_index = (arena_memid >> 8);
mi_arena_id_t id = (int)(arena_memid & 0x7F);
*arena_index = mi_arena_id_index(id);
return ((arena_memid & 0x80) != 0);
static mi_memid_t mi_arena_memid_os(void) {
mi_memid_t memid = mi_arena_memid_none();
memid.memkind = MI_MEM_OS;
return memid;
}
bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) {
mi_arena_id_t id = (int)(arena_memid & 0x7F);
bool exclusive = ((arena_memid & 0x80) != 0);
return mi_arena_id_is_suitable(id, exclusive, request_arena_id);
static mi_memid_t mi_arena_memid_static(void) {
mi_memid_t memid = mi_arena_memid_none();
memid.memkind = MI_MEM_STATIC;
return memid;
}
bool _mi_arena_is_os_allocated(size_t arena_memid) {
return (arena_memid == MI_MEMID_OS);
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
// note: works also for OS and STATIC memory with a zero arena_id.
return mi_arena_id_is_suitable(memid.arena_id, memid.arena_is_exclusive, request_arena_id);
}
bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
return (memid.memkind == MI_MEM_OS);
}
/* -----------------------------------------------------------
Arena allocations get a memory id that records the owning arena id
and the block (bitmap) index of the allocation within that arena.
----------------------------------------------------------- */
static mi_memid_t mi_arena_memid_create(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
mi_memid_t memid;
memid.memkind = MI_MEM_ARENA;
memid.arena_id = id;
memid.arena_idx = bitmap_index;
memid.arena_is_exclusive = is_exclusive;
return memid;
}
static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
mi_assert_internal(memid.memkind == MI_MEM_ARENA);
*arena_index = mi_arena_id_index(memid.arena_id);
*bitmap_index = memid.arena_idx;
return memid.arena_is_exclusive;
}
static size_t mi_block_count_of_size(size_t size) {
@ -140,6 +160,70 @@ static size_t mi_arena_size(mi_arena_t* arena) {
}
/* -----------------------------------------------------------
Special static area for mimalloc internal structures
to avoid OS calls (for example, for the arena and thread
metadata)
----------------------------------------------------------- */
#define MI_ARENA_STATIC_MAX (MI_INTPTR_SIZE*8*MI_KiB) // 64 KiB on 64-bit
static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
static _Atomic(size_t) mi_arena_static_top;
static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
*memid = mi_arena_memid_static();
if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
if (mi_atomic_load_relaxed(&mi_arena_static_top) >= MI_ARENA_STATIC_MAX) return NULL;
// try to claim space
if (alignment == 0) { alignment = 1; }
const size_t oversize = size + alignment - 1;
if (oversize > MI_ARENA_STATIC_MAX) return NULL;
const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
size_t top = oldtop + oversize;
if (top > MI_ARENA_STATIC_MAX) {
// try to roll back, ok if this fails
mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
return NULL;
}
// success
*memid = mi_arena_memid_static();
const size_t start = _mi_align_up(oldtop, alignment);
uint8_t* const p = &mi_arena_static[start];
_mi_memzero(p, size);
return p;
}
void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
*memid = mi_arena_memid_none();
// try static
void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
if (p != NULL) {
*memid = mi_arena_memid_static();
return p;
}
// or fall back to the OS
bool is_zero = false;
p = _mi_os_alloc(size, &is_zero, stats);
if (p != NULL) {
*memid = mi_arena_memid_os();
if (!is_zero) { _mi_memzero(p, size); }
return p;
}
return NULL;
}
void _mi_arena_meta_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
if (memid.memkind == MI_MEM_OS) {
_mi_os_free(p, size, stats);
}
}
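The static area above is a simple atomic bump allocator with a best-effort rollback when it overflows. Below is a standalone C11 sketch of the same technique, using standard atomics rather than mimalloc's wrappers and assuming a power-of-two alignment; the names are illustrative, not mimalloc's:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define STATIC_MAX (64 * 1024)
static uint8_t static_area[STATIC_MAX];
static _Atomic size_t static_top;   // bytes claimed so far

// claim `size` zeroed bytes from the fixed area, or return NULL if it does not fit
static void* static_zalloc(size_t size, size_t alignment) {
  if (size == 0 || size > STATIC_MAX) return NULL;
  if (alignment == 0) alignment = 1;
  const size_t oversize = size + alignment - 1;                 // room to align inside the claim
  const size_t oldtop = atomic_fetch_add(&static_top, oversize);
  size_t top = oldtop + oversize;
  if (top > STATIC_MAX) {
    // over-claimed: try to roll the top back; fine if another thread already advanced it
    atomic_compare_exchange_strong(&static_top, &top, oldtop);
    return NULL;
  }
  const size_t start = (oldtop + alignment - 1) & ~(alignment - 1);  // align within the claim
  void* p = &static_area[start];
  memset(p, 0, size);
  return p;
}

Failed claims are only rolled back opportunistically, so repeated failures can waste space in the static area; that is acceptable here because _mi_arena_meta_zalloc falls back to the OS.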
/* -----------------------------------------------------------
Thread safe allocation in an arena
@ -163,7 +247,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
bool* commit, bool* large, bool* is_pinned, bool* is_zero,
mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
MI_UNUSED(arena_index);
mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
@ -214,7 +298,7 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
// allocate in a specific arena
static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t size, size_t alignment,
bool* commit, bool* large, bool* is_pinned, bool* is_zero,
mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
MI_UNUSED_RELEASE(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@ -236,7 +320,7 @@ static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t
// allocate from an arena with fallback to the OS
static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
bool* is_pinned, bool* is_zero,
mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
MI_UNUSED(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@ -317,12 +401,13 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
}
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
mi_assert_internal(size > 0);
*memid = MI_MEMID_OS;
*memid = mi_arena_memid_none();
*is_zero = false;
*is_pinned = false;
@ -350,13 +435,13 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
return NULL;
}
*memid = MI_MEMID_OS;
*memid = mi_arena_memid_os();
void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, *commit, large, is_zero, tld->stats);
if (p != NULL) { *is_pinned = *large; }
return p;
}
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
}
@ -538,15 +623,17 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
Arena free
----------------------------------------------------------- */
void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, size_t committed_size, mi_stats_t* stats) {
void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed_size, mi_stats_t* stats) {
mi_assert_internal(size > 0 && stats != NULL);
mi_assert_internal(committed_size <= size);
if (p==NULL) return;
if (size==0) return;
const bool all_committed = (committed_size == size);
if (memid == MI_MEMID_OS) {
if (memid.memkind == MI_MEM_STATIC) {
// nothing to do
}
else if (memid.memkind == MI_MEM_OS) {
// was a direct OS allocation, pass through
if (!all_committed && committed_size > 0) {
// if partially committed, adjust the committed stats
@ -632,7 +719,7 @@ static void mi_arenas_destroy(void) {
else {
_mi_os_free(arena->start, mi_arena_size(arena), &_mi_stats_main);
}
_mi_os_free(arena, arena->meta_size, &_mi_stats_main);
_mi_arena_meta_free(arena, arena->meta_size, arena->meta_memid, &_mi_stats_main);
}
else {
new_max_arena = i;
@ -703,16 +790,17 @@ static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_committed,
const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
const size_t bitmaps = (allow_decommit ? 4 : 2);
const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, NULL, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
mi_memid_t meta_memid;
mi_arena_t* arena = (mi_arena_t*)_mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
if (arena == NULL) return false;
_mi_memzero(arena, asize);
// already zero'd by _mi_arena_meta_zalloc
// _mi_memzero(arena, asize);
arena->id = _mi_arena_id_none();
arena->exclusive = exclusive;
arena->owned = owned;
arena->meta_size = asize;
arena->meta_memid = meta_memid;
arena->block_count = bcount;
arena->field_count = fields;
arena->start = (uint8_t*)start;

View File

@ -232,7 +232,7 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
return mi_heap_new_in_arena(_mi_arena_id_none());
}
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid) {
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
return _mi_arena_memid_is_suitable(memid, heap->arena_id);
}

View File

@ -203,6 +203,7 @@ mi_heap_t* _mi_heap_main_get(void) {
typedef struct mi_thread_data_s {
mi_heap_t heap; // must come first due to cast in `_mi_heap_done`
mi_tld_t tld;
mi_memid_t memid;
} mi_thread_data_t;
@ -214,28 +215,35 @@ typedef struct mi_thread_data_s {
#define TD_CACHE_SIZE (8)
static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];
static mi_thread_data_t* mi_thread_data_alloc(void) {
static mi_thread_data_t* mi_thread_data_zalloc(void) {
// try to find thread metadata in the cache
mi_thread_data_t* td;
for (int i = 0; i < TD_CACHE_SIZE; i++) {
td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
if (td != NULL) {
// found cached allocation, try to use it
td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
if (td != NULL) {
_mi_memzero(td, sizeof(*td));
return td;
}
}
}
// if that fails, allocate directly from the OS
td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), NULL, &_mi_stats_main);
// if that fails, allocate as meta data
mi_memid_t memid;
td = (mi_thread_data_t*)_mi_arena_meta_zalloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
if (td == NULL) {
// if this fails, try once more. (issue #257)
td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), NULL, &_mi_stats_main);
td = (mi_thread_data_t*)_mi_arena_meta_zalloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
if (td == NULL) {
// really out of memory
_mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
}
}
if (td != NULL) {
td->memid = memid;
}
return td;
}
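mi_thread_data_zalloc pairs with a small fixed-size cache (td_cache) so short-lived threads can reuse metadata without OS calls: a relaxed peek finds a candidate slot, then an atomic exchange claims it. Below is a standalone C11 sketch of that pop/push pattern; the push side is illustrative, since the diff only shows the pop path and the fallback free:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define CACHE_SIZE 8
typedef struct item_s { int payload; } item_t;          // stand-in for mi_thread_data_t
static _Atomic(item_t*) cache[CACHE_SIZE];

// pop any cached item: cheap relaxed peek, then exchange to claim the slot
static item_t* cache_pop(void) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    item_t* it = atomic_load_explicit(&cache[i], memory_order_relaxed);
    if (it != NULL) {
      it = atomic_exchange(&cache[i], NULL);            // may lose the race and see NULL
      if (it != NULL) return it;
    }
  }
  return NULL;
}

// push into the first empty slot; the caller frees the item when this returns false
static bool cache_push(item_t* it) {
  for (int i = 0; i < CACHE_SIZE; i++) {
    item_t* expected = NULL;
    if (atomic_compare_exchange_strong(&cache[i], &expected, it)) return true;
  }
  return false;
}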
@ -251,7 +259,7 @@ static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
}
}
// if that fails, just free it directly
_mi_os_free(tdfree, sizeof(mi_thread_data_t), &_mi_stats_main);
_mi_arena_meta_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
}
static void mi_thread_data_collect(void) {
@ -279,10 +287,9 @@ static bool _mi_heap_init(void) {
}
else {
// use `_mi_os_alloc` to allocate directly from the OS
mi_thread_data_t* td = mi_thread_data_alloc();
mi_thread_data_t* td = mi_thread_data_zalloc();
if (td == NULL) return false;
// OS allocated so already zero initialized
mi_tld_t* tld = &td->tld;
mi_heap_t* heap = &td->heap;
_mi_memcpy_aligned(tld, &tld_empty, sizeof(*tld));

View File

@ -678,7 +678,6 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
#endif
mi_assert_internal(page->is_committed);
mi_assert_internal(!page->is_reset);
mi_assert_internal(page->capacity == 0);
mi_assert_internal(page->free == NULL);
mi_assert_internal(page->used == 0);

View File

@ -26,7 +26,7 @@ terms of the MIT license. A copy of the license can be found in the file
typedef struct mi_cache_slot_s {
void* p;
size_t memid;
mi_memid_t memid;
bool is_pinned;
mi_commit_mask_t commit_mask;
mi_commit_mask_t purge_mask;
@ -50,7 +50,7 @@ mi_decl_noinline static void* mi_segment_cache_pop_ex(
size_t size, mi_commit_mask_t* commit_mask,
mi_commit_mask_t* purge_mask, bool large_allowed,
bool* large, bool* is_pinned, bool* is_zero,
mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
mi_arena_id_t _req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
#ifdef MI_CACHE_DISABLE
return NULL;
@ -107,7 +107,7 @@ mi_decl_noinline static void* mi_segment_cache_pop_ex(
}
mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* purge_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* purge_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
return mi_segment_cache_pop_ex(false, size, commit_mask, purge_mask, large_allowed, large, is_pinned, is_zero, _req_arena_id, memid, tld);
}
@ -195,7 +195,7 @@ void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
bool is_pinned;
bool is_zero;
bool is_large;
size_t memid;
mi_memid_t memid;
const size_t size = MI_SEGMENT_SIZE;
void* p;
do {
@ -210,7 +210,7 @@ void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
} while (p != NULL);
}
mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* purge_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, mi_memid_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* purge_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
{
#ifdef MI_CACHE_DISABLE
return false;
@ -227,7 +227,7 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
//
// (note: we could also allow segments that are already fully decommitted but that never happens
// as the first slice is always committed (for the segment metadata))
if (!_mi_arena_is_os_allocated(memid) && is_pinned) return false;
if (!_mi_arena_memid_is_os_allocated(memid) && is_pinned) return false;
// numa node determines start field
int numa_node = _mi_os_numa_node(NULL);

View File

@ -813,16 +813,15 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
Segment allocation
----------------------------------------------------------- */
static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delay, mi_arena_id_t req_arena_id,
static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id,
size_t* psegment_slices, size_t* ppre_size, size_t* pinfo_slices,
mi_commit_mask_t* pcommit_mask, mi_commit_mask_t* ppurge_mask,
bool* is_zero, bool* pcommit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
// Allocate the segment from the OS
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
bool is_pinned = false;
size_t memid = 0;
mi_memid_t memid;
bool mem_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
bool is_pinned = false;
size_t align_offset = 0;
size_t alignment = MI_SEGMENT_ALIGN;
@ -1399,7 +1398,6 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
if (mi_slice_is_used(slice)) {
// in use: reclaim the page in our heap
mi_page_t* page = mi_slice_to_page(slice);
mi_assert_internal(!page->is_reset);
mi_assert_internal(page->is_committed);
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_assert_internal(mi_page_heap(page) == NULL);