Mirror of https://github.com/microsoft/mimalloc.git (synced 2024-12-27 13:33:18 +08:00)

Commit 4814a649be: merge from dev-align
@@ -123,7 +123,7 @@
       <SDLCheck>true</SDLCheck>
       <ConformanceMode>true</ConformanceMode>
       <AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
-      <PreprocessorDefinitions>MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);</PreprocessorDefinitions>
+      <PreprocessorDefinitions>MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);</PreprocessorDefinitions>
       <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
       <SupportJustMyCode>false</SupportJustMyCode>
       <CompileAs>Default</CompileAs>

@@ -116,7 +116,7 @@
       <SDLCheck>true</SDLCheck>
       <ConformanceMode>true</ConformanceMode>
       <AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
-      <PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
+      <PreprocessorDefinitions>MI_DEBUG=4;%(PreprocessorDefinitions);</PreprocessorDefinitions>
       <CompileAs>CompileAsCpp</CompileAs>
       <SupportJustMyCode>false</SupportJustMyCode>
       <LanguageStandard>stdcpp20</LanguageStandard>
@@ -88,10 +88,13 @@ bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
 size_t _mi_os_good_alloc_size(size_t size);
 bool _mi_os_has_overcommit(void);

+void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, mi_stats_t* tld_stats);
+void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
+
 // arena.c
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld);
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats);
 mi_arena_id_t _mi_arena_id_none(void);
 bool _mi_arena_memid_is_suitable(size_t memid, mi_arena_id_t req_arena_id);

@@ -103,7 +106,7 @@ void _mi_segment_map_allocated_at(const mi_segment_t* segment);
 void _mi_segment_map_freed_at(const mi_segment_t* segment);

 // "segment.c"
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_wsize, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
 void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
 bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);

@@ -118,7 +121,7 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t*


 // "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;

 void _mi_page_retire(mi_page_t* page) mi_attr_noexcept;  // free the page if there are no other pages with many free blocks
 void _mi_page_unfull(mi_page_t* page);

@@ -155,6 +158,7 @@ mi_msecs_t _mi_clock_start(void);
 // "alloc.c"
 void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept;  // called from `_mi_malloc_generic`
 void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept;  // called from `_mi_heap_malloc_aligned`
 void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);

@@ -446,8 +450,8 @@ static inline mi_page_t* _mi_get_free_small_page(size_t size) {

 // Segment that contains the pointer
 static inline mi_segment_t* _mi_ptr_segment(const void* p) {
-  // mi_assert_internal(p != NULL);
-  return (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK);
+  mi_assert_internal(p != NULL);
+  return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
 }

 static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {

@@ -478,7 +482,8 @@ static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
 // Get the page containing the pointer
 static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
   ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
-  mi_assert_internal(diff >= 0 && diff < (ptrdiff_t)MI_SEGMENT_SIZE);
+  mi_assert_internal(diff >= 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE /* can be equal for large alignment */);
+  if (diff == MI_SEGMENT_SIZE) diff--;
   size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
   mi_assert_internal(idx < segment->slice_entries);
   mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
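Note on the `_mi_ptr_segment` change above: subtracting one before masking makes a pointer that lies exactly on a segment boundary (now possible for blocks placed with a large alignment, which is also why `_mi_segment_page_of` now tolerates `diff == MI_SEGMENT_SIZE`) resolve to the segment that actually contains it rather than to the next one. A minimal arithmetic sketch, assuming a hypothetical 64 MiB segment size; the names below are illustrative and not mimalloc code:

    #include <stdint.h>
    #include <stdio.h>

    #define SEG_SIZE ((uintptr_t)64 * 1024 * 1024)   /* hypothetical MI_SEGMENT_SIZE */
    #define SEG_MASK (SEG_SIZE - 1)                  /* hypothetical MI_SEGMENT_MASK */

    int main(void) {
      uintptr_t segment = 3 * SEG_SIZE;              /* start of the owning segment             */
      uintptr_t p = segment + SEG_SIZE;              /* aligned pointer exactly on the boundary */
      printf("mask p     -> %#lx (next segment)\n",   (unsigned long)( p      & ~SEG_MASK));
      printf("mask (p-1) -> %#lx (owning segment)\n", (unsigned long)((p - 1) & ~SEG_MASK));
      return 0;
    }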
@@ -174,7 +174,8 @@ typedef int32_t mi_ssize_t;
 #endif

 // Maximum slice offset (15)
-#define MI_MAX_SLICE_OFFSET ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
+// #define MI_MAX_SLICE_OFFSET ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
+#define MI_MAX_SLICE_OFFSET ((MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE))

 // Used as a special value to encode block sizes in 32 bits.
 #define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB))

@@ -355,6 +356,8 @@ typedef struct mi_segment_s {
   bool mem_is_pinned;       // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
   bool mem_is_large;        // in large/huge os pages?
   bool mem_is_committed;    // `true` if the whole segment is eagerly committed
+  size_t mem_alignment;     // page alignment for huge pages (only used for alignment > MI_ALIGNMENT_MAX)
+  size_t mem_align_offset;  // offset for huge page alignment (only used for alignment > MI_ALIGNMENT_MAX)

   bool allow_decommit;
   mi_msecs_t decommit_expire;
@@ -167,10 +167,12 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
 // Note that `alignment` always follows `size` for consistency with unaligned
 // allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
 // -------------------------------------------------------------------------------------
+
+// The MI_ALIGNMENT_MAX is deprecated; any alignment is supported but alignments up to MI_ALIGNMENT_MAX may be cheaper.
 #if (INTPTR_MAX > INT32_MAX)
-#define MI_ALIGNMENT_MAX (16*1024*1024UL)   // maximum supported alignment is 16MiB
+#define MI_ALIGNMENT_MAX (32*1024*1024UL)
 #else
-#define MI_ALIGNMENT_MAX (1024*1024UL)      // maximum supported alignment for 32-bit systems is 1MiB
+#define MI_ALIGNMENT_MAX (2*1024*1024UL)
 #endif

 mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
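With the header change above, `MI_ALIGNMENT_MAX` is documented as deprecated: it now marks the point beyond which an aligned allocation is served from a dedicated huge page (see the alloc-aligned and segment hunks below) rather than being a hard limit. A hedged usage sketch; the 256 MiB alignment is only an illustrative value, and behavior under memory pressure is not specified by this commit:

    #include <mimalloc.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      const size_t alignment = (size_t)256 * 1024 * 1024;  /* well above MI_ALIGNMENT_MAX */
      void* p = mi_malloc_aligned(64 * 1024, alignment);    /* previously this would fail  */
      assert(p == NULL || ((uintptr_t)p % alignment) == 0);
      mi_free(p);
      return 0;
    }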
@@ -18,7 +18,7 @@ terms of the MIT license. A copy of the license can be found in the file
 static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
 {
   mi_assert_internal(size <= PTRDIFF_MAX);
-  mi_assert_internal(alignment!=0 && _mi_is_power_of_two(alignment) && alignment <= MI_ALIGNMENT_MAX);
+  mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));

   const uintptr_t align_mask = alignment - 1;  // for any x, `(x & align_mask) == (x % alignment)`
   const size_t padsize = size + MI_PADDING_SIZE;

@@ -30,18 +30,41 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
     return p;
   }

-  // otherwise over-allocate
-  const size_t oversize = size + alignment - 1;
-  void* p = _mi_heap_malloc_zero(heap, oversize, zero);
+  void* p;
+  size_t oversize;
+  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
+    // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
+    if mi_unlikely(offset != 0) {
+      // todo: cannot support offset alignment for very large alignments yet
+      #if MI_DEBUG > 0
+      _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
+      #endif
+      return NULL;
+    }
+    oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
+    p = _mi_heap_malloc_zero_ex(heap, oversize, zero, alignment);  // the page block size should be large enough to align in the single huge page block
     if (p == NULL) return NULL;
+    const uintptr_t adjustx = alignment - (((uintptr_t)p + offset) & align_mask);
+    const mi_page_t* page = _mi_ptr_page(p);
+    const size_t bsize = mi_page_usable_block_size(page);
+    mi_assert_internal(bsize >= adjustx + size);
+    mi_assert_internal(true);
+  }
+  else {
+    // otherwise over-allocate
+    oversize = size + alignment - 1;
+    p = _mi_heap_malloc_zero(heap, oversize, zero);
+    if (p == NULL) return NULL;
+  }

   // .. and align within the allocation
   uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
   mi_assert_internal(adjust <= alignment);
   void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
-  if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true);
-  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
+  if (aligned_p != p) { mi_page_set_has_aligned(_mi_ptr_page(p), true); }
   mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
+  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);

   #if MI_TRACK_ENABLED
   if (p != aligned_p) {

@@ -66,12 +89,14 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
     #endif
     return NULL;
   }
+  /*
   if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {  // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
     #endif
     return NULL;
   }
+  */
   if mi_unlikely(size > PTRDIFF_MAX) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);

@@ -82,7 +107,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
   const size_t padsize = size + MI_PADDING_SIZE;  // note: cannot overflow due to earlier size > PTRDIFF_MAX check

   // try first if there happens to be a small block available with just the right alignment
-  if mi_likely(padsize <= MI_SMALL_SIZE_MAX) {
+  if mi_likely(padsize <= MI_SMALL_SIZE_MAX && alignment <= padsize) {
     mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
     const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
     if mi_likely(page->free != NULL && is_aligned)
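The fallback above keeps the classic over-allocate-and-adjust scheme for alignments up to `MI_ALIGNMENT_MAX`: allocate `size + alignment - 1`, then round the pointer up so that `p + offset` lands on an alignment boundary. A standalone sketch of that arithmetic, using plain `malloc` and purely illustrative names; mimalloc additionally marks the page as `has_aligned` so the interior pointer can later be freed:

    #include <stdint.h>
    #include <stdlib.h>
    #include <assert.h>

    /* Illustrative only: returns an interior pointer; freeing would need the original block. */
    static void* alloc_aligned_at(size_t size, size_t alignment, size_t offset) {
      const uintptr_t align_mask = alignment - 1;           /* alignment must be a power of two */
      void* p = malloc(size + alignment - 1);               /* over-allocate                    */
      if (p == NULL) return NULL;
      uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
      void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
      assert(((uintptr_t)aligned_p + offset) % alignment == 0);
      return aligned_p;
    }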
src/alloc.c (13 changes)

@@ -30,7 +30,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
   if mi_unlikely(block == NULL) {
-    return _mi_malloc_generic(heap, size, zero);
+    return _mi_malloc_generic(heap, size, zero, 0);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
   // pop from the free list

@@ -117,14 +117,15 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si
 }

 // The main allocation function
-extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
   if mi_likely(size <= MI_SMALL_SIZE_MAX) {
+    mi_assert_internal(huge_alignment == 0);
     return mi_heap_malloc_small_zero(heap, size, zero);
   }
   else {
     mi_assert(heap!=NULL);
     mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());  // heaps are thread local
-    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero);  // note: size can overflow but it is detected in malloc_generic
+    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment);  // note: size can overflow but it is detected in malloc_generic
     mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
     #if MI_STAT>1
     if (p != NULL) {

@@ -137,6 +138,10 @@ extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero
   }
 }

+extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+  return _mi_heap_malloc_zero_ex(heap, size, zero, 0);
+}
+
 mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
   return _mi_heap_malloc_zero(heap, size, false);
 }

@@ -471,8 +476,8 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
   }
   #endif

+  if mi_unlikely(p == NULL) return NULL;
   mi_segment_t* const segment = _mi_ptr_segment(p);
-  if mi_unlikely(segment == NULL) return NULL;  // checks also for (p==NULL)

   #if (MI_DEBUG>0)
   if mi_unlikely(!mi_is_in_heap_region(p)) {
src/arena.c (17 changes)

@@ -245,7 +245,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
 }


-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
                               mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
   mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);

@@ -259,7 +259,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
   const int numa_node = _mi_os_numa_node(tld);  // current numa node

   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
-  if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN) {
+  if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
     void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
     if (p != NULL) return p;
   }

@@ -271,14 +271,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
   }
   *is_zero = true;
   *memid = MI_MEMID_OS;
-  void* p = _mi_os_alloc_aligned(size, alignment, *commit, large, tld->stats);
+  void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, *commit, large, tld->stats);
   if (p != NULL) *is_pinned = *large;
   return p;
 }

 void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
-  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
 }

 void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {

@@ -295,17 +295,18 @@ void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
   Arena free
 ----------------------------------------------------------- */

-void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_os_tld_t* tld) {
-  mi_assert_internal(size > 0 && tld->stats != NULL);
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats) {
+  mi_assert_internal(size > 0 && stats != NULL);
   if (p==NULL) return;
   if (size==0) return;

   if (memid == MI_MEMID_OS) {
     // was a direct OS allocation, pass through
-    _mi_os_free_ex(p, size, all_committed, tld->stats);
+    _mi_os_free_aligned(p, size, alignment, align_offset, all_committed, stats);
   }
   else {
     // allocated in an arena
+    mi_assert_internal(align_offset == 0);
     size_t arena_idx;
     size_t bitmap_idx;
     mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);

@@ -329,7 +330,7 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_o
     }
     else {
       mi_assert_internal(arena->blocks_committed != NULL);
-      _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, tld->stats);  // ok if this fails
+      _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, stats);  // ok if this fails
       _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
     }
     // and make it available to others again
src/os.c (37 changes)

@@ -837,8 +837,45 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* lar
   return mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, (large!=NULL?large:&allow_large), &_mi_stats_main /*tld->stats*/ );
 }

+/* -----------------------------------------------------------
+  OS aligned allocation with an offset. This is used
+  for large alignments > MI_SEGMENT_SIZE so we can align
+  the first page at an offset from the start of the segment.
+  As we may need to overallocate, we need to free such pointers
+  using `mi_free_aligned` to use the actual start of the
+  memory region.
+----------------------------------------------------------- */
+
+void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t offset, bool commit, bool* large, mi_stats_t* tld_stats) {
+  mi_assert(offset <= MI_SEGMENT_SIZE);
+  mi_assert(offset <= size);
+  mi_assert((alignment % _mi_os_page_size()) == 0);
+  if (offset > MI_SEGMENT_SIZE) return NULL;
+  if (offset == 0) {
+    return _mi_os_alloc_aligned(size, alignment, commit, large, tld_stats);
+  }
+  else {
+    const size_t extra = _mi_align_up(offset, alignment) - offset;
+    const size_t oversize = size + extra;
+    void* start = _mi_os_alloc_aligned(oversize, alignment, commit, large, tld_stats);
+    if (start == NULL) return NULL;
+    void* p = (uint8_t*)start + extra;
+    mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
+    if (commit && extra > _mi_os_page_size()) {
+      _mi_os_decommit(start, extra, tld_stats);
+    }
+    return p;
+  }
+}
+
+void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats) {
+  mi_assert(align_offset <= MI_SEGMENT_SIZE);
+  const size_t extra = _mi_align_up(align_offset, alignment) - align_offset;
+  void* start = (uint8_t*)p - extra;
+  _mi_os_free_ex(start, size + extra, was_committed, tld_stats);
+}
+
 /* -----------------------------------------------------------
   OS memory API: reset, commit, decommit, protect, unprotect.
 ----------------------------------------------------------- */
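The new `_mi_os_alloc_aligned_offset` above over-allocates by `extra = _mi_align_up(offset, alignment) - offset` so that returning `start + extra` puts `p + offset` on an alignment boundary, and `_mi_os_free_aligned` recomputes the same `extra` to recover the true OS start address. A small worked example of that arithmetic with hypothetical sizes (illustrative code, not part of the commit):

    #include <stdio.h>
    #include <stddef.h>

    static size_t align_up(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

    int main(void) {
      const size_t alignment = (size_t)4 * 1024 * 1024;   /* requested huge-page alignment              */
      const size_t offset    = (size_t)64 * 1024;         /* where the aligned page starts in the block */
      const size_t size      = (size_t)8 * 1024 * 1024;   /* requested size                             */
      const size_t extra     = align_up(offset, alignment) - offset;  /* 4 MiB - 64 KiB                  */
      const size_t oversize  = size + extra;
      /* If the OS returns `start` aligned to `alignment`, then p = start + extra and
         p + offset = start + align_up(offset, alignment), which is again aligned. */
      printf("extra = %zu KiB, oversize = %zu KiB\n", extra / 1024, oversize / 1024);
      return 0;
    }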
src/page.c (35 changes)

@@ -254,15 +254,21 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
 }

 // allocate a fresh page from a segment
-static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
+static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
   mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
-  mi_page_t* page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os);
+  mi_assert_internal(pq==NULL || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
+  mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
   if (page == NULL) {
     // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
     return NULL;
   }
   mi_assert_internal(pq==NULL || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
-  mi_page_init(heap, page, block_size, heap->tld);
+  mi_assert_internal(pq!=NULL || page->xblock_size != 0);
+  mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
+  // a fresh page was found, initialize it
+  const size_t full_block_size = (pq == NULL ? mi_page_block_size(page) : block_size);  // see also: mi_segment_huge_page_alloc
+  mi_assert_internal(full_block_size >= block_size);
+  mi_page_init(heap, page, full_block_size, heap->tld);
   mi_heap_stat_increase(heap, pages, 1);
   if (pq!=NULL) mi_page_queue_push(heap, pq, page);  // huge pages use pq==NULL
   mi_assert_expensive(_mi_page_is_valid(page));

@@ -272,7 +278,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
 // Get a fresh page to use
 static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
   mi_assert_internal(mi_heap_contains_queue(heap, pq));
-  mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size);
+  mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0);
   if (page==NULL) return NULL;
   mi_assert_internal(pq->block_size==mi_page_block_size(page));
   mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));

@@ -648,6 +654,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
+  mi_assert_internal(page->reserved > 0);
   #ifdef MI_ENCODE_FREELIST
   page->keys[0] = _mi_heap_random_next(heap);
   page->keys[1] = _mi_heap_random_next(heap);

@@ -797,12 +804,12 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
 // Because huge pages contain just one block, and the segment contains
 // just that page, we always treat them as abandoned and any thread
 // that frees the block can free the whole page and segment directly.
-static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
+static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
-  mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE);
-  bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX);
+  mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
+  bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
   mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
-  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size);
+  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
   if (page != NULL) {
     mi_assert_internal(mi_page_immediate_available(page));

@@ -833,16 +840,16 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {

 // Allocate a page
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
   // huge allocation?
   const size_t req_size = size - MI_PADDING_SIZE;  // correct for padding_size in case of an overflow on `size`
-  if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE)) {
+  if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
     if mi_unlikely(req_size > PTRDIFF_MAX) {  // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
       _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
       return NULL;
     }
     else {
-      return mi_large_huge_page_alloc(heap,size);
+      return mi_large_huge_page_alloc(heap,size,huge_alignment);
     }
   }
   else {

@@ -854,7 +861,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {

 // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
 // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
 {
   mi_assert_internal(heap != NULL);

@@ -873,10 +880,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce
   _mi_heap_delayed_free_partial(heap);

   // find (or allocate) a page of the right size
-  mi_page_t* page = mi_find_page(heap, size);
+  mi_page_t* page = mi_find_page(heap, size, huge_alignment);
   if mi_unlikely(page == NULL) {  // first time out of memory, try to collect and retry the allocation once more
     mi_heap_collect(heap, true /* force */);
-    page = mi_find_page(heap, size);
+    page = mi_find_page(heap, size, huge_alignment);
   }

   if mi_unlikely(page == NULL) {  // out of memory
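Taken together with the alloc.c changes above, these hunks thread a single `huge_alignment` value through the slow path; when it is non-zero, `mi_find_page` forces the huge-page route regardless of size. A rough sketch of the resulting call chain for an aligned request larger than MI_ALIGNMENT_MAX, paraphrased from the hunks in this commit rather than quoted verbatim:

    mi_heap_malloc_zero_aligned_at_fallback(...)                    // alloc-aligned.c
      -> _mi_heap_malloc_zero_ex(heap, oversize, zero, alignment)
      -> _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment)
      -> mi_find_page(heap, size, huge_alignment)                   // huge_alignment > 0 takes the huge path
      -> mi_large_huge_page_alloc(heap, size, huge_alignment)       // pq == NULL for huge pages
      -> mi_page_fresh_alloc(heap, NULL, block_size, page_alignment)
      -> _mi_segment_page_alloc(heap, block_size, page_alignment, ...)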
src/region.c (23 changes)

@@ -50,9 +50,9 @@ bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);

 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats);
 void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);



@@ -181,7 +181,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
   bool is_zero = false;
   bool is_pinned = false;
   size_t arena_memid = 0;
-  void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_pinned, &is_zero, _mi_arena_id_none(), & arena_memid, tld);
+  void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, 0, &region_commit, &region_large, &is_pinned, &is_zero, _mi_arena_id_none(), & arena_memid, tld);
   if (start == NULL) return false;
   mi_assert_internal(!(region_large && !allow_large));
   mi_assert_internal(!region_large || region_commit);

@@ -190,7 +190,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
   const size_t idx = mi_atomic_increment_acq_rel(&regions_count);
   if (idx >= MI_REGION_MAX) {
     mi_atomic_decrement_acq_rel(&regions_count);
-    _mi_arena_free(start, MI_REGION_SIZE, arena_memid, region_commit, tld->stats);
+    _mi_arena_free(start, MI_REGION_SIZE, MI_SEGMENT_ALIGN, 0, arena_memid, region_commit, tld->stats);
     _mi_warning_message("maximum regions used: %zu GiB (perhaps recompile with a larger setting for MI_HEAP_REGION_MAX_SIZE)", _mi_divide_up(MI_HEAP_REGION_MAX_SIZE, MI_GiB));
     return false;
   }

@@ -347,7 +347,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*

 // Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`.
 // (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
 {
   mi_assert_internal(memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);

@@ -363,7 +363,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
   void* p = NULL;
   size_t arena_memid;
   const size_t blocks = mi_region_block_count(size);
-  if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN) {
+  if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
     p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld);
     if (p == NULL) {
       _mi_warning_message("unable to allocate from region: size %zu\n", size);

@@ -371,12 +371,12 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
   }
   if (p == NULL) {
     // and otherwise fall back to the OS
-    p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, _mi_arena_id_none(), & arena_memid, tld);
+    p = _mi_arena_alloc_aligned(size, alignment, align_offset, commit, large, is_pinned, is_zero, _mi_arena_id_none(), & arena_memid, tld);
     *memid = mi_memid_create_from_arena(arena_memid);
   }

   if (p != NULL) {
-    mi_assert_internal((uintptr_t)p % alignment == 0);
+    mi_assert_internal(((uintptr_t)p + align_offset) % alignment == 0);
     #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
     if (*commit) { ((uint8_t*)p)[0] = 0; }  // ensure the memory is committed
     #endif

@@ -391,7 +391,7 @@ Free
 -----------------------------------------------------------------------------*/

 // Free previously allocated memory with a given id.
-void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_reset, mi_os_tld_t* tld) {
+void _mi_mem_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t id, bool full_commit, bool any_reset, mi_os_tld_t* tld) {
   mi_assert_internal(size > 0 && tld != NULL);
   if (p==NULL) return;
   if (size==0) return;

@@ -402,10 +402,11 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
   mem_region_t* region;
   if (mi_memid_is_arena(id,&region,&bit_idx,&arena_memid)) {
     // was a direct arena allocation, pass through
-    _mi_arena_free(p, size, arena_memid, full_commit, tld->stats);
+    _mi_arena_free(p, size, alignment, align_offset, arena_memid, full_commit, tld->stats);
   }
   else {
     // allocated in a region
+    mi_assert_internal(align_offset == 0);
     mi_assert_internal(size <= MI_REGION_MAX_OBJ_SIZE); if (size > MI_REGION_MAX_OBJ_SIZE) return;
     const size_t blocks = mi_region_block_count(size);
     mi_assert_internal(blocks + bit_idx <= MI_BITMAP_FIELD_BITS);

@@ -469,7 +470,7 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
       mi_atomic_store_release(&region->info, (size_t)0);
       if (start != NULL) {  // && !_mi_os_is_huge_reserved(start)) {
         _mi_abandoned_await_readers();  // ensure no pending reads
-        _mi_arena_free(start, MI_REGION_SIZE, arena_memid, (~commit == 0), tld->stats);
+        _mi_arena_free(start, MI_REGION_SIZE, MI_SEGMENT_ALIGN, 0, arena_memid, (~commit == 0), tld->stats);
       }
     }
   }
@@ -245,7 +245,7 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
 static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1];  // 2KiB per TB with 64MiB segments

 static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
-  mi_assert_internal(_mi_ptr_segment(segment) == segment);  // is it aligned on MI_SEGMENT_SIZE?
+  mi_assert_internal(_mi_ptr_segment(segment + 1) == segment);  // is it aligned on MI_SEGMENT_SIZE?
   if ((uintptr_t)segment >= MI_MAX_ADDRESS) {
     *bitidx = 0;
     return MI_SEGMENT_MAP_WSIZE;
@ -386,11 +386,13 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
|
|||||||
|
|
||||||
// _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
|
// _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
|
||||||
const size_t size = mi_segment_size(segment);
|
const size_t size = mi_segment_size(segment);
|
||||||
if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) {
|
if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || // only push regular segments on the cache
|
||||||
|
!_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
|
||||||
|
{
|
||||||
const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
|
const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
|
||||||
if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
|
if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
|
||||||
_mi_abandoned_await_readers(); // wait until safe to free
|
_mi_abandoned_await_readers(); // wait until safe to free
|
||||||
_mi_arena_free(segment, mi_segment_size(segment), segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->os);
|
_mi_arena_free(segment, mi_segment_size(segment), segment->mem_alignment, segment->mem_align_offset, segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->stats);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -406,7 +408,7 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
|
|||||||
----------------------------------------------------------- */
|
----------------------------------------------------------- */
|
||||||
|
|
||||||
static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) {
|
static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) {
|
||||||
mi_assert_internal(_mi_ptr_segment(p) == segment);
|
mi_assert_internal(_mi_ptr_segment(p + 1) == segment);
|
||||||
mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
|
mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
|
||||||
mi_commit_mask_create_empty(cm);
|
mi_commit_mask_create_empty(cm);
|
||||||
if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return;
|
if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return;
|
||||||
@ -761,16 +763,14 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
|
|||||||
----------------------------------------------------------- */
|
----------------------------------------------------------- */
|
||||||
|
|
||||||
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
|
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
|
||||||
static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
|
static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
|
||||||
{
|
{
|
||||||
mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
|
mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
|
||||||
mi_assert_internal((segment==NULL) || (segment!=NULL && required==0));
|
mi_assert_internal((segment==NULL) || (segment!=NULL && required==0));
|
||||||
// calculate needed sizes first
|
// calculate needed sizes first
|
||||||
size_t info_slices;
|
size_t info_slices;
|
||||||
size_t pre_size;
|
size_t pre_size;
|
||||||
const size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices);
|
size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices);
|
||||||
const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
|
|
||||||
const size_t segment_size = segment_slices * MI_SEGMENT_SLICE_SIZE;
|
|
||||||
|
|
||||||
// Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little)
|
// Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little)
|
||||||
const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems
|
const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems
|
||||||
@ -797,9 +797,25 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
|
|||||||
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
|
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
|
||||||
bool is_pinned = false;
|
bool is_pinned = false;
|
||||||
size_t memid = 0;
|
size_t memid = 0;
|
||||||
|
size_t align_offset = 0;
|
||||||
|
size_t alignment = MI_SEGMENT_SIZE;
|
||||||
|
size_t segment_size = segment_slices * MI_SEGMENT_SLICE_SIZE;
|
||||||
|
|
||||||
|
if (page_alignment > 0) {
|
||||||
|
mi_assert_internal(huge_page != NULL);
|
||||||
|
mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN);
|
||||||
|
alignment = page_alignment;
|
||||||
|
const size_t info_size = info_slices * MI_SEGMENT_SLICE_SIZE;
|
||||||
|
align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN );
|
||||||
|
segment_size += _mi_align_up(align_offset - info_size, MI_SEGMENT_SLICE_SIZE);
|
||||||
|
segment_slices = segment_size / MI_SEGMENT_SLICE_SIZE;
|
||||||
|
}
|
||||||
|
else {
|
||||||
segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
|
segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
|
||||||
|
}
|
||||||
|
|
||||||
if (segment==NULL) {
|
if (segment==NULL) {
|
||||||
segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
|
segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, &commit, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
|
||||||
if (segment == NULL) return NULL; // failed to allocate
|
if (segment == NULL) return NULL; // failed to allocate
|
||||||
if (commit) {
|
if (commit) {
|
||||||
mi_commit_mask_create_full(&commit_mask);
|
mi_commit_mask_create_full(&commit_mask);
|
||||||
@@ -826,6 +842,8 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
     segment->mem_is_pinned = is_pinned;
     segment->mem_is_large = mem_large;
     segment->mem_is_committed = mi_commit_mask_is_full(&commit_mask);
+    segment->mem_alignment = alignment;
+    segment->mem_align_offset = align_offset;
     mi_segments_track_size((long)(segment_size), tld);
     _mi_segment_map_allocated_at(segment);
   }
@@ -862,6 +880,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
 
 
   // initialize segment info
+  const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
   segment->segment_slices = segment_slices;
   segment->segment_info_slices = info_slices;
   segment->thread_id = _mi_thread_id();
@@ -912,8 +931,8 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
 
 
 // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
-static mi_segment_t* mi_segment_alloc(size_t required, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) {
-  return mi_segment_init(NULL, required, req_arena_id, tld, os_tld, huge_page);
+static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) {
+  return mi_segment_init(NULL, required, page_alignment, req_arena_id, tld, os_tld, huge_page);
 }
 
 
@@ -1450,7 +1469,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_
     return segment;
   }
   // 2. otherwise allocate a fresh segment
-  return mi_segment_alloc(0, heap->arena_id, tld, os_tld, NULL);
+  return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL);
 }
 
 
@@ -1490,14 +1509,28 @@ static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_ki
    Huge page allocation
 ----------------------------------------------------------- */
 
-static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
   mi_page_t* page = NULL;
-  mi_segment_t* segment = mi_segment_alloc(size,req_arena_id,tld,os_tld,&page);
+  mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,os_tld,&page);
   if (segment == NULL || page==NULL) return NULL;
   mi_assert_internal(segment->used==1);
   mi_assert_internal(mi_page_block_size(page) >= size);
   segment->thread_id = 0; // huge segments are immediately abandoned
+  #if MI_DEBUG > 3
+  if (page_alignment > 0) {
+    size_t psize;
+    void* p = _mi_segment_page_start(segment, page, &psize);
+    void* aligned_p = (void*)_mi_align_up((uintptr_t)p, page_alignment);
+    mi_assert_internal(page_alignment == 0 || _mi_is_aligned(aligned_p, page_alignment));
+    mi_assert_internal(page_alignment == 0 || psize - ((uint8_t*)aligned_p - (uint8_t*)p) >= size);
+  }
+  #endif
+  // for huge pages we initialize the xblock_size as we may
+  // overallocate to accommodate large alignments.
+  size_t psize;
+  _mi_segment_page_start(segment, page, &psize);
+  page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
   return page;
 }
 
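
Note (not part of the diff): the debug block above verifies that the first page_alignment-aligned address inside the huge page still leaves at least `size` usable bytes, and xblock_size is clamped to MI_HUGE_BLOCK_SIZE because the page may be overallocated for the alignment. A standalone sketch of the same fit check, with example numbers only:

// Illustrative sketch only: "does the aligned block still fit in the page?"
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

static uintptr_t align_up_ptr(uintptr_t p, size_t a) { return (p + a - 1) & ~(uintptr_t)(a - 1); }  // a must be a power of two

static int aligned_block_fits(uintptr_t page_start, size_t psize, size_t size, size_t page_alignment) {
  uintptr_t aligned_p = align_up_ptr(page_start, page_alignment);
  if ((size_t)(aligned_p - page_start) > psize) return 0;      // aligned address falls past the page
  size_t remaining = psize - (size_t)(aligned_p - page_start); // bytes left after skipping to the aligned address
  return (aligned_p % page_alignment) == 0 && remaining >= size;
}

int main(void) {
  // e.g. a page starting 64 KiB into the segment with 48 MiB usable,
  // asked for a 32 MiB block at 16 MiB alignment (example numbers only)
  assert(aligned_block_fits(64u*1024, 48u*1024*1024, 32u*1024*1024, 16u*1024*1024));
  return 0;
}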
@@ -1531,9 +1564,17 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
 /* -----------------------------------------------------------
    Page allocation and free
 ----------------------------------------------------------- */
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
   mi_page_t* page;
-  if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
+  if mi_unlikely(page_alignment > MI_ALIGNMENT_MAX) {
+    mi_assert_internal(_mi_is_power_of_two(page_alignment));
+    mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
+    if (page_alignment < MI_SEGMENT_SIZE) {
+      page_alignment = MI_SEGMENT_SIZE;
+    }
+    page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld);
+  }
+  else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
     page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld);
   }
   else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
@@ -1543,7 +1584,7 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segment
     page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld);
   }
   else {
-    page = mi_segment_huge_page_alloc(block_size,heap->arena_id,tld,os_tld);
+    page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld);
   }
   mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid));
   mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
@@ -161,10 +161,29 @@ int main(void) {
     result = ok;
   };
   CHECK_BODY("malloc-aligned7") {
-    void* p = mi_malloc_aligned(1024,MI_ALIGNMENT_MAX); mi_free(p);
+    void* p = mi_malloc_aligned(1024,MI_ALIGNMENT_MAX);
+    mi_free(p);
+    result = ((uintptr_t)p % MI_ALIGNMENT_MAX) == 0;
   };
   CHECK_BODY("malloc-aligned8") {
-    void* p = mi_malloc_aligned(1024,2*MI_ALIGNMENT_MAX); mi_free(p);
+    bool ok = true;
+    for (int i = 0; i < 5 && ok; i++) {
+      int n = (1 << i);
+      void* p = mi_malloc_aligned(1024, n * MI_ALIGNMENT_MAX);
+      ok = ((uintptr_t)p % (n*MI_ALIGNMENT_MAX)) == 0;
+      mi_free(p);
+    }
+    result = ok;
+  };
+  CHECK_BODY("malloc-aligned9") {
+    bool ok = true;
+    for (int i = 0; i < 5 && ok; i++) {
+      int n = (1 << i);
+      void* p = mi_malloc_aligned( 2*n*MI_ALIGNMENT_MAX, n*MI_ALIGNMENT_MAX);
+      ok = ((uintptr_t)p % (n*MI_ALIGNMENT_MAX)) == 0;
+      mi_free(p);
+    }
+    result = ok;
   };
   CHECK_BODY("malloc-aligned-at1") {
     void* p = mi_malloc_aligned_at(48,32,0); result = (p != NULL && ((uintptr_t)(p) + 0) % 32 == 0); mi_free(p);
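
Note (not part of the diff): a minimal standalone program in the spirit of the new tests above, using only mi_malloc_aligned, mi_free, and the MI_ALIGNMENT_MAX constant the tests already rely on; the 4*MI_ALIGNMENT_MAX alignment is just an arbitrary example of an alignment larger than MI_ALIGNMENT_MAX.

// Illustrative sketch only: request an alignment beyond MI_ALIGNMENT_MAX and check it.
#include <mimalloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  size_t align = 4 * (size_t)MI_ALIGNMENT_MAX;   // example over-sized alignment
  void* p = mi_malloc_aligned(1024, align);
  int ok = (p != NULL) && (((uintptr_t)p % align) == 0);
  printf("p=%p aligned=%d\n", p, ok);
  mi_free(p);
  return ok ? 0 : 1;
}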