Mirror of https://github.com/microsoft/mimalloc.git (synced 2025-01-14 00:27:59 +08:00)

Commit b5665f0eec (parent: 6688b45fbd)
add full block_size and page_start to page info
Visual Studio project file (.vcxproj):
@@ -217,6 +217,12 @@
     <ClCompile Include="..\..\src\bitmap.c">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
     </ClCompile>
+    <ClCompile Include="..\..\src\free.c">
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+    </ClCompile>
     <ClCompile Include="..\..\src\heap.c" />
     <ClCompile Include="..\..\src\init.c" />
     <ClCompile Include="..\..\src\libc.c" />

(`free.c` is added excluded from build in every configuration, which suggests it is compiled via `#include` from another unit rather than standalone, the same pattern `page-queue.c` uses with `page.c` below.)
Visual Studio project filters (.vcxproj.filters):
@@ -58,6 +58,9 @@
     <ClCompile Include="..\..\src\libc.c">
       <Filter>Sources</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\free.c">
+      <Filter>Sources</Filter>
+    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\..\src\bitmap.h">
include/mimalloc/internal.h:
@@ -147,7 +147,7 @@ void _mi_segment_map_freed_at(const mi_segment_t* segment);
 mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
 void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size); // page start for any page
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size, size_t* pre_size); // page start for any page

 #if MI_HUGE_PAGE_ABANDON
 void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
@@ -452,10 +452,9 @@ static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const
 }

 // Quick page start for initialized pages
-static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
-  const size_t bsize = page->xblock_size;
-  mi_assert_internal(bsize > 0 && (bsize%sizeof(void*)) == 0);
-  return _mi_segment_page_start(segment, page, bsize, page_size, NULL);
+static inline uint8_t* mi_page_start(const mi_page_t* page) {
+  mi_assert_internal(page->page_start != NULL);
+  return page->page_start;
 }

 // Get the page containing the pointer
@@ -466,16 +465,8 @@ static inline mi_page_t* _mi_ptr_page(void* p) {

 // Get the block size of a page (special case for huge objects)
 static inline size_t mi_page_block_size(const mi_page_t* page) {
-  const size_t bsize = page->xblock_size;
-  mi_assert_internal(bsize > 0);
-  if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
-    return bsize;
-  }
-  else {
-    size_t psize;
-    _mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL);
-    return psize;
-  }
+  mi_assert_internal(page->block_size > 0);
+  return page->block_size;
 }

 static inline bool mi_page_is_huge(const mi_page_t* page) {
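The payoff of carrying the full `block_size` on the page is visible in this hunk: the old accessor had to special-case huge blocks (whose size exceeded the 32-bit `MI_HUGE_BLOCK_SIZE` sentinel stored in `xblock_size`) by recomputing the size from the segment, while the new one is a single field load. A self-contained before/after sketch, using simplified stand-in structs rather than the real mimalloc types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HUGE_BLOCK_SIZE ((uint32_t)0x40000000)  /* stand-in for MI_HUGE_BLOCK_SIZE */

/* simplified stand-ins for mi_page_t before and after this commit */
typedef struct old_page_s {
  uint32_t xblock_size;    /* 32-bit, saturated at HUGE_BLOCK_SIZE */
  size_t   segment_psize;  /* stands in for the segment page-size lookup */
} old_page_t;

typedef struct new_page_s {
  size_t block_size;       /* full size, no encoding needed */
} new_page_t;

static size_t old_block_size(const old_page_t* page) {
  const size_t bsize = page->xblock_size;
  assert(bsize > 0);
  if (bsize < HUGE_BLOCK_SIZE) return bsize;  /* common case */
  return page->segment_psize;                 /* huge: recompute from the segment */
}

static size_t new_block_size(const new_page_t* page) {
  assert(page->block_size > 0);
  return page->block_size;                    /* one load, no branch */
}

int main(void) {
  old_page_t op = { .xblock_size = HUGE_BLOCK_SIZE, .segment_psize = (size_t)3 << 30 };
  new_page_t np = { .block_size  = (size_t)3 << 30 };
  printf("old: %zu, new: %zu\n", old_block_size(&op), new_block_size(&np));
  return 0;
}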
include/mimalloc/types.h:
@@ -181,7 +181,6 @@ typedef int32_t mi_ssize_t;
 #define MI_MEDIUM_OBJ_SIZE_MAX   (MI_MEDIUM_PAGE_SIZE/4)   // 128KiB
 #define MI_LARGE_OBJ_SIZE_MAX    (MI_LARGE_PAGE_SIZE/2)    // 2MiB
 #define MI_LARGE_OBJ_WSIZE_MAX   (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
-#define MI_HUGE_OBJ_SIZE_MAX     (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE)        // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)

 // Maximum number of size classes. (spaced exponentially in 12.5% increments)
 #define MI_BIN_HUGE  (73U)
@@ -190,9 +189,6 @@ typedef int32_t mi_ssize_t;
 #error "mimalloc internal: define more bins"
 #endif

-// Used as a special value to encode block sizes in 32 bits.
-#define MI_HUGE_BLOCK_SIZE   ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
-
 // Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
 #define MI_BLOCK_ALIGNMENT_MAX   (MI_SEGMENT_SIZE >> 1)
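For context on the `MI_BIN_HUGE` comment above: size classes are spaced exponentially, with a few linear sub-bins inside each power-of-two range. A generic, self-contained sketch of that style of binning (8 sub-bins per doubling gives the 12.5% steps; this is an illustration of the idea, not mimalloc's exact `mi_bin` definition, and it assumes a GCC/Clang builtin):

#include <stddef.h>
#include <stdio.h>

/* map a size to an exponential bin with 8 linear sub-bins per power of two */
static unsigned size_to_bin(size_t size) {
  if (size <= 8) return 1;                     /* coarse small-size handling */
  unsigned b = 63u - (unsigned)__builtin_clzll((unsigned long long)(size - 1));
  unsigned sub = (unsigned)(((size - 1) >> (b - 3)) & 7);  /* 12.5% steps */
  return (b << 3) + sub;
}

int main(void) {
  size_t sizes[] = { 8, 9, 16, 72, 80, 1024, 1152 };
  for (int i = 0; i < 7; i++)
    printf("size %5zu -> bin %u\n", sizes[i], size_to_bin(sizes[i]));
  return 0;
}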
@@ -258,7 +254,6 @@ typedef uintptr_t mi_thread_free_t;
 // implement a monotonic heartbeat. The `thread_free` list is needed for
 // avoiding atomic operations in the common case.
 //
-//
 // `used - |thread_free|` == actual blocks that are in use (alive)
 // `used - |thread_free| + |free| + |local_free| == capacity`
 //
@@ -266,16 +261,13 @@ typedef uintptr_t mi_thread_free_t;
 // the number of memory accesses in the `mi_page_all_free` function(s).
 //
 // Notes:
-// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
+// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
 // - Using `uint16_t` does not seem to slow things down
-// - The size is 8 words on 64-bit which helps the page index calculations
-//   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
-//   and 12 are still good for address calculation)
-// - To limit the structure size, the `xblock_size` is 32-bits only; for
-//   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
+// - The size is 10 words on 64-bit which helps the page index calculations
+//   (and 14 words on 32-bit, and encoded free lists add 2 words)
 // - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
 //   concurrent frees where only the first concurrent free adds to the owning
-//   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
+//   heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
 //   The invariant is that no-delayed-free is only set if there is
 //   at least one block that will be added, or as already been added, to
 //   the owning heap `thread_delayed_free` list. This guarantees that pages
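The `xthread_free` note relies on pointer tagging: blocks are at least pointer-aligned, so the low bits of the list head are free to carry the delayed-free state. A minimal standalone sketch of that encoding (simplified names and flag values, not the exact mimalloc definitions):

#include <stdint.h>
#include <stdio.h>

/* low 2 bits of the thread-free word carry the delayed-free state */
typedef enum delayed_e {
  USE_DELAYED_FREE = 0,
  DELAYED_FREEING  = 1,
  NO_DELAYED_FREE  = 2,
  NEVER_DELAYED    = 3
} delayed_t;

typedef uintptr_t thread_free_t;

static delayed_t tf_delayed(thread_free_t tf) { return (delayed_t)(tf & 3); }
static void*     tf_block(thread_free_t tf)   { return (void*)(tf & ~(uintptr_t)3); }
static thread_free_t tf_make(void* block, delayed_t d) {
  return (uintptr_t)block | (uintptr_t)d;  /* block must be at least 4-byte aligned */
}

int main(void) {
  int dummy;  /* stand-in for a freed block (suitably aligned) */
  thread_free_t tf = tf_make(&dummy, NO_DELAYED_FREE);
  printf("block=%p delayed=%d\n", tf_block(tf), (int)tf_delayed(tf));
  return 0;
}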
@@ -290,16 +282,16 @@ typedef struct mi_page_s {
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
   uint16_t              reserved;          // number of blocks reserved in memory
+  uint16_t              used;              // number of blocks in use (including blocks in `thread_free`)
   mi_page_flags_t       flags;             // `in_full` and `has_aligned` flags (8 bits)
+  uint8_t               block_size_shift;  // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
   uint8_t               free_is_zero:1;    // `true` if the blocks in the free list are zero initialized
   uint8_t               retire_expire:7;   // expiration count for retired blocks
-
+                                           // padding
   mi_block_t*           free;              // list of available free blocks (`malloc` allocates from this list)
   mi_block_t*           local_free;        // list of deferred free blocks by this thread (migrates to `free`)
-  uint16_t              used;              // number of blocks in use (including blocks in `thread_free`)
-  uint8_t               block_size_shift;  // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
-  uint8_t               block_offset_adj;  // if not zero, then `(mi_page_start(_,page,_) - (uint8_t*)page - MI_MAX_ALIGN_SIZE*(block_offset_adj-1)) % block_size == 0)` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
-  uint32_t              xblock_size;       // size available in each block (always `>0`)
+  size_t                block_size;        // size available in each block (always `>0`)
+  uint8_t*              page_start;        // start of the page area containing the blocks

   #if (MI_ENCODE_FREELIST || MI_PADDING)
   uintptr_t             keys[2];           // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
@@ -310,6 +302,10 @@ typedef struct mi_page_s {
   struct mi_page_s*     next;              // next page owned by the heap with the same `block_size`
   struct mi_page_s*     prev;              // previous page owned by the heap with the same `block_size`

+  #if MI_INTPTR_SIZE==4                    // pad to 14 words on 32-bit
+  void* padding[1];
+  #endif
 } mi_page_t;
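The "10 words on 64-bit" note can be sanity-checked with a simplified stand-in layout (field names abbreviated, bitfields and atomics approximated as plain integers and words; sizes assume a typical LP64 ABI):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the new mi_page_s layout, not the real type */
typedef struct page_s {
  uint8_t   segment_idx;
  uint8_t   segment_bits;      /* segment_in_use / is_committed / is_zero_init */
  uint16_t  capacity;
  uint16_t  reserved;
  uint16_t  used;
  uint8_t   flags;
  uint8_t   block_size_shift;
  uint8_t   free_retire;       /* free_is_zero:1 + retire_expire:7 */
  void*     free;
  void*     local_free;
  size_t    block_size;
  uint8_t*  page_start;
  uintptr_t xthread_free;
  void*     xheap;
  struct page_s* next;
  struct page_s* prev;
} page_t;

int main(void) {
  /* 2 words of small fields + 8 pointer-sized fields = 10 words on 64-bit */
  printf("sizeof(page_t) = %zu bytes (%zu words)\n",
         sizeof(page_t), sizeof(page_t) / sizeof(void*));
  return 0;
}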
@@ -548,7 +544,6 @@ typedef struct mi_stats_s {
   mi_stat_counter_t searches;
   mi_stat_counter_t normal_count;
   mi_stat_counter_t huge_count;
-  mi_stat_counter_t giant_count;
   mi_stat_counter_t arena_count;
   mi_stat_counter_t arena_crossover_count;
   mi_stat_counter_t arena_rollback_count;
src/alloc.c | 10
@@ -30,7 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // Note: in release mode the (inlined) routine is about 7 instructions with a single test.
 extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
 {
-  mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
+  mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
   if mi_unlikely(block == NULL) {
     return _mi_malloc_generic(heap, size, zero, 0);
@@ -53,14 +53,14 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz

   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
-    mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
-    mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
+    mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+    mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
     if (page->free_is_zero) {
       block->next = 0;
-      mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
+      mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
     }
     else {
-      _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
+      _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE);
     }
   }
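`_mi_page_malloc` is the hot path touched above: pop the head of the page-local free list, falling back to a generic slow path when the list is empty. A self-contained miniature of that free-list pop (stand-in types, no thread-safety or padding handling):

#include <stddef.h>
#include <stdio.h>

typedef struct block_s { struct block_s* next; } block_t;
typedef struct page_s {
  block_t* free;        /* intrusive list of available blocks */
  size_t   block_size;
  size_t   used;
} page_t;

/* fast path: pop the free-list head; NULL stands in for the generic fallback */
static void* page_malloc(page_t* page, size_t size) {
  if (size > page->block_size) return NULL;   /* wrong size class */
  block_t* block = page->free;
  if (block == NULL) return NULL;             /* would call the slow path */
  page->free = block->next;
  page->used++;
  return block;
}

int main(void) {
  /* carve a tiny arena into 32-byte blocks and link them */
  static unsigned char arena[4 * 32];
  page_t page = { .free = NULL, .block_size = 32, .used = 0 };
  for (int i = 3; i >= 0; i--) {
    block_t* b = (block_t*)(arena + i * 32);
    b->next = page.free;
    page.free = b;
  }
  void* p = page_malloc(&page, 24);
  printf("allocated %p, used=%zu\n", p, page.used);
  return 0;
}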
src/free.c | 16
@@ -55,14 +55,7 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
   mi_assert_internal(page!=NULL && p!=NULL);

-  size_t diff;
-  if mi_likely(page->block_offset_adj != 0) {
-    diff = (uint8_t*)p - (uint8_t*)page - (MI_MAX_ALIGN_SIZE*(page->block_offset_adj - 1));
-  }
-  else {
-    diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
-  }
-
+  size_t diff = (uint8_t*)p - page->page_start;
   size_t adjust;
   if mi_likely(page->block_size_shift != 0) {
     adjust = diff & (((size_t)1 << page->block_size_shift) - 1);
@@ -519,12 +512,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
   #if !MI_HUGE_PAGE_ABANDON
   else {
     const size_t bpsize = mi_page_block_size(page);
-    if (bpsize <= MI_HUGE_OBJ_SIZE_MAX) {
-      mi_heap_stat_decrease(heap, huge, bpsize);
-    }
-    else {
-      mi_heap_stat_decrease(heap, giant, bpsize);
-    }
+    mi_heap_stat_decrease(heap, huge, bpsize);
   }
   #endif
 }
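With `page_start` stored directly on the page, the interior-pointer adjustment in `_mi_page_ptr_unalign` collapses to one subtraction plus, for power-of-two block sizes, a mask instead of a modulo. A standalone sketch of that fast path (simplified stand-in struct):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct page_s {
  uint8_t* page_start;
  size_t   block_size;        /* full block size */
  uint8_t  block_size_shift;  /* nonzero iff block_size == 1 << shift */
} page_t;

static void* ptr_unalign(const page_t* page, void* p) {
  size_t diff = (size_t)((uint8_t*)p - page->page_start);
  size_t adjust;
  if (page->block_size_shift != 0) {
    adjust = diff & (((size_t)1 << page->block_size_shift) - 1);  /* fast path */
  } else {
    adjust = diff % page->block_size;                             /* generic path */
  }
  return (uint8_t*)p - adjust;  /* start of the enclosing block */
}

int main(void) {
  static uint8_t area[256];
  page_t page = { .page_start = area, .block_size = 64, .block_size_shift = 6 };
  void* interior = area + 64 + 13;  /* 13 bytes into the second block */
  assert(ptr_unalign(&page, interior) == area + 64);
  printf("block start: %p\n", ptr_unalign(&page, interior));
  return 0;
}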
src/heap.c | 14
@@ -289,12 +289,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
   // stats
   const size_t bsize = mi_page_block_size(page);
   if (bsize > MI_LARGE_OBJ_SIZE_MAX) {
-    if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
-      mi_heap_stat_decrease(heap, giant, bsize);
-    }
-    else {
-      mi_heap_stat_decrease(heap, huge, bsize);
-    }
+    mi_heap_stat_decrease(heap, huge, bsize);
   }
   #if (MI_STAT)
   _mi_page_free_collect(page, false); // update used count
@@ -467,8 +462,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
   MI_UNUSED(heap);
   MI_UNUSED(pq);
   bool* found = (bool*)vfound;
-  mi_segment_t* segment = _mi_page_segment(page);
-  void* start = _mi_page_start(segment, page, NULL);
+  void* start = mi_page_start(page);
   void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
   *found = (p >= start && p < end);
   return (!*found); // continue if not found
@@ -514,7 +508,7 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
   const size_t bsize = mi_page_block_size(page);
   const size_t ubsize = mi_page_usable_block_size(page); // without padding
   size_t psize;
-  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
+  uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize, NULL);

   if (page->capacity == 1) {
     // optimize page with one block
@@ -581,7 +575,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
   xarea.page = page;
   xarea.area.reserved = page->reserved * bsize;
   xarea.area.committed = page->capacity * bsize;
-  xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
+  xarea.area.blocks = mi_page_start(page);
   xarea.area.used = page->used; // number of blocks in use (#553)
   xarea.area.block_size = ubsize;
   xarea.area.full_block_size = bsize;
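The ownership test in `mi_heap_page_check_owned` above becomes a pure interval check once `page_start` is a field: a pointer belongs to the page iff it lies in [start, start + capacity * block_size). A minimal sketch of that test (stand-in types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct page_s {
  uint8_t* page_start;
  size_t   block_size;
  uint16_t capacity;
} page_t;

/* true iff p points into the committed block area of the page */
static bool page_contains(const page_t* page, const void* p) {
  const uint8_t* start = page->page_start;
  const uint8_t* end = start + (size_t)page->capacity * page->block_size;
  return (const uint8_t*)p >= start && (const uint8_t*)p < end;
}

int main(void) {
  static uint8_t area[256];
  page_t page = { .page_start = area, .block_size = 32, .capacity = 4 };  /* 128 bytes in use */
  printf("inside: %d, outside: %d\n",
         page_contains(&page, area + 100), page_contains(&page, area + 130));
  return 0;
}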
src/init.c:
@@ -17,15 +17,15 @@ const mi_page_t _mi_page_empty = {
   0, false, false, false,
   0,       // capacity
   0,       // reserved capacity
+  0,       // used
   { 0 },   // flags
+  0,       // block size shift
   false,   // is_zero
   0,       // retire_expire
   NULL,    // free
   NULL,    // local_free
-  0,       // used
-  0,       // block size shift
-  0,       // block offset adj
-  0,       // xblock_size
+  0,       // block_size
+  NULL,    // page_start
   #if (MI_PADDING || MI_ENCODE_FREELIST)
   { 0, 0 },
   #endif
@@ -78,7 +78,6 @@ const mi_page_t _mi_page_empty = {
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
-  MI_STAT_COUNT_NULL(), \
   { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
   { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
   { 0, 0 }, { 0, 0 }, { 0, 0 } \
src/page-queue.c:
@@ -11,6 +11,10 @@ terms of the MIT license. A copy of the license can be found in the file

 #ifndef MI_IN_PAGE_C
 #error "this file should be included from 'page.c'"
+// include to help an IDE
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
 #endif

 /* -----------------------------------------------------------
@@ -138,20 +142,20 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
 #endif

 static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(mi_page_block_size(page)));
   mi_heap_t* heap = mi_page_heap(page);
   mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size);
+  mi_assert_internal(bin >= MI_BIN_HUGE || mi_page_block_size(page) == pq->block_size);
   mi_assert_expensive(mi_page_queue_contains(pq, page));
   return pq;
 }

 static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(mi_page_block_size(page)));
   mi_assert_internal(bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
+  mi_assert_internal(mi_page_is_in_full(page) || mi_page_block_size(page) == pq->block_size);
   return pq;
 }

@@ -206,7 +210,7 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
 static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(queue, page));
-  mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size || (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;
   if (page->next != NULL) page->next->prev = page->prev;
@@ -231,8 +235,8 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   #endif
-  mi_assert_internal(page->xblock_size == queue->block_size ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+                     (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

   mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -258,11 +262,13 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(from, page));
   mi_assert_expensive(!mi_page_queue_contains(to, page));
-  mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
-                     (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
-                     (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));
+  const size_t bsize = mi_page_block_size(page);
+  MI_UNUSED(bsize);
+  mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
+                     (bsize == to->block_size && mi_page_queue_is_full(from)) ||
+                     (bsize == from->block_size && mi_page_queue_is_full(to)) ||
+                     (bsize > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
+                     (bsize > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));

   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;
src/page.c | 83
@@ -59,7 +59,7 @@ static inline uint8_t* mi_page_area(const mi_page_t* page) {

 static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
   size_t psize;
-  uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize);
+  uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize, NULL);
   mi_block_t* start = (mi_block_t*)page_area;
   mi_block_t* end = (mi_block_t*)(page_area + psize);
   while(p != NULL) {
@@ -78,14 +78,14 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
 }

 static bool mi_page_is_valid_init(mi_page_t* page) {
-  mi_assert_internal(page->xblock_size > 0);
+  mi_assert_internal(mi_page_block_size(page) > 0);
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);

   const size_t bsize = mi_page_block_size(page);
   mi_segment_t* segment = _mi_page_segment(page);
-  uint8_t* start = _mi_page_start(segment,page,NULL);
-  mi_assert_internal(start == _mi_segment_page_start(segment,page,bsize,NULL,NULL));
+  uint8_t* start = mi_page_start(page);
+  mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL,NULL));
   //mi_assert_internal(start + page->capacity*page->block_size == page->top);

   mi_assert_internal(mi_page_list_is_valid(page,page->free));
@@ -283,10 +283,9 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   #endif
-  mi_assert_internal(pq!=NULL || page->xblock_size != 0);
   mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
   // a fresh page was found, initialize it
-  const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+  const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
   mi_assert_internal(full_block_size >= block_size);
   mi_page_init(heap, page, full_block_size, heap->tld);
   mi_heap_stat_increase(heap, pages, 1);
@@ -425,7 +424,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   _mi_segment_page_free(page, force, segments_tld);
 }

-#define MI_MAX_RETIRE_SIZE    MI_LARGE_OBJ_SIZE_MAX
+#define MI_MAX_RETIRE_SIZE    MI_LARGE_OBJ_SIZE_MAX   // should be less than size for MI_BIN_HUGE
 #define MI_RETIRE_CYCLES      (16)

 // Retire a page with no more used blocks
@@ -448,10 +447,12 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   // how to check this efficiently though...
   // for now, we don't retire if it is the only page left of this size class.
   mi_page_queue_t* pq = mi_page_queue_of(page);
-  if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) { // not too large && not full or huge queue?
+  const size_t bsize = mi_page_block_size(page);
+  if mi_likely(bsize < MI_MAX_RETIRE_SIZE) { // not too large && not full or huge queue?
+    mi_assert_internal(!mi_page_queue_is_special(pq));
     if (pq->last==page && pq->first==page) { // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
-      page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+      page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
       mi_heap_t* heap = mi_page_heap(page);
       mi_assert_internal(pq >= heap->pages);
       const size_t index = pq - heap->pages;
@@ -514,7 +515,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co
   #endif
   mi_assert_internal(page->capacity + extend <= page->reserved);
   mi_assert_internal(bsize == mi_page_block_size(page));
-  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
+  void* const page_area = mi_page_start(page);

   // initialize a randomized free list
   // set up `slice_count` slices to alternate between
@@ -572,7 +573,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
   #endif
   mi_assert_internal(page->capacity + extend <= page->reserved);
   mi_assert_internal(bsize == mi_page_block_size(page));
-  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL );
+  void* const page_area = mi_page_start(page);

   mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity);
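Both extend routines carve fresh blocks out of the page area at fixed `block_size` strides and chain them onto the free list. A standalone sketch of the non-randomized variant (simplified, address-ordered linking; not the actual mimalloc helpers):

#include <stdint.h>
#include <stdio.h>

typedef struct block_s { struct block_s* next; } block_t;

/* chain `extend` blocks of size `bsize`, starting at page_area + capacity*bsize,
   onto an intrusive free list; returns the new list head */
static block_t* free_list_extend(uint8_t* page_area, size_t capacity,
                                 size_t extend, size_t bsize, block_t* free) {
  block_t* const start = (block_t*)(page_area + capacity * bsize);
  block_t* b = start;
  for (size_t i = 1; i < extend; i++) {     /* link in address order */
    block_t* next = (block_t*)((uint8_t*)b + bsize);
    b->next = next;
    b = next;
  }
  b->next = free;                           /* last new block points at old head */
  return start;
}

int main(void) {
  static uint8_t area[8 * 16];
  block_t* free = free_list_extend(area, 0, 4, 16, NULL);  /* carve 4 blocks of 16 bytes */
  size_t n = 0;
  for (block_t* b = free; b != NULL; b = b->next) n++;
  printf("free list length: %zu\n", n);
  return 0;
}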
@@ -616,15 +617,15 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)

   size_t page_size;
   //uint8_t* page_start =
-  _mi_page_start(_mi_page_segment(page), page, &page_size);
+  _mi_segment_page_start(_mi_page_segment(page), page, &page_size, NULL);
   mi_stat_counter_increase(tld->stats.pages_extended, 1);

   // calculate the extend count
-  const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size);
+  const size_t bsize = mi_page_block_size(page);
   size_t extend = page->reserved - page->capacity;
   mi_assert_internal(extend > 0);

-  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
+  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize);
   if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
   mi_assert_internal(max_extend > 0);
@@ -658,10 +659,10 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(block_size > 0);
   // set fields
   mi_page_set_heap(page, heap);
+  page->block_size = block_size;
   size_t page_size;
-  const void* page_start = _mi_segment_page_start(segment, page, block_size, &page_size, NULL);
-  mi_track_mem_noaccess(page_start,page_size);
-  page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE);
+  page->page_start = _mi_segment_page_start(segment, page, &page_size, NULL);
+  mi_track_mem_noaccess(page->page_start,page_size);
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
   mi_assert_internal(page->reserved > 0);
@@ -673,20 +674,14 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   #if MI_DEBUG>2
   if (page->is_zero_init) {
     mi_track_mem_defined(page_start, page_size);
-    mi_assert_expensive(!page->is_zero_init || mi_mem_is_zero(page_start, page_size));
+    mi_assert_expensive(!page->is_zero_init || mi_mem_is_zero(page->page_start, page_size));
   }
   #endif
   if (block_size > 0 && _mi_is_power_of_two(block_size)) {
     page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
   }
-  if (block_size > 0) {
-    const ptrdiff_t start_offset = (uint8_t*)page_start - (uint8_t*)page;
-    const ptrdiff_t start_adjust = start_offset % block_size;
-    if (start_offset >= 0 && (start_adjust % MI_MAX_ALIGN_SIZE) == 0 && (start_adjust / MI_MAX_ALIGN_SIZE) < 255) {
-      const ptrdiff_t adjust = (start_adjust / MI_MAX_ALIGN_SIZE);
-      mi_assert_internal(adjust + 1 == (uint8_t)(adjust + 1));
-      page->block_offset_adj = (uint8_t)(adjust + 1);
-    }
+  else {
+    page->block_size_shift = 0;
   }

   mi_assert_internal(page->capacity == 0);
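`block_size_shift` is computed once at page initialization with count-trailing-zeros, so the free path can later replace `offset % block_size` with `offset & (block_size - 1)`. A small sketch of that computation (using the GCC/Clang builtin as a stand-in for mimalloc's portable `mi_ctz`):

#include <stdint.h>
#include <stdio.h>

static inline unsigned ctz_uptr(uintptr_t x) { return (unsigned)__builtin_ctzl(x); }
static inline int is_power_of_two(uintptr_t x) { return x != 0 && (x & (x - 1)) == 0; }

int main(void) {
  /* for power-of-two block sizes, block_size == 1 << block_size_shift;
     otherwise the shift stays 0 and the generic modulo path is used */
  size_t sizes[] = { 8, 48, 64, 1024, 1280 };
  for (size_t i = 0; i < 5; i++) {
    uint8_t shift = is_power_of_two(sizes[i]) ? (uint8_t)ctz_uptr(sizes[i]) : 0;
    printf("block_size %5zu -> block_size_shift %u\n", sizes[i], shift);
  }
  return 0;
}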
@@ -701,8 +696,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->keys[0] != 0);
   mi_assert_internal(page->keys[1] != 0);
   #endif
-  mi_assert_internal(page->block_size_shift == 0 || (block_size == (1UL << page->block_size_shift)));
-  mi_assert_internal(page->block_offset_adj == 0 || (((uint8_t*)page_start - (uint8_t*)page - MI_MAX_ALIGN_SIZE*(page->block_offset_adj-1))) % block_size == 0);
+  mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift)));
   mi_assert_expensive(mi_page_is_valid_init(page));

   // initialize an initial free list
@@ -827,40 +821,31 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
   General allocation
----------------------------------------------------------- */

 // A huge page is allocated directly without being in a queue.
-// Because huge pages contain just one block, and the segment contains
-// just that page, we always treat them as abandoned and any thread
-// that frees the block can free the whole page and segment directly.
-// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX).
+// Huge pages contain just one block, and the segment contains just that page.
+// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
+// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
   mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
   #if MI_HUGE_PAGE_ABANDON
   mi_page_queue_t* pq = NULL;
   #else
-  mi_page_queue_t* pq = mi_page_queue(heap, MI_HUGE_OBJ_SIZE_MAX); // not block_size as that can be low if the page_alignment > 0
-  mi_assert_internal(mi_page_queue_is_huge(pq));
+  mi_page_queue_t* pq = mi_page_queue(heap, block_size);
+  // mi_assert_internal(mi_page_queue_is_huge(pq));
   #endif
-  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size,page_alignment);
-  if (page != NULL) {
-    const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding already
-    mi_assert_internal(bsize >= size);
+  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
+  if (page != NULL) {
+    mi_assert_internal(mi_page_block_size(page) >= size);
     mi_assert_internal(mi_page_immediate_available(page));
-    mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
+    mi_assert_internal(mi_page_is_huge(page));
     mi_assert_internal(_mi_page_segment(page)->used==1);
     #if MI_HUGE_PAGE_ABANDON
     mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
     mi_page_set_heap(page, NULL);
     #endif

-    if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
-      mi_heap_stat_increase(heap, giant, bsize);
-      mi_heap_stat_counter_increase(heap, giant_count, 1);
-    }
-    else {
-      mi_heap_stat_increase(heap, huge, bsize);
-      mi_heap_stat_counter_increase(heap, huge_count, 1);
-    }
+    mi_heap_stat_increase(heap, huge, mi_page_block_size(page));
+    mi_heap_stat_counter_increase(heap, huge_count, 1);
   }
   return page;
 }
@@ -927,7 +912,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_page_block_size(page) >= size);

   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
-  if mi_unlikely(zero && page->xblock_size == 0) {
+  if mi_unlikely(zero && page->block_size == 0) {
     // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
     void* p = _mi_page_malloc(heap, page, size, false);
     mi_assert_internal(p != NULL);
src/segment.c:
@@ -412,13 +412,13 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_
   #endif

   if (page_size != NULL) *page_size = psize;
-  mi_assert_internal(page->xblock_size == 0 || _mi_ptr_page(p) == page);
+  mi_assert_internal(page->block_size == 0 || _mi_ptr_page(p) == page);
   mi_assert_internal(_mi_ptr_segment(p) == segment);
   return p;
 }

 // Start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size)
+static uint8_t* mi_segment_page_start_ex(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size)
 {
   size_t psize;
   uint8_t* p = mi_segment_raw_page_start(segment, page, &psize);
@@ -437,11 +437,15 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
   }

   if (page_size != NULL) *page_size = psize;
-  mi_assert_internal(page->xblock_size==0 || _mi_ptr_page(p) == page);
+  mi_assert_internal(_mi_ptr_page(p) == page);
   mi_assert_internal(_mi_ptr_segment(p) == segment);
   return p;
 }

+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size, size_t* pre_size) {
+  return mi_segment_page_start_ex(segment, page, mi_page_block_size(page), page_size, pre_size);
+}
+
 static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_t* pre_size, size_t* info_size)
 {
   const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
@@ -707,15 +711,19 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg
   page->is_zero_init = false;
   page->segment_in_use = false;

-  // zero the page data, but not the segment fields and capacity, and block_size (for page size calculations)
-  uint32_t block_size = page->xblock_size;
+  // zero the page data, but not the segment fields and capacity, page start, and block_size (for page size calculations)
+  size_t block_size = page->block_size;
+  uint8_t block_size_shift = page->block_size_shift;
+  uint8_t* page_start = page->page_start;
   uint16_t capacity = page->capacity;
   uint16_t reserved = page->reserved;
   ptrdiff_t ofs = offsetof(mi_page_t,capacity);
   _mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
   page->capacity = capacity;
   page->reserved = reserved;
-  page->xblock_size = block_size;
+  page->block_size = block_size;
+  page->block_size_shift = block_size_shift;
+  page->page_start = page_start;
   segment->used--;

   // schedule purge
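The hunk above is the save/wipe/restore pattern: zero the page struct from a fixed offset, then restore the few fields that must survive for later size and address calculations (now including `page_start` and the full `block_size`). A standalone sketch of the pattern with a simplified stand-in struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct page_s {
  uint8_t  segment_idx;  /* "owned" by the segment: stays intact */
  uint16_t capacity;     /* cleared from here on, then selectively restored */
  uint16_t reserved;
  size_t   block_size;
  uint8_t* page_start;
  void*    free;         /* transient state that really gets dropped */
  size_t   used;
} page_t;

static void page_clear(page_t* page) {
  size_t   block_size = page->block_size;
  uint8_t* page_start = page->page_start;
  uint16_t capacity   = page->capacity;
  uint16_t reserved   = page->reserved;
  ptrdiff_t ofs = offsetof(page_t, capacity);
  memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);  /* wipe */
  page->capacity   = capacity;                           /* restore keepers */
  page->reserved   = reserved;
  page->block_size = block_size;
  page->page_start = page_start;
}

int main(void) {
  static uint8_t area[256];
  page_t page = { .segment_idx = 1, .capacity = 4, .reserved = 8,
                  .block_size = 64, .page_start = area, .free = area, .used = 3 };
  page_clear(&page);
  printf("used=%zu (cleared), block_size=%zu (kept)\n", page.used, page.block_size);
  return 0;
}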
@@ -831,7 +839,6 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
 // Possibly clear pages and check if free space is available
 static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size, bool* all_pages_free)
 {
-  mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   bool has_page = false;
   size_t pages_used = 0;
   size_t pages_used_empty = 0;
@@ -847,7 +854,7 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size, bool
       pages_used_empty++;
       has_page = true;
     }
-    else if (page->xblock_size == block_size && mi_page_has_any_available(page)) {
+    else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) {
       // a page has available free blocks of the right size
       has_page = true;
     }
@@ -901,7 +908,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
     else {
       // otherwise reclaim it into the heap
       _mi_page_reclaim(heap, page);
       if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page)) {
         if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
       }
     }
@@ -1008,7 +1015,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
 static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
   mi_assert_internal(page_kind <= MI_PAGE_LARGE);
-  mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
+  mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);

   // 1. try to reclaim an abandoned segment
   bool reclaimed;
@@ -1077,7 +1084,7 @@ static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_p
   mi_assert_internal(page != NULL);
   #if MI_DEBUG>=2 && !MI_TRACK_ENABLED // && !MI_TSAN
   // verify it is committed
-  _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
+  mi_segment_page_start_ex(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
   #endif
   return page;
 }
@@ -1100,7 +1107,7 @@ static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);
   #if MI_DEBUG>=2 && !MI_TRACK_ENABLED // && !MI_TSAN
-  _mi_segment_page_start(segment, page, sizeof(void*), NULL, NULL)[0] = 0;
+  mi_segment_page_start_ex(segment, page, sizeof(void*), NULL, NULL)[0] = 0;
   #endif
   return page;
 }
@@ -1117,11 +1124,11 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);

-  // for huge pages we initialize the xblock_size as we may
+  // for huge pages we initialize the block_size as we may
   // overallocate to accommodate large alignments.
   size_t psize;
-  uint8_t* start = _mi_segment_page_start(segment, page, 0, &psize, NULL);
-  page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  uint8_t* start = mi_segment_page_start_ex(segment, page, 0, &psize, NULL);
+  page->block_size = psize;

   // reset the part of the page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
   if (page_alignment > 0 && segment->allow_decommit && page->is_committed) {
src/stats.c:
@@ -117,8 +117,7 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
   mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire, 1);
   mi_stat_counter_add(&stats->searches, &src->searches, 1);
   mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1);
-  mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1);
-  mi_stat_counter_add(&stats->giant_count, &src->giant_count, 1);
+  mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1);
   #if MI_STAT>1
   for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
     if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) {
@@ -316,12 +315,10 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
   #endif
   #if MI_STAT
   mi_stat_print(&stats->normal, "normal", (stats->normal_count.count == 0 ? 1 : -(stats->normal.allocated / stats->normal_count.count)), out, arg);
-  mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg);
-  mi_stat_print(&stats->giant, "giant", (stats->giant_count.count == 0 ? 1 : -(stats->giant.allocated / stats->giant_count.count)), out, arg);
+  mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg);
   mi_stat_count_t total = { 0,0,0,0 };
   mi_stat_add(&total, &stats->normal, 1);
   mi_stat_add(&total, &stats->huge, 1);
-  mi_stat_add(&total, &stats->giant, 1);
   mi_stat_print(&total, "total", 1, out, arg);
   #endif
   #if MI_STAT>1