mirror of https://github.com/microsoft/mimalloc.git (synced 2024-12-25 20:14:12 +08:00)
add is_huge page flag to ensure the right page queue is returned (see #868)
parent 9c96d05ee4
commit 006ae2d055
include/mimalloc/internal.h

@@ -470,7 +470,9 @@ static inline size_t mi_page_block_size(const mi_page_t* page) {
 }
 
 static inline bool mi_page_is_huge(const mi_page_t* page) {
-  return (_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
+  mi_assert_internal((page->is_huge && _mi_page_segment(page)->page_kind == MI_PAGE_HUGE) ||
+                     (!page->is_huge && _mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
+  return page->is_huge;
 }
 
 // Get the usable block size of a page without fixed padding.
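The gist of the change: whether a page is huge is now read from a flag stored on the page itself instead of being derived from the owning segment's page_kind, with a debug assertion that the two stay in sync. A standalone sketch of the new check, using simplified stand-in types rather than mimalloc's real definitions:

    /* sketch only: models mi_page_is_huge after this commit */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef enum { PAGE_SMALL, PAGE_MEDIUM, PAGE_LARGE, PAGE_HUGE } page_kind_t;
    typedef struct segment_s { page_kind_t page_kind; } segment_t;
    typedef struct page_s {
      segment_t* segment;    /* stand-in for _mi_page_segment(page) */
      uint8_t    is_huge:1;  /* the flag added by this commit */
    } page_t;

    static bool page_is_huge(const page_t* page) {
      /* the cached bit must agree with the owning segment's kind */
      assert(( page->is_huge && page->segment->page_kind == PAGE_HUGE) ||
             (!page->is_huge && page->segment->page_kind != PAGE_HUGE));
      return page->is_huge;
    }

    int main(void) {
      segment_t huge_seg = { PAGE_HUGE };
      page_t page = { &huge_seg, 1 };
      printf("huge? %d\n", page_is_huge(&page));  /* prints "huge? 1" */
      return 0;
    }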
include/mimalloc/types.h

@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -278,6 +278,7 @@ typedef struct mi_page_s {
   uint8_t segment_in_use:1;  // `true` if the segment allocated this page
   uint8_t is_committed:1;    // `true` if the page virtual memory is committed
   uint8_t is_zero_init:1;    // `true` if the page was initially zero initialized
+  uint8_t is_huge:1;         // `true` if the page is in a huge segment
 
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t capacity;         // number of blocks committed, must be the first field, see `segment.c:page_clear`
@@ -285,7 +286,7 @@ typedef struct mi_page_s {
   uint16_t used;             // number of blocks in use (including blocks in `thread_free`)
   mi_page_flags_t flags;     // `in_full` and `has_aligned` flags (8 bits)
   uint8_t block_size_shift;  // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
-  uint8_t free_is_zero:1;    // `true` if the blocks in the free list are zero initialized
+  uint8_t free_is_zero:1;    // `true` if the blocks in the free list are zero initialized
   uint8_t retire_expire:7;   // expiration count for retired blocks
   // padding
   mi_block_t* free;          // list of available free blocks (`malloc` allocates from this list)
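Packing note: the new is_huge bit joins the three existing 1-bit fields in the same byte, so mi_page_t does not grow. A minimal sketch on a stand-in struct (illustrative only, not the real layout):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct page_bits_s {
      uint8_t segment_idx;       /* index of this page inside its segment */
      uint8_t segment_in_use:1;
      uint8_t is_committed:1;
      uint8_t is_zero_init:1;
      uint8_t is_huge:1;         /* new bit; four spare bits remain in the byte */
    } page_bits_t;

    int main(void) {
      printf("%zu bytes\n", sizeof(page_bits_t));  /* typically prints "2 bytes" */
      return 0;
    }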
src/init.c

@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -14,7 +14,8 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Empty page used to initialize the small free pages array
 const mi_page_t _mi_page_empty = {
-  0, false, false, false,
+  0,
+  false, false, false, false,
   0, // capacity
   0, // reserved capacity
   0, // used
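Because _mi_page_empty is initialized positionally, the new field needs an explicit false in exactly the slot matching its declaration order. A sketch of the same update on a stand-in struct, with a designated-initializer equivalent for comparison (the real code keeps the positional style):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct page_s {
      uint8_t  segment_idx;
      uint8_t  segment_in_use:1, is_committed:1, is_zero_init:1, is_huge:1;
      uint16_t capacity;
    } page_t;

    /* positional, as in the diff: one extra `false` for the new bit */
    static const page_t empty_positional = { 0, false, false, false, false, 0 };

    /* designated equivalent: unnamed fields are zeroed, order is irrelevant */
    static const page_t empty_designated = { .segment_idx = 0, .is_huge = false };

    int main(void) { (void)empty_positional; (void)empty_designated; return 0; }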
@@ -78,9 +79,10 @@ const mi_page_t _mi_page_empty = {
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
   MI_STAT_COUNT_NULL(), \
   { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
-  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
-  { 0, 0 }, { 0, 0 }, { 0, 0 } \
+  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
   MI_STAT_COUNT_END_NULL()
 
 // --------------------------------------------------------
src/page-queue.c

@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -141,21 +141,21 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
 }
 #endif
 
-static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(mi_page_block_size(page)));
-  mi_heap_t* heap = mi_page_heap(page);
-  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
+static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
+  mi_assert_internal(heap!=NULL);
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
+  mi_assert_internal(bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(bin >= MI_BIN_HUGE || mi_page_block_size(page) == pq->block_size);
-  mi_assert_expensive(mi_page_queue_contains(pq, page));
+  mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(pq)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
   return pq;
 }
 
-static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(mi_page_block_size(page)));
-  mi_assert_internal(bin <= MI_BIN_FULL);
-  mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(mi_page_is_in_full(page) || mi_page_block_size(page) == pq->block_size);
+static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
+  mi_heap_t* heap = mi_page_heap(page);
+  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+  mi_assert_expensive(mi_page_queue_contains(pq, page));
   return pq;
 }
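This is the heart of issue #868: the bin was previously computed from the block size alone, but a huge page that was allocated only to satisfy a big alignment can carry a small block size, so mi_bin() picked a normal bin while the page actually lives in a huge segment. A standalone sketch of the corrected selection, with illustrative constants and a stub in place of mi_bin() (not mimalloc's real values):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIN_HUGE 73  /* stand-in for MI_BIN_HUGE */
    #define BIN_FULL 74  /* stand-in for MI_BIN_FULL */

    /* crude stub for mi_bin(): maps a block size to a small-bin index */
    static uint8_t size_bin(size_t bsize) { return (uint8_t)((bsize / 1024) % 64); }

    static uint8_t page_bin(bool in_full, bool is_huge, size_t block_size) {
      /* after this commit: the huge flag wins over the block size */
      return in_full ? BIN_FULL : (is_huge ? BIN_HUGE : size_bin(block_size));
    }

    int main(void) {
      /* a 4 KiB-block page living in a huge segment: the old size-based
         selection would have returned size_bin(4096) == 4 (wrong queue) */
      printf("bin = %u\n", page_bin(false, true, 4096));  /* prints "bin = 73" */
      return 0;
    }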
@@ -210,7 +210,9 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
 static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(queue, page));
-  mi_assert_internal(mi_page_block_size(page) == queue->block_size || (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;
   if (page->next != NULL) page->next->prev = page->prev;
@@ -236,7 +238,7 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   #endif
   mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
-                     (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
 
   mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -267,8 +269,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
   mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
                      (bsize == to->block_size && mi_page_queue_is_full(from)) ||
                      (bsize == from->block_size && mi_page_queue_is_full(to)) ||
-                     (bsize > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
-                     (bsize > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(to)) ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_full(to)));
 
   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;
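The same invariant now appears in mi_page_queue_remove, mi_page_queue_push, and mi_page_queue_enqueue_from: a page matches a queue by exact block size, by the huge flag, or by the full flag; the old size comparison against MI_LARGE_OBJ_SIZE_MAX is gone. A hypothetical helper (not part of the commit) that spells out the shared rule:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct queue_s { size_t block_size; bool is_huge; bool is_full; } queue_t;
    typedef struct page_s  { size_t block_size; bool is_huge; bool in_full; } page_t;

    /* a page may sit in queue q iff the block sizes match exactly, or the
       page is huge and q is the huge queue, or the page is in-full and q
       is the full queue -- no size test for hugeness anymore */
    static bool page_belongs_to(const page_t* p, const queue_t* q) {
      return p->block_size == q->block_size ||
             (p->is_huge && q->is_huge) ||
             (p->in_full && q->is_full);
    }

    int main(void) {
      queue_t huge_q = { .block_size = 0, .is_huge = true, .is_full = false };
      page_t  p      = { .block_size = 4096, .is_huge = true, .in_full = false };
      return page_belongs_to(&p, &huge_q) ? 0 : 1;  /* exits 0: belongs */
    }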
src/page.c (19 changed lines)
@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -82,7 +82,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);
 
-  const size_t bsize = mi_page_block_size(page);
+  // const size_t bsize = mi_page_block_size(page);
   mi_segment_t* segment = _mi_page_segment(page);
   uint8_t* start = mi_page_start(page);
   mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL,NULL));
@@ -448,8 +448,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   // for now, we don't retire if it is the only page left of this size class.
   mi_page_queue_t* pq = mi_page_queue_of(page);
   const size_t bsize = mi_page_block_size(page);
-  if mi_likely(bsize < MI_MAX_RETIRE_SIZE) { // not too large && not full or huge queue?
-    mi_assert_internal(!mi_page_queue_is_special(pq));
+  if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
     if (pq->last==page && pq->first==page) { // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
       page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
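The retire gate changes in the same spirit: instead of using the block size as a proxy for "not huge and not full", the code now asks the queue directly whether it is special. A sketch with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct queue_s { bool is_huge; bool is_full; } queue_t;

    /* "special" = the huge queue or the full queue; their pages never retire */
    static bool queue_is_special(const queue_t* q) { return q->is_huge || q->is_full; }

    static bool may_retire(const queue_t* pq) {
      /* old: bsize < MI_MAX_RETIRE_SIZE, a size proxy that misclassified
         small-block pages living in huge segments; new: ask the queue */
      return !queue_is_special(pq);
    }

    int main(void) {
      queue_t huge_q = { .is_huge = true,  .is_full = false };
      queue_t norm_q = { .is_huge = false, .is_full = false };
      printf("%d %d\n", may_retire(&huge_q), may_retire(&norm_q));  /* "0 1" */
      return 0;
    }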
@@ -662,7 +661,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   page->block_size = block_size;
   size_t page_size;
   page->page_start = _mi_segment_page_start(segment, page, &page_size, NULL);
-  mi_track_mem_noaccess(page->page_start,page_size);
+  mi_track_mem_noaccess(page->page_start,page_size);
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
   mi_assert_internal(page->reserved > 0);
@@ -821,7 +820,7 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
   General allocation
 ----------------------------------------------------------- */
 
-// Huge pages contain just one block, and the segment contains just that page.
+// Huge pages contain just one block, and the segment contains just that page.
 // Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
 // so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
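A usage sketch of the situation the comment describes: an allocation that is small by size but huge by segment. An alignment above MI_BLOCK_ALIGNMENT_MAX (see mimalloc.h for the exact threshold in this version; 4 MiB is assumed to exceed it here) forces the allocation into a huge one-page segment even though its block size stays far below MI_LARGE_OBJ_SIZE_MAX, exactly the case where the old size-based huge test went wrong:

    #include <mimalloc.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* 4 KiB of data at 4 MiB alignment: assumed to be served from a huge
         (single-page) segment although the size itself is tiny */
      void* p = mi_malloc_aligned(4096, 4 * 1024 * 1024);
      printf("p = %p, 4MiB-aligned: %d\n", p,
             (int)(((uintptr_t)p & (4 * 1024 * 1024 - 1)) == 0));
      mi_free(p);
      return 0;
    }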
@@ -830,15 +829,15 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
   #if MI_HUGE_PAGE_ABANDON
   mi_page_queue_t* pq = NULL;
   #else
-  mi_page_queue_t* pq = mi_page_queue(heap, block_size);
-  // mi_assert_internal(mi_page_queue_is_huge(pq));
+  mi_page_queue_t* pq = mi_page_queue(heap, MI_LARGE_OBJ_SIZE_MAX+1); // always in the huge queue regardless of the block size
+  mi_assert_internal(mi_page_queue_is_huge(pq));
   #endif
   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
-  if (page != NULL) {
+  if (page != NULL) {
     mi_assert_internal(mi_page_block_size(page) >= size);
     mi_assert_internal(mi_page_immediate_available(page));
-    mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
+    mi_assert_internal(mi_page_is_huge(page));
+    mi_assert_internal(_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
     mi_assert_internal(_mi_page_segment(page)->used==1);
     #if MI_HUGE_PAGE_ABANDON
     mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
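Why MI_LARGE_OBJ_SIZE_MAX+1 works as the lookup key: mi_page_queue(heap, size) maps a size to its bin, and any size above the large-object maximum maps to the single huge bin, so "max plus one" is a canonical size that always lands there no matter what the page's real block_size is. A sketch with illustrative constants (not mimalloc's real values):

    #include <stddef.h>
    #include <stdio.h>

    #define LARGE_OBJ_SIZE_MAX (1024u * 1024u)  /* illustrative only */
    #define BIN_HUGE 73u                        /* illustrative only */

    static unsigned bin_of_size(size_t size) {
      if (size > LARGE_OBJ_SIZE_MAX) return BIN_HUGE;
      return (unsigned)(size / 1024);  /* crude stand-in for mi_bin */
    }

    int main(void) {
      /* the page's own (possibly small) block size picks a normal bin;
         the sentinel always picks the huge bin */
      printf("bin(4096)     = %u\n", bin_of_size(4096));                    /* 4  */
      printf("bin(sentinel) = %u\n", bin_of_size(LARGE_OBJ_SIZE_MAX + 1));  /* 73 */
      return 0;
    }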
src/segment.c

@@ -142,6 +142,7 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
   mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
   mi_assert_internal(segment->used <= segment->capacity);
   mi_assert_internal(segment->abandoned <= segment->used);
+  mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || segment->capacity == 1);
   size_t nfree = 0;
   for (size_t i = 0; i < segment->capacity; i++) {
     const mi_page_t* const page = &segment->pages[i];
@@ -151,6 +152,7 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
     if (page->segment_in_use) {
       mi_assert_expensive(!mi_pages_purge_contains(page, tld));
     }
+    if (segment->page_kind == MI_PAGE_HUGE) mi_assert_internal(page->is_huge);
   }
   mi_assert_internal(nfree + segment->used == segment->capacity);
   // mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
@@ -615,11 +617,13 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   _mi_memzero((uint8_t*)segment + ofs, info_size - ofs);
 
   // initialize pages info
+  const bool is_huge = (page_kind == MI_PAGE_HUGE);
   for (size_t i = 0; i < capacity; i++) {
     mi_assert_internal(i <= 255);
     segment->pages[i].segment_idx = (uint8_t)i;
     segment->pages[i].is_committed = segment->memid.initially_committed;
     segment->pages[i].is_zero_init = segment->memid.initially_zero;
+    segment->pages[i].is_huge = is_huge;
   }
 
   // initialize
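Tagging every page of a huge segment at construction time means the flag is already correct the first time the page is pushed on a queue. A sketch of the added initialization on simplified types:

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum { PAGE_SMALL, PAGE_MEDIUM, PAGE_LARGE, PAGE_HUGE } page_kind_t;
    typedef struct page_s { unsigned char segment_idx; unsigned char is_huge:1; } page_t;

    static void init_pages(page_t* pages, size_t capacity, page_kind_t kind) {
      const bool is_huge = (kind == PAGE_HUGE);  /* hoisted once, as in the diff */
      for (size_t i = 0; i < capacity; i++) {
        pages[i].segment_idx = (unsigned char)i;
        pages[i].is_huge = is_huge;
      }
    }

    int main(void) {
      page_t pages[1];                 /* a huge segment holds exactly one page */
      init_pages(pages, 1, PAGE_HUGE);
      return pages[0].is_huge ? 0 : 1; /* exits 0 */
    }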
@@ -753,7 +757,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
     mi_segment_abandon(segment,tld);
   }
   else if (segment->used + 1 == segment->capacity) {
-    mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); // for now we only support small and medium pages
+    mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); // large and huge pages are always the single page in a segment
     if (segment->page_kind <= MI_PAGE_MEDIUM) {
       // move back to segments free list
       mi_segment_insert_in_free_queue(segment,tld);
@@ -1123,13 +1127,14 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   #endif
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);
+  mi_assert_internal(page->is_huge);
 
   // for huge pages we initialize the block_size as we may
   // overallocate to accommodate large alignments.
   size_t psize;
   uint8_t* start = mi_segment_page_start_ex(segment, page, 0, &psize, NULL);
   page->block_size = psize;
 
   // reset the part of the page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
   if (page_alignment > 0 && segment->allow_decommit && page->is_committed) {
     uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);