add is_huge page flag to ensure the right page queue is returned (see #868)

daanx 2024-03-24 17:07:28 -07:00
parent 9c96d05ee4
commit 006ae2d055
7 changed files with 46 additions and 35 deletions
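Background for the hunks below: the page queue (size-class bin) of a page used to be derived from its block size alone, where `bsize > MI_LARGE_OBJ_SIZE_MAX` implied the huge queue. But a huge segment can also back a small block that was allocated with a very large alignment (the allocator over-allocates to satisfy the alignment), so a huge page's block size may be at or below `MI_LARGE_OBJ_SIZE_MAX`, and the size-based lookup then landed in a regular bin instead of `MI_BIN_HUGE`. Storing an explicit `is_huge` flag on the page makes the queue lookup independent of the block size. A minimal standalone sketch of the idea (the constants and the `toy_bin` stand-in are illustrative, not mimalloc's actual definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MI_LARGE_OBJ_SIZE_MAX  (512*1024)  // illustrative threshold
#define MI_BIN_HUGE            73          // illustrative bin index

typedef struct toy_page_s {
  bool   is_huge;     // new flag: set iff the page lives in a huge segment
  size_t block_size;  // may be small even for a huge page (alignment over-allocation)
} toy_page_t;

// stand-in for mimalloc's size-to-bin map
static uint8_t toy_bin(size_t size) {
  return (size > MI_LARGE_OBJ_SIZE_MAX ? MI_BIN_HUGE : (uint8_t)(size / (64*1024)));
}

// before: bin derived from the block size only -- a huge page with a small
// block size is mapped to a regular bin, i.e. the wrong queue
static uint8_t toy_bin_of_old(const toy_page_t* page) {
  return toy_bin(page->block_size);
}

// after: the explicit flag forces MI_BIN_HUGE regardless of the block size
static uint8_t toy_bin_of_new(const toy_page_t* page) {
  return (page->is_huge ? MI_BIN_HUGE : toy_bin(page->block_size));
}

int main(void) {
  toy_page_t huge = { .is_huge = true, .block_size = 256*1024 };
  printf("old bin: %d (a regular bin -- mismatch)\n", toy_bin_of_old(&huge));
  printf("new bin: %d (the huge bin)\n", toy_bin_of_new(&huge));
  return 0;
}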

View File

@@ -470,7 +470,9 @@ static inline size_t mi_page_block_size(const mi_page_t* page) {
 }
 
 static inline bool mi_page_is_huge(const mi_page_t* page) {
-  return (_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
+  mi_assert_internal((page->is_huge && _mi_page_segment(page)->page_kind == MI_PAGE_HUGE) ||
+                     (!page->is_huge && _mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
+  return page->is_huge;
 }
 
 // Get the usable block size of a page without fixed padding.

View File

@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -278,6 +278,7 @@ typedef struct mi_page_s {
   uint8_t segment_in_use:1;  // `true` if the segment allocated this page
   uint8_t is_committed:1;    // `true` if the page virtual memory is committed
   uint8_t is_zero_init:1;    // `true` if the page was initially zero initialized
+  uint8_t is_huge:1;         // `true` if the page is in a huge segment
 
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t capacity;         // number of blocks committed, must be the first field, see `segment.c:page_clear`

View File

@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.

View File

@@ -14,7 +14,8 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Empty page used to initialize the small free pages array
 const mi_page_t _mi_page_empty = {
-  0, false, false, false,
+  0,
+  false, false, false, false,
   0,       // capacity
   0,       // reserved capacity
   0,       // used
@@ -78,9 +79,10 @@ const mi_page_t _mi_page_empty = {
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
-  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
-  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
-  { 0, 0 }, { 0, 0 }, { 0, 0 } \
+  MI_STAT_COUNT_NULL(), \
+  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
  MI_STAT_COUNT_END_NULL()
 
 // --------------------------------------------------------
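The `_mi_page_empty` change above follows mechanically from the new bit-field in `mi_page_s`: the constant is initialized positionally, so the added flag needs its own `false` entry (and the flag packs into the same byte as its three neighbours, leaving the hot layout for `mi_malloc`/`mi_free` untouched). A compilable toy version of that pattern (names are stand-ins, not mimalloc's):

#include <stdbool.h>
#include <stdint.h>

typedef struct toy_page_s {
  uint8_t  segment_idx;       // index of the page in its segment
  uint8_t  segment_in_use:1;
  uint8_t  is_committed:1;
  uint8_t  is_zero_init:1;
  uint8_t  is_huge:1;         // the new bit shares the flags byte above
  uint16_t capacity;
} toy_page_t;

// positional aggregate initializer: one entry per member, so adding the
// bit-field means adding a matching `false` -- exactly the hunk above
const toy_page_t toy_page_empty = { 0, false, false, false, false, 0 };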

View File

@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -141,21 +141,21 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
 }
 #endif
 
-static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(mi_page_block_size(page)));
-  mi_heap_t* heap = mi_page_heap(page);
-  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
+static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
+  mi_assert_internal(heap!=NULL);
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
+  mi_assert_internal(bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(bin >= MI_BIN_HUGE || mi_page_block_size(page) == pq->block_size);
-  mi_assert_expensive(mi_page_queue_contains(pq, page));
+  mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(pq)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
   return pq;
 }
 
-static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(mi_page_block_size(page)));
-  mi_assert_internal(bin <= MI_BIN_FULL);
-  mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(mi_page_is_in_full(page) || mi_page_block_size(page) == pq->block_size);
+static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
+  mi_heap_t* heap = mi_page_heap(page);
+  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+  mi_assert_expensive(mi_page_queue_contains(pq, page));
   return pq;
 }
@@ -210,7 +210,9 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
 static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(queue, page));
-  mi_assert_internal(mi_page_block_size(page) == queue->block_size || (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;
   if (page->next != NULL) page->next->prev = page->prev;
@@ -236,7 +238,7 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   #endif
   mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
-                     (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
 
   mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -267,8 +269,8 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
   mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
                      (bsize == to->block_size && mi_page_queue_is_full(from)) ||
                      (bsize == from->block_size && mi_page_queue_is_full(to)) ||
-                     (bsize > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
-                     (bsize > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));
+                     (mi_page_is_huge(page) && mi_page_queue_is_huge(to)) ||
+                     (mi_page_is_huge(page) && mi_page_queue_is_full(to)));
 
   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;
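All of the queue assertions above encode one membership invariant, with the huge case now keyed on the page flag instead of on `block_size > MI_LARGE_OBJ_SIZE_MAX`. Written out as a standalone predicate it reads roughly as follows (toy types, not mimalloc's):

#include <stdbool.h>
#include <stddef.h>

typedef struct toy_queue_s { size_t block_size; bool is_huge_q; bool is_full_q; } toy_queue_t;
typedef struct toy_page_s  { size_t block_size; bool is_huge;   bool in_full;   } toy_page_t;

// a page may sit in queue `pq` iff one of these three cases holds
static bool toy_page_in_queue_ok(const toy_page_t* page, const toy_queue_t* pq) {
  return page->block_size == pq->block_size   // its exact size-class queue
      || (page->is_huge && pq->is_huge_q)     // huge page: the huge queue, regardless of size
      || (page->in_full && pq->is_full_q);    // full page: the full queue
}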

View File

@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -82,7 +82,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);
-  const size_t bsize = mi_page_block_size(page);
+  // const size_t bsize = mi_page_block_size(page);
   mi_segment_t* segment = _mi_page_segment(page);
   uint8_t* start = mi_page_start(page);
   mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL,NULL));
@@ -448,8 +448,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   // for now, we don't retire if it is the only page left of this size class.
   mi_page_queue_t* pq = mi_page_queue_of(page);
   const size_t bsize = mi_page_block_size(page);
-  if mi_likely(bsize < MI_MAX_RETIRE_SIZE) { // not too large && not full or huge queue?
-    mi_assert_internal(!mi_page_queue_is_special(pq));
+  if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue?
     if (pq->last==page && pq->first==page) { // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
       page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
@@ -830,15 +829,15 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_a
   #if MI_HUGE_PAGE_ABANDON
   mi_page_queue_t* pq = NULL;
   #else
-  mi_page_queue_t* pq = mi_page_queue(heap, block_size);
-  // mi_assert_internal(mi_page_queue_is_huge(pq));
+  mi_page_queue_t* pq = mi_page_queue(heap, MI_LARGE_OBJ_SIZE_MAX+1);  // always in the huge queue regardless of the block size
+  mi_assert_internal(mi_page_queue_is_huge(pq));
   #endif
   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
   if (page != NULL) {
     mi_assert_internal(mi_page_block_size(page) >= size);
     mi_assert_internal(mi_page_immediate_available(page));
-    mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
     mi_assert_internal(mi_page_is_huge(page));
+    mi_assert_internal(_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
     mi_assert_internal(_mi_page_segment(page)->used==1);
     #if MI_HUGE_PAGE_ABANDON
     mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
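The `mi_huge_page_alloc` hunk above is the user-visible crux of the fix: when `MI_HUGE_PAGE_ABANDON` is disabled, the fresh huge page is now enqueued via `mi_page_queue(heap, MI_LARGE_OBJ_SIZE_MAX+1)`, i.e. always in the huge queue, even when `block_size` itself is small. A plausible trigger, assuming this is the scenario behind #868, is an aligned allocation whose alignment forces a huge segment while the requested size stays small:

// hedged repro sketch -- the 4 MiB alignment is illustrative; any alignment
// above mimalloc's maximum block alignment is served from a huge segment
#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc_aligned(64, 4u << 20);  // 64 bytes at 4 MiB alignment
  mi_free(p);  // the free/retire path must find this page in the huge queue
  return 0;
}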

View File

@@ -142,6 +142,7 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
   mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
   mi_assert_internal(segment->used <= segment->capacity);
   mi_assert_internal(segment->abandoned <= segment->used);
+  mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || segment->capacity == 1);
   size_t nfree = 0;
   for (size_t i = 0; i < segment->capacity; i++) {
     const mi_page_t* const page = &segment->pages[i];
@@ -151,6 +152,7 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t*
     if (page->segment_in_use) {
       mi_assert_expensive(!mi_pages_purge_contains(page, tld));
     }
+    if (segment->page_kind == MI_PAGE_HUGE) mi_assert_internal(page->is_huge);
   }
   mi_assert_internal(nfree + segment->used == segment->capacity);
   // mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
@@ -615,11 +617,13 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   _mi_memzero((uint8_t*)segment + ofs, info_size - ofs);
 
   // initialize pages info
+  const bool is_huge = (page_kind == MI_PAGE_HUGE);
   for (size_t i = 0; i < capacity; i++) {
     mi_assert_internal(i <= 255);
     segment->pages[i].segment_idx = (uint8_t)i;
     segment->pages[i].is_committed = segment->memid.initially_committed;
     segment->pages[i].is_zero_init = segment->memid.initially_zero;
+    segment->pages[i].is_huge = is_huge;
   }
 
   // initialize
@@ -753,7 +757,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
     mi_segment_abandon(segment,tld);
   }
   else if (segment->used + 1 == segment->capacity) {
-    mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); // for now we only support small and medium pages
+    mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); // large and huge pages are always the single page in a segment
     if (segment->page_kind <= MI_PAGE_MEDIUM) {
       // move back to segments free list
       mi_segment_insert_in_free_queue(segment,tld);
@@ -1123,6 +1127,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   #endif
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);
+  mi_assert_internal(page->is_huge);
 
   // for huge pages we initialize the block_size as we may
   // overallocate to accommodate large alignments.
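Taken together, the segment hunks pin down the flag's lifecycle: `mi_segment_alloc` sets it once for every page of a huge segment, and `mi_segment_is_valid` checks both that large/huge segments hold exactly one page and that pages of a huge segment carry the flag. As a standalone predicate the strengthened invariant looks roughly like this (toy types, not mimalloc's):

#include <stdbool.h>
#include <stddef.h>

typedef enum { TOY_PAGE_SMALL, TOY_PAGE_MEDIUM, TOY_PAGE_LARGE, TOY_PAGE_HUGE } toy_page_kind_t;
typedef struct { bool is_huge; } toy_page_t;
typedef struct {
  toy_page_kind_t page_kind;
  size_t          capacity;   // number of page slots (<= 64 in this toy)
  toy_page_t      pages[64];
} toy_segment_t;

static bool toy_segment_flags_ok(const toy_segment_t* s) {
  // large and huge segments always hold exactly one page
  if (s->page_kind > TOY_PAGE_MEDIUM && s->capacity != 1) return false;
  // in a huge segment, every page must carry the flag
  for (size_t i = 0; i < s->capacity; i++) {
    if (s->page_kind == TOY_PAGE_HUGE && !s->pages[i].is_huge) return false;
  }
  return true;
}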