new segment allocation; good results with Qas service

This commit is contained in:
daan 2019-08-15 23:19:52 -07:00
parent f2ba95bc64
commit a0b4ac2f66
4 changed files with 40 additions and 34 deletions

View File

@ -254,20 +254,21 @@ static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
return (mi_slice_t*)(p);
}
// Return the index of `slice` within its owning segment's `slices` array.
// The assert guards against a pointer that does not belong to the segment.
static size_t mi_slice_index(const mi_slice_t* slice) {
  mi_segment_t* const seg = _mi_ptr_segment(slice);
  const ptrdiff_t pos = slice - seg->slices;
  mi_assert_internal(pos >= 0 && pos < (ptrdiff_t)seg->slice_count);
  return (size_t)pos;
}
// Segment belonging to a page
// Maps a page pointer back to its enclosing segment via _mi_ptr_segment.
// NOTE(review): the two `segment` declarations and the two asserts below are
// the old/new versions from a diff rendered without +/- markers — only one
// declaration and one assert should survive in the actual source; confirm
// against the repository (commit a0b4ac2f66).
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
mi_segment_t* segment = _mi_ptr_segment(page);
mi_assert_internal(segment == NULL || page == mi_slice_to_page(&segment->slices[mi_slice_index(mi_page_to_slice((mi_page_t*)page))]));
mi_segment_t* segment = _mi_ptr_segment(page);
// New (cheaper) assert: the page must lie inside the segment's slice array.
mi_assert_internal(segment == NULL || (mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_count);
return segment;
}
// Walk back from any slice to the first slice of its page block, using the
// byte back-offset stored in `slice_offset` (0 on the first slice itself).
static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
  uint8_t* const raw = (uint8_t*)slice - slice->slice_offset;
  mi_slice_t* const first = (mi_slice_t*)raw;
  // sanity: `first` stays inside the segment, is a real block start,
  // and its extent covers the slice we started from
  mi_assert_internal(first >= _mi_ptr_segment(slice)->slices);
  mi_assert_internal(first->slice_offset == 0);
  mi_assert_internal(first + first->slice_count > slice);
  return first;
}
// Get the page containing the pointer
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
@ -275,8 +276,7 @@ static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const
uintptr_t idx = (uintptr_t)diff >> MI_SEGMENT_SLICE_SHIFT;
mi_assert_internal(idx < segment->slice_count);
mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
mi_slice_t* slice = slice0 - slice0->slice_offset; // adjust to the block that holds the page data
mi_assert_internal(slice->slice_count > slice0->slice_offset);
mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
mi_assert_internal(slice->slice_offset == 0);
mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_count);
return mi_slice_to_page(slice);

View File

@ -78,7 +78,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_SEGMENT_SHIFT (10 + MI_SEGMENT_SLICE_SHIFT) // 64mb
#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64kb
#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SEGMENT_SLICE_SHIFT) // 512kb
#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SEGMENT_SLICE_SHIFT) // 512kb
// Derived constants
@ -90,7 +90,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_SMALL_PAGE_SIZE (1<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE (1<<MI_MEDIUM_PAGE_SHIFT)
#define MI_MEDIUM_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/8) // 64kb on 64-bit
#define MI_MEDIUM_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128kb on 64-bit
#define MI_MEDIUM_WSIZE_MAX (MI_MEDIUM_SIZE_MAX/MI_INTPTR_SIZE) // 128kb on 64-bit
#define MI_LARGE_SIZE_MAX (MI_SEGMENT_SIZE/4) // 16mb on 64-bit
@ -155,8 +155,8 @@ typedef uintptr_t mi_thread_free_t;
// - using `uint16_t` does not seem to slow things down
typedef struct mi_page_s {
// "owned" by the segment
size_t slice_count; // slices in this page (0 if not a page)
uint16_t slice_offset; // distance from the actual page data slice (0 if a page)
uint32_t slice_count; // slices in this page (0 if not a page)
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
bool is_reset; // `true` if the page memory was reset
bool is_committed; // `true` if the page virtual memory is committed

View File

@ -170,7 +170,7 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
void _mi_page_free_collect(mi_page_t* page, bool force) {
mi_assert_internal(page!=NULL);
// collect the thread free list
if (force || mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation
_mi_page_thread_free_collect(page);
@ -703,7 +703,7 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
General allocation
----------------------------------------------------------- */
// A huge page is allocated directly without being in a queue
// Large and huge pages are allocated directly without being in a queue
static mi_page_t* mi_large_page_alloc(mi_heap_t* heap, size_t size) {
size_t block_size = _mi_wsize_from_size(size) * sizeof(uintptr_t);
mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);

View File

@ -74,6 +74,13 @@ static size_t mi_slice_bin(size_t slice_count) {
return bin;
}
// Index of `slice` in the segment's slice array (asserts it is in range).
static size_t mi_slice_index(const mi_slice_t* slice) {
  mi_segment_t* owner = _mi_ptr_segment(slice);
  ptrdiff_t idx = slice - owner->slices;
  mi_assert_internal(idx >= 0);
  mi_assert_internal(idx < (ptrdiff_t)owner->slice_count);
  return (size_t)idx;
}
/* -----------------------------------------------------------
Page Queues
@ -98,7 +105,7 @@ static mi_page_t* mi_page_queue_pop(mi_page_queue_t* pq)
}
*/
static void mi_page_queue_push(mi_page_queue_t* pq, mi_page_t* page) {
static void mi_page_queue_enqueue(mi_page_queue_t* pq, mi_page_t* page) {
// todo: or push to the end?
mi_assert_internal(page->prev == NULL && page->next==NULL);
page->prev = NULL; // paranoia
@ -158,14 +165,14 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
if (slice->block_size > 0) { // a page in use, all slices need their back offset set
used_count++;
for (size_t i = index; i <= maxindex; i++) {
mi_assert_internal(segment->slices[i].slice_offset == i - index);
mi_assert_internal(segment->slices[i].slice_offset == (i - index)*sizeof(mi_page_t));
mi_assert_internal(i==index || segment->slices[i].slice_count == 0);
mi_assert_internal(i==index || segment->slices[i].block_size == 1);
}
}
else { // free range of slices; only last slice needs a valid back offset
mi_slice_t* end = &segment->slices[maxindex];
mi_assert_internal(slice == end - end->slice_offset);
mi_assert_internal((uint8_t*)slice == (uint8_t*)end - end->slice_offset);
mi_assert_internal(slice == end || end->slice_count == 0 );
mi_assert_internal(end->block_size == 0);
if (segment->kind == MI_SEGMENT_NORMAL && segment->thread_id != 0) {
@ -272,7 +279,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
// The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
// and no more than 1.
#define MI_SEGMENT_CACHE_MAX (2)
#define MI_SEGMENT_CACHE_MAX (4)
#define MI_SEGMENT_CACHE_FRACTION (8)
// note: returned segment may be partially reset
@ -362,16 +369,16 @@ static void mi_segment_page_init(mi_segment_t* segment, size_t slice_index, size
// set first and last slice (the intermediates can be undetermined)
mi_slice_t* slice = &segment->slices[slice_index];
slice->slice_count = slice_count;
slice->slice_count = (uint32_t)slice_count;
slice->slice_offset = 0;
if (slice_count > 1) {
mi_slice_t* end = &segment->slices[slice_index + slice_count - 1];
end->slice_count = 0;
end->slice_offset = (uint16_t)slice_count - 1;
end->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
end->block_size = 0;
}
// and push it on the free page queue (if it was not a huge page)
if (pq != NULL) mi_page_queue_push( pq, mi_slice_to_page(slice) );
if (pq != NULL) mi_page_queue_enqueue( pq, mi_slice_to_page(slice) );
else slice->block_size = 0; // mark huge page as free anyways
}
@ -393,7 +400,7 @@ static void mi_segment_page_split(mi_page_t* page, size_t slice_count, mi_segmen
size_t next_index = mi_slice_index(mi_page_to_slice(page)) + slice_count;
size_t next_count = page->slice_count - slice_count;
mi_segment_page_init( segment, next_index, next_count, tld );
page->slice_count = slice_count;
page->slice_count = (uint32_t)slice_count;
}
static mi_page_t* mi_segment_page_find(size_t slice_count, mi_segments_tld_t* tld) {
@ -494,11 +501,11 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, m
for (size_t i = 0; i < islice_count; i++) {
mi_slice_t* slice = &segment->slices[i];
if (i==0) {
slice->slice_count = islice_count;
slice->slice_count = (uint32_t)islice_count;
slice->block_size = islice_count * MI_SEGMENT_SLICE_SIZE;
}
else {
slice->slice_offset = (uint16_t)i;
slice->slice_offset = (uint32_t)(sizeof(mi_page_t)*i);
slice->block_size = 1;
}
}
@ -553,7 +560,7 @@ static mi_page_t* mi_segment_page_alloc(mi_page_kind_t page_kind, size_t require
mi_assert_internal(required <= MI_LARGE_SIZE_MAX && page_kind <= MI_PAGE_LARGE);
// find a free page
size_t page_size = _mi_align_up(required,MI_SEGMENT_SLICE_SIZE);
size_t page_size = _mi_align_up(required,(required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE));
size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE;
mi_page_t* page = mi_segment_page_find(slices_needed,tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
if (page==NULL) {
@ -569,7 +576,7 @@ static mi_page_t* mi_segment_page_alloc(mi_page_kind_t page_kind, size_t require
bool commit = false;
bool unreset = false;
for (size_t i = 0; i < page->slice_count; i++, slice++) {
slice->slice_offset = (uint16_t)i;
slice->slice_offset = (uint32_t)(sizeof(mi_page_t)*i);
slice->block_size = 1;
if (i > 0) slice->slice_count = 0;
if (!segment->all_committed && !slice->is_committed) {
@ -610,8 +617,7 @@ static mi_slice_t* mi_segment_page_free_coalesce(mi_page_t* page, mi_segments_tl
mi_segment_page_delete(next, tld);
}
if (slice > segment->slices) {
mi_slice_t* prev = slice - 1;
prev = prev - prev->slice_offset;
mi_slice_t* prev = mi_slice_first(slice - 1);
mi_assert_internal(prev >= segment->slices);
if (prev->block_size==0) {
// free previous slice -- remove it from free and merge
@ -653,7 +659,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
}
// zero the page data
size_t slice_count = page->slice_count; // don't clear the slice_count
uint32_t slice_count = page->slice_count; // don't clear the slice_count
bool is_reset = page->is_reset; // don't clear the reset flag
bool is_committed = page->is_committed; // don't clear the commit flag
memset(page, 0, sizeof(*page));
@ -839,7 +845,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
mi_assert_internal(page->block_size > 0 && page->slice_count > 0);
size_t initial_count = page->slice_count;
page = page + initial_count;
page->slice_count = (segment->segment_size - segment->segment_info_size)/MI_SEGMENT_SLICE_SIZE;
page->slice_count = (uint32_t)((segment->segment_size - segment->segment_info_size)/MI_SEGMENT_SLICE_SIZE);
page->slice_offset = 0;
page->block_size = size;
mi_assert_internal(page->slice_count * MI_SEGMENT_SLICE_SIZE >= size);
@ -847,7 +853,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
// set back pointers
for (size_t i = 1; i <segment->slice_count; i++) {
mi_slice_t* slice = (mi_slice_t*)(page + i);
slice->slice_offset = (uint16_t)i;
slice->slice_offset = (uint32_t)(sizeof(mi_page_t)*i);
slice->block_size = 1;
slice->slice_count = 0;
}