Mirror of https://github.com/microsoft/mimalloc.git
Commit 05631ebfc4: Merge branch 'dev' into dev-win
@@ -152,15 +152,15 @@ bool _mi_page_is_valid(mi_page_t* page);
 
 // Overflow detecting multiply
 #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)
-static inline bool mi_mul_overflow(size_t size, size_t count, size_t* total) {
+static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
 #if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5
 #if (MI_INTPTR_SIZE == 4)
-  return __builtin_umul_overflow(size, count, total);
+  return __builtin_umul_overflow(count, size, total);
 #else
-  return __builtin_umull_overflow(size, count, total);
+  return __builtin_umull_overflow(count, size, total);
 #endif
 #else /* __builtin_umul_overflow is unavailable */
-  *total = size * count;
+  *total = count * size;
   return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
           && size > 0 && (SIZE_MAX / size) < count);
 #endif
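The fallback path (taken when __builtin_umul_overflow is unavailable) is worth reading on its own: MI_MUL_NO_OVERFLOW is roughly sqrt(SIZE_MAX), so when both operands are below it the product cannot overflow and the division test is skipped entirely. A minimal standalone sketch of just that path, with illustrative names rather than mimalloc's:

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

// Roughly sqrt(SIZE_MAX): 2^32 on a 64-bit platform, 2^16 on a 32-bit one.
#define MUL_NO_OVERFLOW  ((size_t)1 << (4*sizeof(size_t)))

// Portable overflow-detecting multiply: the unsigned product wraps (which is
// well-defined), and the expensive division check only runs when at least one
// operand is large enough to possibly overflow.
static bool mul_overflow(size_t count, size_t size, size_t* total) {
  *total = count * size;
  return ((size >= MUL_NO_OVERFLOW || count >= MUL_NO_OVERFLOW)
          && size > 0 && (SIZE_MAX / size) < count);
}

int main(void) {
  size_t total;
  printf("%d\n", (int)mul_overflow(1000, 16, &total));       // 0: 16000 fits
  printf("%d\n", (int)mul_overflow(SIZE_MAX/2, 3, &total));   // 1: product overflows
  return 0;
}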
@@ -303,14 +303,14 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   mi_assert_internal(heap!=NULL);
   if (from==NULL || from->page_count == 0) return;
 
-  // unfull all full pages
-  mi_page_t* page = heap->pages[MI_BIN_FULL].first;
+  // unfull all full pages in the `from` heap
+  mi_page_t* page = from->pages[MI_BIN_FULL].first;
   while (page != NULL) {
     mi_page_t* next = page->next;
     _mi_page_unfull(page);
     page = next;
   }
-  mi_assert_internal(heap->pages[MI_BIN_FULL].first == NULL);
+  mi_assert_internal(from->pages[MI_BIN_FULL].first == NULL);
 
   // free outstanding thread delayed free blocks
   _mi_heap_delayed_free(from);
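The loop captures `next` before calling _mi_page_unfull because unfulling relinks the page into a different queue and rewrites its list pointers. A generic sketch of that traversal-with-relink pattern, using hypothetical types rather than mimalloc's page queues:

#include <stddef.h>

typedef struct node_s { struct node_s* next; } node_t;

static node_t* other_list = NULL;

// Hypothetical stand-in for _mi_page_unfull: moves a node to another list,
// which clobbers its next pointer.
static void relink(node_t* n) {
  n->next = other_list;
  other_list = n;
}

// Same loop shape as mi_heap_absorb: save `next` before relink() rewrites it.
static void drain(node_t* first) {
  node_t* n = first;
  while (n != NULL) {
    node_t* next = n->next;
    relink(n);
    n = next;
  }
}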
@@ -106,14 +106,14 @@ mi_heap_t _mi_heap_main = {
   MI_SMALL_PAGES_EMPTY,
   MI_PAGE_QUEUES_EMPTY,
   NULL,
   0,
-  0,
+  0,                      // thread id
 #if MI_INTPTR_SIZE==8     // the cookie of the main heap can be fixed (unlike page cookies that need to be secure!)
   0xCDCDCDCDCDCDCDCDUL,
 #else
   0xCDCDCDCDUL,
 #endif
-  0,
+  0,                      // random
   0,                      // page count
   false                   // can reclaim
 };
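The comment on the cookie line distinguishes the main heap's cookie, which may be a fixed constant, from page cookies, which should come from a random source. As a generic illustration of cookie-based pointer scrambling (not necessarily mimalloc's exact scheme):

#include <stdint.h>

// A cookie XORed into stored pointers makes heap metadata harder to forge or
// guess; how strong the protection is depends entirely on how the cookie is
// chosen (fixed constant for the statically initialized main heap here,
// random for per-page cookies).
static inline uintptr_t ptr_encode(const void* p, uintptr_t cookie) {
  return (uintptr_t)p ^ cookie;
}
static inline void* ptr_decode(uintptr_t v, uintptr_t cookie) {
  return (void*)(v ^ cookie);
}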
src/memory.c  (22 changed lines)
@@ -128,6 +128,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   size_t mask = mi_region_block_mask(blocks,bitidx);
   mi_assert_internal(mask != 0);
   mi_assert_internal((mask & mi_atomic_read(&region->map)) == mask);
+  mi_assert_internal(&regions[idx] == region);
 
   // ensure the region is reserved
   void* start = mi_atomic_read_ptr(&region->start);
@@ -142,6 +143,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
       } while (!mi_atomic_compare_exchange(&region->map, map & ~mask, map));
       return false;
     }
+    Sleep(10);
 
     // set the newly allocated region
     if (mi_atomic_compare_exchange_ptr(&region->start, start, NULL)) {
@@ -149,9 +151,23 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
       mi_atomic_increment(&regions_count);
     }
     else {
-      // failed, another thread allocated just before us, free our allocated memory
-      // TODO: should we keep the allocated memory and assign it to some other region?
-      _mi_os_free(start, MI_REGION_SIZE, tld->stats);
+      // failed, another thread allocated just before us!
+      // we assign it to a later slot instead (up to 4 tries).
+      // note: we don't need to increment the region count, this will happen on another allocation
+      for(size_t i = 1; i <= 4 && idx + i < MI_REGION_MAX; i++) {
+        void* s = mi_atomic_read_ptr(&regions[idx+i].start);
+        if (s == NULL) { // quick test
+          if (mi_atomic_compare_exchange_ptr(&regions[idx+i].start, start, s)) {
+            start = NULL;
+            break;
+          }
+        }
+      }
+      if (start != NULL) {
+        // free it if we didn't succeed to save it to some other region
+        _mi_os_free(start, MI_REGION_SIZE, tld->stats);
+      }
+      // and continue with the memory at our index
       start = mi_atomic_read_ptr(&region->start);
     }
   }
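The new else-branch avoids throwing away a freshly reserved MI_REGION_SIZE mapping when another thread wins the race for this slot: it tries to park the memory in one of the next few empty region slots via compare-and-swap, and only unmaps it if all of those are already taken. A small model of that handoff pattern in C11 atomics (hypothetical names and slot table, not the actual region code):

#include <stdatomic.h>
#include <stddef.h>
#include <stdbool.h>

#define SLOT_COUNT 256

static _Atomic(void*) slots[SLOT_COUNT];

// If our own slot was claimed by another thread, try to donate the memory to
// one of the next four empty slots instead of freeing it.
static bool donate_to_later_slot(size_t idx, void* start) {
  for (size_t i = 1; i <= 4 && idx + i < SLOT_COUNT; i++) {
    if (atomic_load(&slots[idx + i]) != NULL) continue;   // quick test: skip occupied slots
    void* expected = NULL;
    if (atomic_compare_exchange_strong(&slots[idx + i], &expected, start)) {
      return true;                                        // handed off; caller must not free
    }
  }
  return false;                                           // caller should free `start`
}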
src/os.c  (3 changed lines)
@@ -22,6 +22,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #else
 #include <sys/mman.h>    // mmap
 #include <unistd.h>      // sysconf
+#if defined(__linux__)
+#include <linux/mman.h>  // linux mmap flags
+#endif
 #if defined(__APPLE__)
 #include <mach/vm_statistics.h>
 #endif
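The new <linux/mman.h> include pulls in Linux-specific mmap flags such as the huge-page encodings. A hedged sketch of how such flags are typically used, guarded so it still compiles where they are not defined (the helper name is hypothetical, not a mimalloc function, and MAP_ANONYMOUS is assumed to be available):

#include <stddef.h>
#include <sys/mman.h>    // mmap, PROT_*, MAP_PRIVATE, MAP_ANONYMOUS
#if defined(__linux__)
#include <linux/mman.h>  // MAP_HUGETLB, MAP_HUGE_2MB
#endif

// Try a 2MiB-huge-page anonymous mapping on Linux; fall back to a normal
// anonymous mapping elsewhere or when the huge-page request fails.
static void* os_alloc_hint_huge(size_t size) {
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__linux__) && defined(MAP_HUGETLB) && defined(MAP_HUGE_2MB)
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
  if (p != MAP_FAILED) return p;
#endif
  void* q = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  return (q == MAP_FAILED ? NULL : q);
}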
@@ -745,7 +745,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
   // huge allocation?
   mi_page_t* page;
   if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) {
-    if (mi_unlikely(size >= (SIZE_MAX - MI_MAX_ALIGN_SIZE))) {
+    if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
       page = NULL;
     }
     else {
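The new bound follows glibc's 2019 decision (linked in the comment) to refuse allocations larger than PTRDIFF_MAX: inside such an object, subtracting two pointers can overflow ptrdiff_t, which is undefined behavior, so rejecting the request up front is the safer contract. A trivial standalone version of the check, for illustration only:

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

// Reject request sizes above PTRDIFF_MAX before any size-class lookup; on a
// 64-bit platform this still allows objects up to 2^63-1 bytes.
static bool alloc_size_is_ok(size_t size) {
  return size <= (size_t)PTRDIFF_MAX;
}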