merge from dev

This commit is contained in:
daan 2020-03-16 16:07:52 -07:00
commit 2d52b967bc
9 changed files with 72 additions and 14 deletions

View File

@@ -131,6 +131,11 @@ if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel")
endif() endif()
endif() endif()
# Architecture flags
if(${CMAKE_HOST_SYSTEM_PROCESSOR} MATCHES "arm")
list(APPEND mi_cflags -march=native)
endif()
# extra needed libraries # extra needed libraries
if(WIN32) if(WIN32)
list(APPEND mi_libraries psapi shell32 user32 bcrypt) list(APPEND mi_libraries psapi shell32 user32 bcrypt)
@@ -216,7 +221,7 @@ if(NOT WIN32)
# install a symlink in the /usr/local/lib to the versioned library # install a symlink in the /usr/local/lib to the versioned library
set(mi_symlink "${CMAKE_SHARED_MODULE_PREFIX}${mi_basename}${CMAKE_SHARED_LIBRARY_SUFFIX}") set(mi_symlink "${CMAKE_SHARED_MODULE_PREFIX}${mi_basename}${CMAKE_SHARED_LIBRARY_SUFFIX}")
set(mi_soname "mimalloc-${mi_version}/${mi_symlink}.${mi_version}") set(mi_soname "mimalloc-${mi_version}/${mi_symlink}.${mi_version}")
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${mi_soname} ${mi_symlink} WORKING_DIRECTORY ${CMAKE_INSTALL_PREFIX}/${mi_install_dir}/..)") install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${mi_soname} ${mi_symlink} WORKING_DIRECTORY ${mi_install_dir}/..)")
install(CODE "MESSAGE(\"-- Symbolic link: ${CMAKE_INSTALL_PREFIX}/lib/${mi_symlink} -> ${mi_soname}\")") install(CODE "MESSAGE(\"-- Symbolic link: ${CMAKE_INSTALL_PREFIX}/lib/${mi_symlink} -> ${mi_soname}\")")
endif() endif()

View File

@@ -332,6 +332,8 @@ struct mi_heap_s {
uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation mi_random_ctx_t random; // random number context used for secure allocation
size_t page_count; // total number of pages in the `pages` queues. size_t page_count; // total number of pages in the `pages` queues.
size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
size_t page_retired_max; // largest retired index into the `pages` array.
mi_heap_t* next; // list of heaps per thread mi_heap_t* next; // list of heaps per thread
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
}; };

View File

@@ -11,7 +11,7 @@ mimalloc (pronounced "me-malloc")
is a general purpose allocator with excellent [performance](#performance) characteristics. is a general purpose allocator with excellent [performance](#performance) characteristics.
Initially developed by Daan Leijen for the run-time systems of the Initially developed by Daan Leijen for the run-time systems of the
[Koka](https://github.com/koka-lang/koka) and [Lean](https://github.com/leanprover/lean) languages. [Koka](https://github.com/koka-lang/koka) and [Lean](https://github.com/leanprover/lean) languages.
Latest release:`v1.6.0` (2020-02-09). Latest release:`v1.6.1` (2020-02-17).
It is a drop-in replacement for `malloc` and can be used in other programs It is a drop-in replacement for `malloc` and can be used in other programs
without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as: without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:
@@ -57,6 +57,7 @@ Enjoy!
### Releases ### Releases
* 2020-02-17, `v1.6.1`: stable release 1.6: minor updates (build with clang-cl, fix alignment issue for small objects).
* 2020-02-09, `v1.6.0`: stable release 1.6: fixed potential memory leak, improved overriding * 2020-02-09, `v1.6.0`: stable release 1.6: fixed potential memory leak, improved overriding
and thread local support on FreeBSD, NetBSD, DragonFly, and macOSX. New byte-precise and thread local support on FreeBSD, NetBSD, DragonFly, and macOSX. New byte-precise
heap block overflow detection in debug mode (besides the double-free detection and free-list heap block overflow detection in debug mode (besides the double-free detection and free-list
@@ -275,8 +276,7 @@ resolved to the _mimalloc_ library.
Note that certain security restrictions may apply when doing this from Note that certain security restrictions may apply when doing this from
the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash). the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash).
Note: unfortunately, at this time, dynamic overriding on macOS seems broken but it is (Note: macOS support for dynamic overriding is recent, please report any issues.)
actively worked on to fix this (see issue [`#50`](https://github.com/microsoft/mimalloc/issues/50)).
### Override on Windows ### Override on Windows

View File

@@ -67,6 +67,11 @@ MI_ALLOC_API1(inline mi_decl_restrict void*, malloc_small, mi_heap_t*, heap, siz
mi_assert(heap!=NULL); mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
mi_assert(size <= MI_SMALL_SIZE_MAX); mi_assert(size <= MI_SMALL_SIZE_MAX);
#if (MI_PADDING)
if (size == 0) {
size = sizeof(void*);
}
#endif
mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE); mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE MI_SOURCE_XARG); void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE MI_SOURCE_XARG);
mi_assert_internal(p==NULL || mi_usable_size(p) >= size); mi_assert_internal(p==NULL || mi_usable_size(p) >= size);

View File

@@ -97,6 +97,7 @@ const mi_heap_t _mi_heap_empty = {
{ 0, 0 }, // keys { 0, 0 }, // keys
{ {0}, {0}, 0 }, { {0}, {0}, 0 },
0, // page count 0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next NULL, // next
false false
}; };
@@ -131,6 +132,7 @@ mi_heap_t _mi_heap_main = {
{ 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!) { 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)
{ {0x846ca68b}, {0}, 0 }, // random { {0x846ca68b}, {0}, 0 }, // random
0, // page count 0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next heap NULL, // next heap
false // can reclaim false // can reclaim
}; };
@@ -241,7 +243,9 @@ static bool _mi_heap_done(mi_heap_t* heap) {
mi_assert_internal(heap->tld->segments.count == 0); mi_assert_internal(heap->tld->segments.count == 0);
_mi_os_free(heap, sizeof(mi_thread_data_t), &_mi_stats_main); _mi_os_free(heap, sizeof(mi_thread_data_t), &_mi_stats_main);
} }
#if (MI_DEBUG > 0) #if 0
// never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
// there may still be delete/free calls after the mi_fls_done is called. Issue #207
else { else {
_mi_heap_destroy_pages(heap); _mi_heap_destroy_pages(heap);
mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main); mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main);
@@ -483,6 +487,10 @@ static void mi_process_done(void) {
if (process_done) return; if (process_done) return;
process_done = true; process_done = true;
#if defined(_WIN32) && !defined(MI_SHARED_LIB)
FlsSetValue(mi_fls_key, NULL); // don't call main-thread callback
FlsFree(mi_fls_key); // call thread-done on all threads to prevent dangling callback pointer if statically linked with a DLL; Issue #208
#endif
#ifndef NDEBUG #ifndef NDEBUG
mi_collect(true); mi_collect(true);
#endif #endif
@@ -490,7 +498,7 @@ static void mi_process_done(void) {
mi_option_is_enabled(mi_option_verbose)) { mi_option_is_enabled(mi_option_verbose)) {
mi_stats_print(NULL); mi_stats_print(NULL);
} }
mi_allocator_done(); mi_allocator_done();
_mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id); _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
os_preloading = true; // don't call the C runtime anymore os_preloading = true; // don't call the C runtime anymore
} }

View File

@@ -209,7 +209,12 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
// on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations // on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations
void* hint; void* hint;
if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) { if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) {
return VirtualAlloc(hint, size, flags, PAGE_READWRITE); void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
if (p != NULL) return p;
DWORD err = GetLastError();
if (err != ERROR_INVALID_ADDRESS) { // if linked with multiple instances, we may have tried to allocate at an already allocated area
return NULL;
}
} }
#endif #endif
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS) #if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)

View File

@@ -380,7 +380,8 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_segment_page_free(page, force, segments_tld); _mi_segment_page_free(page, force, segments_tld);
} }
#define MI_MAX_RETIRE_SIZE (4*MI_SMALL_SIZE_MAX) #define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX
#define MI_RETIRE_CYCLES (16)
// Retire a page with no more used blocks // Retire a page with no more used blocks
// Important to not retire too quickly though as new // Important to not retire too quickly though as new
@@ -405,7 +406,13 @@ void _mi_page_retire(mi_page_t* page) {
if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) { if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
if (pq->last==page && pq->first==page) { // the only page in the queue? if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
page->retire_expire = 16; page->retire_expire = MI_RETIRE_CYCLES;
mi_heap_t* heap = mi_page_heap(page);
mi_assert_internal(pq >= heap->pages);
const size_t index = pq - heap->pages;
mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE);
if (index < heap->page_retired_min) heap->page_retired_min = index;
if (index > heap->page_retired_max) heap->page_retired_max = index;
mi_assert_internal(mi_page_all_free(page)); mi_assert_internal(mi_page_all_free(page));
return; // don't free after all return; // don't free after all
} }
@@ -415,22 +422,32 @@ void _mi_page_retire(mi_page_t* page) {
} }
// free retired pages: we don't need to look at the entire queues // free retired pages: we don't need to look at the entire queues
// since we only retire pages that are the last one in a queue. // since we only retire pages that are at the head position in a queue.
void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
for(mi_page_queue_t* pq = heap->pages; pq->block_size <= MI_MAX_RETIRE_SIZE; pq++) { size_t min = MI_BIN_FULL;
mi_page_t* page = pq->first; size_t max = 0;
for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) {
mi_page_queue_t* pq = &heap->pages[bin];
mi_page_t* page = pq->first;
if (page != NULL && page->retire_expire != 0) { if (page != NULL && page->retire_expire != 0) {
if (mi_page_all_free(page)) { if (mi_page_all_free(page)) {
page->retire_expire--; page->retire_expire--;
if (force || page->retire_expire == 0) { if (force || page->retire_expire == 0) {
_mi_page_free(pq->first, pq, force); _mi_page_free(pq->first, pq, force);
} }
else {
// keep retired, update min/max
if (bin < min) min = bin;
if (bin > max) max = bin;
}
} }
else { else {
page->retire_expire = 0; page->retire_expire = 0;
} }
} }
} }
heap->page_retired_min = min;
heap->page_retired_max = max;
} }

View File

@@ -7,7 +7,7 @@
#include <vector> #include <vector>
#include <thread> #include <thread>
#include <mimalloc.h> #include <mimalloc.h>
// #include <mimalloc-new-delete.h> #include <mimalloc-new-delete.h>
#include <mimalloc-override.h> #include <mimalloc-override.h>
#ifdef _WIN32 #ifdef _WIN32
@@ -28,6 +28,7 @@ int main() {
// heap_no_delete(); // issue #202 // heap_no_delete(); // issue #202
// heap_late_free(); // issue #204 // heap_late_free(); // issue #204
// dangling_ptr_write(); // dangling_ptr_write();
// padding_shrink(); // issue #209
various_tests(); various_tests();
mi_stats_print(NULL); mi_stats_print(NULL);
return 0; return 0;
@@ -159,3 +160,18 @@ static void heap_late_free() {
t1.join(); t1.join();
} }
// issue #209
static void* shared_p;
static void alloc0(/* void* arg */)
{
shared_p = mi_malloc(8);
}
void padding_shrink(void)
{
auto t1 = std::thread(alloc0);
t1.join();
mi_free(shared_p);
}

View File

@@ -123,7 +123,7 @@ int main() {
}); });
CHECK_BODY("posix_memalign_nomem", { CHECK_BODY("posix_memalign_nomem", {
void* p = &p; void* p = &p;
int err = mi_posix_memalign(&p, sizeof(void*), SIZE_MAX); volatile int err = mi_posix_memalign(&p, sizeof(void*), SIZE_MAX);
result = (err==ENOMEM && p==&p); result = (err==ENOMEM && p==&p);
}); });