merge from dev

daan 2020-04-30 20:40:48 -07:00
commit 8e4e8c93a7
15 changed files with 145 additions and 73 deletions

View File

@@ -15,6 +15,8 @@ option(MI_LOCAL_DYNAMIC_TLS "Use slightly slower, dlopen-compatible TLS mechanis
option(MI_BUILD_TESTS "Build test executables" ON)
option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF)
option(MI_PADDING "Enable padding to detect heap block overflow (only in debug mode)" ON)
option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF)
option(MI_SHOW_ERRORS "Show error and warning messages by default" OFF)
include("cmake/mimalloc-config-version.cmake")
@@ -67,6 +69,7 @@ if(MI_OVERRIDE MATCHES "ON")
# use zone's on macOS
message(STATUS " Use malloc zone to override malloc (MI_OSX_ZONE=ON)")
list(APPEND mi_sources src/alloc-override-osx.c)
list(APPEND mi_defines MI_OSX_ZONE=1)
if(NOT MI_INTERPOSE MATCHES "ON")
message(STATUS " (enabling INTERPOSE as well since zone's require this)")
set(MI_INTERPOSE "ON")
@@ -105,6 +108,16 @@ if(MI_PADDING MATCHES "OFF")
list(APPEND mi_defines MI_PADDING=0)
endif()
if(MI_XMALLOC MATCHES "ON")
message(STATUS "Enable abort() calls on memory allocation failure (MI_XMALLOC=ON)")
list(APPEND mi_defines MI_XMALLOC=1)
endif()
if(MI_SHOW_ERRORS MATCHES "ON")
message(STATUS "Enable printing of error and warning messages by default (MI_SHOW_ERRORS=ON)")
list(APPEND mi_defines MI_SHOW_ERRORS=1)
endif()
if(MI_USE_CXX MATCHES "ON")
message(STATUS "Use the C++ compiler to compile (MI_USE_CXX=ON)")
set_source_files_properties(${mi_sources} PROPERTIES LANGUAGE CXX )

View File

@@ -40,8 +40,8 @@ jobs:
cd $(BuildType)
ctest
displayName: CTest
- upload: $(Build.SourcesDirectory)/$(BuildType)
artifact: mimalloc-windows-$(BuildType)
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-windows-$(BuildType)
- job:
displayName: Linux
@@ -99,8 +99,8 @@ jobs:
displayName: Make
- script: make test -C $(BuildType)
displayName: CTest
- upload: $(Build.SourcesDirectory)/$(BuildType)
artifact: mimalloc-ubuntu-$(BuildType)
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-ubuntu-$(BuildType)
- job:
displayName: macOS
@@ -127,5 +127,5 @@ jobs:
displayName: Make
- script: make test -C $(BuildType)
displayName: CTest
- upload: $(Build.SourcesDirectory)/$(BuildType)
artifact: mimalloc-macos-$(BuildType)
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-macos-$(BuildType)

View File

@@ -61,7 +61,6 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_ENCODE_FREELIST 1
#endif
// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------

View File

@@ -101,7 +101,8 @@ extern "C" {
// -----------------------------------------------------------------------------------
// Debugging
// We declare two entry points for each allocation function:
// the normal one (`mi_malloc`) and one that takes a source argument (`dbg_mi_malloc`)
// the normal one (`mi_malloc`) and one that takes a source argument (`dbg_mi_malloc`).
// The source argument is stored in heap blocks to track allocations in debug mode.
// The following macros make it easier to specify this.
// Note: these are even defined in release mode (where the source argument is ignored)
// so one can still build a debug program that links with the release build of mimalloc.
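For illustration only (this sketch is not part of the commit): the dual entry points described above could be consumed roughly as follows. The `dbg_source_t` type, the exact `dbg_mi_malloc` signature, and the `my_malloc` macro are assumptions for the example, not the actual mimalloc declarations.

#include <stddef.h>

// Assumed source-location record; debug builds store it in the heap block.
typedef struct dbg_source_s { const char* file; int line; } dbg_source_t;

void* mi_malloc(size_t size);                        // normal entry point
void* dbg_mi_malloc(size_t size, dbg_source_t src);  // entry point with a source argument
                                                     // (the source is ignored in release builds)

// Caller-side macro (assumed name) that records the call site in debug builds
// yet still links against a release build of mimalloc:
#if defined(NDEBUG)
  #define my_malloc(n)  mi_malloc(n)
#else
  #define my_malloc(n)  dbg_mi_malloc(n, (dbg_source_t){ __FILE__, __LINE__ })
#endif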
@@ -356,8 +357,7 @@ typedef enum mi_option_e {
mi_option_debug_extra_padding,
mi_option_os_tag,
mi_option_max_errors,
_mi_option_last,
mi_option_eager_page_commit = mi_option_eager_commit
_mi_option_last
} mi_option_t;

View File

@@ -41,8 +41,11 @@ extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_im
------------------------------------------------------ */
static size_t zone_size(malloc_zone_t* zone, const void* p) {
UNUSED(zone); UNUSED(p);
return 0; // as we cannot guarantee that `p` comes from us, just return 0
UNUSED(zone);
if (!mi_is_in_heap_region(p))
return 0; // not our pointer, bail out
return mi_usable_size(p);
}
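Background for the `zone_size` change above (an illustrative sketch, not from the diff): macOS routes `free` and `malloc_size` by asking each registered zone for the pointer's size, so a zone claims ownership by returning a nonzero size and must return 0 for pointers it does not own. `my_is_ours` and `my_usable_size` are stand-ins for the mimalloc calls shown above.

#include <stddef.h>

extern int    my_is_ours(const void* p);      // stand-in ownership test
extern size_t my_usable_size(const void* p);  // stand-in size query

static size_t my_zone_size(const void* p) {
  if (!my_is_ours(p)) return 0;   // returning 0 declines a foreign pointer
  return my_usable_size(p);       // a nonzero size claims the block for this zone
}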
static void* zone_malloc(malloc_zone_t* zone, size_t size) {

View File

@@ -165,7 +165,7 @@ extern "C" {
void cfree(void* p) MI_FORWARD0(mi_free, p);
void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize);
size_t malloc_size(void* p) MI_FORWARD1(mi_usable_size, p);
size_t malloc_size(const void* p) MI_FORWARD1(mi_usable_size,p);
#if !defined(__ANDROID__)
size_t malloc_usable_size(void* p) MI_FORWARD1(mi_usable_size, p);
#else

View File

@@ -36,6 +36,7 @@ of 256MiB in practice.
// os.c
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld);
void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
void _mi_os_free(void* p, size_t size, mi_stats_t* stats);
void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
@@ -213,13 +214,13 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, siz
Arena free
----------------------------------------------------------- */
void _mi_arena_free(void* p, size_t size, size_t memid, mi_stats_t* stats) {
void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats) {
mi_assert_internal(size > 0 && stats != NULL);
if (p==NULL) return;
if (size==0) return;
if (memid == MI_MEMID_OS) {
// was a direct OS allocation, pass through
_mi_os_free(p, size, stats);
_mi_os_free_ex(p, size, all_committed, stats);
}
else {
// allocated in an arena

View File

@@ -51,7 +51,11 @@ typedef struct mi_option_desc_s {
static mi_option_desc_t options[_mi_option_last] =
{
// stable options
#if MI_DEBUG || defined(MI_SHOW_ERRORS)
{ 1, UNINIT, MI_OPTION(show_errors) },
#else
{ 0, UNINIT, MI_OPTION(show_errors) },
#endif
{ 0, UNINIT, MI_OPTION(show_stats) },
{ 0, UNINIT, MI_OPTION(verbose) },
@@ -77,7 +81,7 @@ static mi_option_desc_t options[_mi_option_last] =
#endif
{ 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
{ 0, UNINIT, MI_OPTION(debug_extra_padding) }, // extra padding in bytes
{ 0, UNINIT, MI_OPTION(debug_extra_padding) }, // extra padding in bytes (on top of the standard end padding in debug mode)
{ 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
{ 16, UNINIT, MI_OPTION(max_errors) } // maximum errors that are output
};
@@ -261,13 +265,17 @@ static void mi_recurse_exit(void) {
}
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) {
if (!mi_recurse_enter()) return;
if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) { // TODO: use mi_out_stderr for stderr?
if (!mi_recurse_enter()) return;
out = mi_out_get_default(&arg);
if (prefix != NULL) out(prefix, arg);
out(message, arg);
mi_recurse_exit();
}
else {
if (prefix != NULL) out(prefix, arg);
out(message, arg);
}
if (prefix != NULL) out(prefix,arg);
out(message,arg);
mi_recurse_exit();
}
// Define our own limited `fprintf` that avoids memory allocation.
@@ -356,6 +364,11 @@ static void mi_error_default(int err) {
abort();
}
#endif
#if defined(MI_XMALLOC)
if (err==ENOMEM || err==EOVERFLOW) { // abort on memory allocation fails in xmalloc mode
abort();
}
#endif
}
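For context (an illustrative sketch, not part of the diff): the MI_XMALLOC path above makes the allocator itself abort on out-of-memory, which is roughly what a hand-written xmalloc wrapper otherwise does on the caller side. mi_malloc and mi_free are the public mimalloc API; the wrapper name is just for the example.

#include <stdio.h>
#include <stdlib.h>
#include <mimalloc.h>

// Without MI_XMALLOC, a program that must never continue on OOM wraps the
// allocator like this; building with the MI_XMALLOC option enabled moves the
// abort() into the allocator's error handler instead.
static void* xmalloc(size_t size) {
  void* p = mi_malloc(size);
  if (p == NULL) {
    fprintf(stderr, "out of memory (%zu bytes)\n", size);
    abort();
  }
  return p;
}

int main(void) {
  char* buf = (char*)xmalloc(64);
  buf[0] = 0;
  mi_free(buf);
  return 0;
}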
void mi_register_error(mi_error_fun* fun, void* arg) {

View File

@@ -618,7 +618,7 @@ static void mi_mprotect_hint(int err) {
}
// Commit/Decommit memory.
// Usuelly commit is aligned liberal, while decommit is aligned conservative.
// Usually commit is aligned liberal, while decommit is aligned conservative.
// (but not for the reset version where we want commit to be conservative as well)
static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, bool* is_zero, mi_stats_t* stats) {
// page align in the range, commit liberally, decommit conservative
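To make the corrected comment concrete (a sketch for illustration, not taken from the source): a liberal alignment rounds the range outward to whole OS pages so the entire request is covered, while a conservative alignment rounds inward so nothing outside the request is touched. The 4 KiB page size is an assumption for the example.

#include <stdint.h>
#include <stdio.h>

#define PAGE 4096u   // assumed OS page size

// liberal: round outward (used when committing, so the whole range is backed)
static void align_liberal(uintptr_t addr, size_t size, uintptr_t* start, size_t* full) {
  uintptr_t end = (addr + size + PAGE - 1) & ~(uintptr_t)(PAGE - 1);  // round end up
  *start = addr & ~(uintptr_t)(PAGE - 1);                             // round start down
  *full  = (size_t)(end - *start);
}

// conservative: round inward (used when decommitting, so no bytes outside the range are lost)
static void align_conservative(uintptr_t addr, size_t size, uintptr_t* start, size_t* full) {
  uintptr_t end = (addr + size) & ~(uintptr_t)(PAGE - 1);             // round end down
  *start = (addr + PAGE - 1) & ~(uintptr_t)(PAGE - 1);                // round start up
  *full  = (end > *start ? (size_t)(end - *start) : 0);
}

int main(void) {
  uintptr_t start; size_t n;
  align_liberal(0x1234, 100, &start, &n);
  printf("liberal:      start=%#lx size=%zu\n", (unsigned long)start, n);  // 0x1000, 4096
  align_conservative(0x1234, 100, &start, &n);
  printf("conservative: start=%#lx size=%zu\n", (unsigned long)start, n);  // 0x2000, 0
  return 0;
}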

View File

@@ -570,7 +570,8 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
if (page->capacity >= page->reserved) return;
size_t page_size;
uint8_t* page_start = _mi_page_start(_mi_page_segment(page), page, &page_size);
//uint8_t* page_start =
_mi_page_start(_mi_page_segment(page), page, &page_size);
mi_stat_counter_increase(tld->stats.pages_extended, 1);
// calculate the extend count
@@ -588,12 +589,6 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
mi_assert_internal(extend < (1UL<<16));
// commit on-demand for large and huge pages?
if (_mi_page_segment(page)->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) {
uint8_t* start = page_start + (page->capacity * bsize);
_mi_mem_commit(start, extend * bsize, NULL, &tld->os);
}
// and append the extend the free list
if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) {
mi_page_free_list_extend(page, bsize, extend, &tld->stats );

View File

@@ -49,7 +49,7 @@ bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
// arena.c
void _mi_arena_free(void* p, size_t size, size_t memid, mi_stats_t* stats);
void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
@@ -187,7 +187,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
const uintptr_t idx = mi_atomic_increment(&regions_count);
if (idx >= MI_REGION_MAX) {
mi_atomic_decrement(&regions_count);
_mi_arena_free(start, MI_REGION_SIZE, arena_memid, tld->stats);
_mi_arena_free(start, MI_REGION_SIZE, arena_memid, region_commit, tld->stats);
_mi_warning_message("maximum regions used: %zu GiB (perhaps recompile with a larger setting for MI_HEAP_REGION_MAX_SIZE)", _mi_divide_up(MI_HEAP_REGION_MAX_SIZE, GiB));
return false;
}
@@ -205,6 +205,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
// and share it
mi_region_info_t info;
info.value = 0; // initialize the full union to zero
info.x.valid = true;
info.x.is_large = region_large;
info.x.numa_node = (short)_mi_os_numa_node(tld);
@@ -391,7 +392,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
mem_region_t* region;
if (mi_memid_is_arena(id,&region,&bit_idx,&arena_memid)) {
// was a direct arena allocation, pass through
_mi_arena_free(p, size, arena_memid, tld->stats);
_mi_arena_free(p, size, arena_memid, full_commit, tld->stats);
}
else {
// allocated in a region
@@ -454,12 +455,13 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
// on success, free the whole region
uint8_t* start = mi_atomic_read_ptr(uint8_t,&regions[i].start);
size_t arena_memid = mi_atomic_read_relaxed(&regions[i].arena_memid);
uintptr_t commit = mi_atomic_read_relaxed(&regions[i].commit);
memset(&regions[i], 0, sizeof(mem_region_t));
// and release the whole region
mi_atomic_write(&region->info, 0);
if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) {
_mi_abandoned_await_readers(); // ensure no pending reads
_mi_arena_free(start, MI_REGION_SIZE, arena_memid, tld->stats);
_mi_arena_free(start, MI_REGION_SIZE, arena_memid, (~commit == 0), tld->stats);
}
}
}
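The `~commit == 0` expression above reads as "every bit of the region's commit mask is set", i.e. all blocks of the region are committed. A small self-contained illustration of the idiom (added here, not part of the diff):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t fully_committed  = ~(uintptr_t)0;          // all bits set: every block committed
  uintptr_t partly_committed = fully_committed >> 1;   // top bit clear: one block uncommitted
  printf("%d\n", ~fully_committed == 0);    // prints 1
  printf("%d\n", ~partly_committed == 0);   // prints 0
  return 0;
}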

View File

@@ -204,9 +204,9 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
mi_segment_protect_range((uint8_t*)segment + segment->segment_info_size - os_page_size, os_page_size, protect);
if (MI_SECURE <= 1 || segment->capacity == 1) {
// and protect the last (or only) page too
mi_assert_internal(segment->page_kind >= MI_PAGE_LARGE);
mi_assert_internal(MI_SECURE <= 1 || segment->page_kind >= MI_PAGE_LARGE);
uint8_t* start = (uint8_t*)segment + segment->segment_size - os_page_size;
if (protect && !mi_option_is_enabled(mi_option_eager_page_commit)) {
if (protect && !segment->mem_is_committed) {
// ensure secure page is committed
_mi_mem_commit(start, os_page_size, NULL, tld);
}
@@ -236,12 +236,8 @@ static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, m
void* start = mi_segment_raw_page_start(segment, page, &psize);
page->is_reset = true;
mi_assert_internal(size <= psize);
size_t reset_size = (size == 0 || size > psize ? psize : size);
if (size == 0 && segment->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) {
mi_assert_internal(page->xblock_size > 0);
reset_size = page->capacity * mi_page_block_size(page);
}
_mi_mem_reset(start, reset_size, tld->os);
size_t reset_size = ((size == 0 || size > psize) ? psize : size);
if (reset_size > 0) _mi_mem_reset(start, reset_size, tld->os);
}
static void mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld)
@@ -253,12 +249,8 @@ static void mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size,
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
size_t unreset_size = (size == 0 || size > psize ? psize : size);
if (size == 0 && segment->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) {
mi_assert_internal(page->xblock_size > 0);
unreset_size = page->capacity * mi_page_block_size(page);
}
bool is_zero = false;
_mi_mem_unreset(start, unreset_size, &is_zero, tld->os);
if (unreset_size > 0) _mi_mem_unreset(start, unreset_size, &is_zero, tld->os);
if (is_zero) page->is_zero_init = true;
}
@@ -475,9 +467,6 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
if (any_reset && mi_option_is_enabled(mi_option_reset_decommits)) {
fully_committed = false;
}
if (segment->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) {
fully_committed = false;
}
_mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os);
}
@@ -627,8 +616,13 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
if (!commit) {
// ensure the initial info is committed
bool commit_zero = false;
_mi_mem_commit(segment, pre_size, &commit_zero, tld->os);
bool ok = _mi_mem_commit(segment, pre_size, &commit_zero, tld->os);
if (commit_zero) is_zero = true;
if (!ok) {
// commit failed; we cannot touch the memory: free the segment directly and return `NULL`
_mi_mem_free(segment, MI_SEGMENT_SIZE, memid, false, false, os_tld);
return NULL;
}
}
segment->memid = memid;
segment->mem_is_fixed = mem_large;
@@ -714,28 +708,28 @@ static bool mi_segment_has_free(const mi_segment_t* segment) {
return (segment->used < segment->capacity);
}
static void mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(_mi_page_segment(page) == segment);
mi_assert_internal(!page->segment_in_use);
// set in-use before doing unreset to prevent delayed reset
mi_pages_reset_remove(page, tld);
page->segment_in_use = true;
segment->used++;
// check commit
if (!page->is_committed) {
mi_assert_internal(!segment->mem_is_fixed);
mi_assert_internal(!page->is_reset);
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false;
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
bool ok = _mi_mem_commit(start, psize + gsize, &is_zero, tld->os);
if (!ok) return false; // failed to commit!
if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); }
if (is_zero) { page->is_zero_init = true; }
page->is_committed = true;
if (segment->page_kind < MI_PAGE_LARGE
|| !mi_option_is_enabled(mi_option_eager_page_commit)) {
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false;
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
_mi_mem_commit(start, psize + gsize, &is_zero, tld->os);
if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); }
if (is_zero) { page->is_zero_init = true; }
}
}
// set in-use before doing unreset to prevent delayed reset
page->segment_in_use = true;
segment->used++;
// check reset
if (page->is_reset) {
mi_page_unreset(segment, page, 0, tld); // todo: only unreset the part that was reset?
}
@@ -746,6 +740,7 @@ static void mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_seg
mi_assert_internal(!mi_segment_has_free(segment));
mi_segment_remove_from_free_queue(segment, tld);
}
return true;
}
@@ -1212,8 +1207,8 @@ static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t*
for (size_t i = 0; i < segment->capacity; i++) { // TODO: use a bitmap instead of search?
mi_page_t* page = &segment->pages[i];
if (!page->segment_in_use) {
mi_segment_page_claim(segment, page, tld);
return page;
bool ok = mi_segment_page_claim(segment, page, tld);
if (ok) return page;
}
}
mi_assert(false);

View File

@@ -25,5 +25,8 @@ terms of the MIT license. A copy of the license can be found in the file
#include "alloc.c"
#include "alloc-aligned.c"
#include "alloc-posix.c"
#if MI_OSX_ZONE
#include "alloc-override-osx.c"
#endif
#include "init.c"
#include "options.c"

View File

@@ -237,9 +237,51 @@ static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bin
#endif
//------------------------------------------------------------
// Use an output wrapper for line-buffered output
// (which is nice when using loggers etc.)
//------------------------------------------------------------
typedef struct buffered_s {
mi_output_fun* out; // original output function
void* arg; // and state
char* buf; // local buffer of at least size `count+1`
size_t used; // currently used chars `used <= count`
size_t count; // total chars available for output
} buffered_t;
static void mi_buffered_flush(buffered_t* buf) {
buf->buf[buf->used] = 0;
_mi_fputs(buf->out, buf->arg, NULL, buf->buf);
buf->used = 0;
}
static void mi_buffered_out(const char* msg, void* arg) {
buffered_t* buf = (buffered_t*)arg;
if (msg==NULL || buf==NULL) return;
for (const char* src = msg; *src != 0; src++) {
char c = *src;
if (buf->used >= buf->count) mi_buffered_flush(buf);
mi_assert_internal(buf->used < buf->count);
buf->buf[buf->used++] = c;
if (c == '\n') mi_buffered_flush(buf);
}
}
//------------------------------------------------------------
// Print statistics
//------------------------------------------------------------
static void mi_process_info(mi_msecs_t* utime, mi_msecs_t* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit);
static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun* out, void* arg) mi_attr_noexcept {
static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun* out0, void* arg0) mi_attr_noexcept {
// wrap the output function to be line buffered
char buf[256];
buffered_t buffer = { out0, arg0, buf, 0, 255 };
mi_output_fun* out = &mi_buffered_out;
void* arg = &buffer;
// and print using that
mi_print_header(out,arg);
#if MI_STAT>1
mi_stat_count_t normal = { 0,0,0,0 };
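Because of the line-buffered wrapper above, output produced during mi_stats_print reaches the (possibly user-registered) output function one line at a time, which keeps log prefixes intact. A hedged usage sketch, assuming the public mi_register_output/mi_stats_print API and not taken from the diff:

#include <stdio.h>
#include <mimalloc.h>

// Each call receives one whole line, so the prefix is never split mid-line.
static void log_line(const char* msg, void* arg) {
  fprintf((FILE*)arg, "[mimalloc] %s", msg);
}

int main(void) {
  mi_register_output(&log_line, stderr);
  void* p = mi_malloc(100);
  mi_free(p);
  mi_stats_print(NULL);   // statistics arrive at log_line line by line
  return 0;
}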

View File

@@ -39,10 +39,12 @@ static size_t use_one_size = 0; // use single object size of `N * s
#ifdef USE_STD_MALLOC
#define custom_mallocn(n,s) malloc(n*s)
#define custom_calloc(n,s) calloc(n,s)
#define custom_realloc(p,s) realloc(p,s)
#define custom_free(p) free(p)
#else
#define custom_mallocn(n,s) mi_mallocn(n,s)
#define custom_calloc(n,s) mi_calloc(n,s)
#define custom_realloc(p,s) mi_realloc(p,s)
#define custom_free(p) mi_free(p)
@@ -97,7 +99,7 @@ static void* alloc_items(size_t items, random_t r) {
if (items == 40) items++; // pthreads uses that size for stack increases
if (use_one_size > 0) items = (use_one_size / sizeof(uintptr_t));
if (items==0) items = 1;
uintptr_t* p = (uintptr_t*)custom_calloc(items,sizeof(uintptr_t));
uintptr_t* p = (uintptr_t*)custom_mallocn(items,sizeof(uintptr_t));
if (p != NULL) {
for (uintptr_t i = 0; i < items; i++) {
p[i] = (items - i) ^ cookie;
@@ -242,14 +244,18 @@ int main(int argc, char** argv) {
// Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
srand(0x7feb352d);
// mi_stats_reset();
#ifndef NDEBUG
mi_stats_reset();
#endif
#ifdef STRESS
test_stress();
#else
test_leak();
#endif
// mi_collect(true);
#ifndef NDEBUG
mi_collect(true);
mi_heap_check_leak(NULL,NULL,NULL);
#endif
mi_stats_print(NULL);
//bench_end_program();
return 0;