Merge branch 'dev-slice' into dev-slice-reset

daanx 2023-04-03 12:48:30 -07:00
commit e96af1dba0
9 changed files with 17 additions and 17 deletions

CMakeLists.txt

@@ -1,6 +1,6 @@
set(mi_version_major 2)
set(mi_version_minor 1)
-set(mi_version_patch 1)
+set(mi_version_patch 2)
set(mi_version ${mi_version_major}.${mi_version_minor})
set(PACKAGE_VERSION ${mi_version})

include/mimalloc.h

@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
-#define MI_MALLOC_VERSION 211 // major + 2 digits minor
+#define MI_MALLOC_VERSION 212 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes

readme.md

@@ -12,8 +12,8 @@ is a general purpose allocator with excellent [performance](#performance) charac
Initially developed by Daan Leijen for the runtime systems of the
[Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages.
-Latest release tag: `v2.1.0` (2023-03-29).
-Latest stable tag: `v1.8.0` (2023-03-29).
+Latest release tag: `v2.1.1` (2023-04-03).
+Latest stable tag: `v1.8.1` (2023-04-03).
mimalloc is a drop-in replacement for `malloc` and can be used in other programs
without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:
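> LD_PRELOAD=/usr/lib/libmimalloc.so myprogram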
@@ -78,6 +78,8 @@ Note: the `v2.x` version has a new algorithm for managing internal mimalloc pages that tends to use reduced memory usage
and fragmentation compared to mimalloc `v1.x` (especially for large workloads). Should otherwise have similar performance
(see [below](#performance)); please report if you observe any significant performance regression.
+* 2023-04-03, `v1.8.1`, `v2.1.1`: Fixes build issues on some platforms.
* 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support for dynamic overriding on Windows 11. Improved tracing precision
with [asan](#asan) and [Valgrind](#valgrind), and added Windows event tracing [ETW](#ETW) (contributed by Xinglong He). Created an OS
abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes.

src/alloc.c

@@ -75,8 +75,8 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
#if (MI_DEBUG>=2)
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
-mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
#endif
+mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
#if MI_PADDING_CHECK
@@ -401,8 +401,8 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
#endif
}
-#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
-if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
+if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
}
#endif
@@ -455,7 +455,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
// owning thread can free a block directly
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
-#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
if (!mi_page_is_huge(page)) { // huge page content may be already decommitted
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
}
@@ -551,7 +551,7 @@ void mi_free(void* p) mi_attr_noexcept
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
-#if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster than mi_usable_size as we already know the page and that p is unaligned

src/arena.c

@@ -219,7 +219,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
bool* is_pinned, bool* is_zero,
mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld )
{
-MI_UNUSED_RELEASE(alignment);
+MI_UNUSED(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
const size_t bcount = mi_block_count_of_size(size);

src/bitmap.c

@@ -314,7 +314,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
// Helper for masks across fields; returns the mid count, post_mask may be 0
static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
-MI_UNUSED_RELEASE(bitmap_fields);
+MI_UNUSED(bitmap_fields);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
*pre_mask = mi_bitmap_mask_(count, bitidx);

src/options.c

@@ -301,7 +301,7 @@ static void mi_recurse_exit(void) {
}
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) {
-if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) { // TODO: use mi_out_stderr for stderr?
+if (out==NULL || (void*)out==(void*)stdout || (void*)out==(void*)stderr) { // TODO: use mi_out_stderr for stderr?
if (!mi_recurse_enter()) return;
out = mi_out_get_default(&arg);
if (prefix != NULL) out(prefix, arg);

src/os.c

@@ -403,10 +403,8 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
else _mi_stat_decrease(&stats->reset, csize);
if (!reset) return true; // nothing to do on unreset!
-#if (MI_DEBUG>1) && !MI_TRACK_ENABLED // && !MI_TSAN
-if (MI_SECURE==0) {
-memset(start, 0, csize); // pretend it is eagerly reset
-}
+#if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN
+memset(start, 0, csize); // pretend it is eagerly reset
#endif
int err = _mi_prim_reset(start, csize);

src/page.c

@@ -383,7 +383,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_page_set_heap(page, NULL);
-#if MI_DEBUG>1
+#if (MI_DEBUG>1) && !MI_TRACK_ENABLED
// check there are no references left..
for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
mi_assert_internal(_mi_ptr_page(block) != page);