merge segment_init refactoring from dev

Daan Leijen 2022-11-22 19:03:26 -08:00
commit 85b5fa11bc
5 changed files with 302 additions and 131 deletions


@@ -401,6 +401,9 @@ mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, s
mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);
#ifdef __cplusplus
}
#endif
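
For illustration, a minimal usage sketch of the new heap-targeted `mi_heap_alloc_new` / `mi_heap_alloc_new_n` API declared above (not part of the diff; assumes mimalloc is installed and linked so that <mimalloc.h> is available):

#include <mimalloc.h>
#include <cstdio>

int main() {
  mi_heap_t* heap = mi_heap_new();                 // create a private heap
  // like `new double[1000]`, but allocated in `heap`; the C++ new handler is invoked on exhaustion
  double* xs = (double*)mi_heap_alloc_new_n(heap, 1000, sizeof(double));
  xs[0] = 42.0;
  std::printf("first element: %.1f\n", xs[0]);
  mi_free(xs);                                     // blocks can still be freed individually
  mi_heap_delete(heap);                            // delete the heap itself
  return 0;
}
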
@@ -460,6 +463,124 @@ template<class T> struct mi_stl_allocator {
template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
#include <memory> // std::shared_ptr
// STL allocator allocation in a specific heap
template<class T> struct mi_heap_stl_allocator {
typedef T value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef value_type* pointer;
typedef value_type const* const_pointer;
template <class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
mi_heap_stl_allocator() {
mi_heap_t* hp = mi_heap_new();
this->heap.reset(hp, heap_delete);
}
mi_heap_stl_allocator(mi_heap_t* hp) : heap(hp) { } /* will not delete or destroy the passed in heap */
mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : heap(x.heap) { }
template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : heap(x.heap) { }
mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
void deallocate(T* p, size_type) { mi_free(p); }
#if (__cplusplus >= 201703L) // C++17
mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
#else
mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
#endif
#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
using propagate_on_container_copy_assignment = std::true_type;
using propagate_on_container_move_assignment = std::true_type;
using propagate_on_container_swap = std::true_type;
using is_always_equal = std::false_type;
template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
#else
void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
void destroy(pointer p) { p->~value_type(); }
#endif
size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX / sizeof(value_type)); }
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
// protected:
std::shared_ptr<mi_heap_t> heap;
private:
static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } }
};
template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.heap == y.heap); }
template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.heap != y.heap); }
// STL allocator allocation in a specific heap, where `free` does nothing and
// the heap is destroyed in one go on destruction -- use with care!
template<class T> struct mi_heap_destroy_stl_allocator {
typedef T value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef value_type* pointer;
typedef value_type const* const_pointer;
template <class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
mi_heap_destroy_stl_allocator() {
mi_heap_t* hp = mi_heap_new();
this->heap.reset(hp, heap_destroy);
}
mi_heap_destroy_stl_allocator(mi_heap_t* hp) : heap(hp) { } /* will not delete or destroy the passed-in heap; nor free any allocated objects it allocates in the heap! */
mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : heap(x.heap) { }
template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : heap(x.heap) { }
mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
void deallocate(T* p, size_type) { /* do nothing as we destroy the heap on destruct. */ }
#if (__cplusplus >= 201703L) // C++17
mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
#else
mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
#endif
#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
using propagate_on_container_copy_assignment = std::true_type;
using propagate_on_container_move_assignment = std::true_type;
using propagate_on_container_swap = std::true_type;
using is_always_equal = std::false_type;
template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
#else
void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
void destroy(pointer p) { p->~value_type(); }
#endif
size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX / sizeof(value_type)); }
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
// protected:
std::shared_ptr<mi_heap_t> heap;
private:
static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
};
template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.heap == y.heap); }
template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.heap != y.heap); }
#endif // C++11
#endif // __cplusplus
#endif
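
For context, a small usage sketch of the two allocators added above (not part of the diff; assumes C++11 or later and a linked mimalloc build), in the spirit of the tests added at the end of this commit:

#include <mimalloc.h>
#include <string>
#include <vector>

int main() {
  // mi_heap_stl_allocator: allocations go to a private heap; objects are freed
  // normally, and the heap is deleted when the last allocator copy goes away.
  {
    mi_heap_stl_allocator<int> ha;
    std::vector<int, mi_heap_stl_allocator<int> > v(ha);
    for (int i = 0; i < 100; i++) v.push_back(i);
  }  // vector freed its blocks; the shared heap is deleted here

  // mi_heap_destroy_stl_allocator: deallocate() is a no-op and the whole heap is
  // destroyed in one go when the last allocator copy goes away -- use with care.
  {
    mi_heap_destroy_stl_allocator<std::string> hd;
    std::vector<std::string, mi_heap_destroy_stl_allocator<std::string> > w(hd);
    w.push_back("hello");
    w.push_back("world");
  }  // all of w's storage is released at once with the heap
  return 0;
}
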


@@ -27,6 +27,8 @@ It also has an easy way to override the default allocator in [Windows](#override
to integrate and adapt in other projects. For runtime systems it
provides hooks for a monotonic _heartbeat_ and deferred freeing (for
bounded worst-case times with reference counting).
Partly due to its simplicity, mimalloc has been ported to many systems (Windows, macOS,
Linux, WASM, various BSD's, Haiku, MUSL, etc) and has excellent support for dynamic overriding.
- __free list sharding__: instead of one big free list (per size class) we have
many smaller lists per "mimalloc page" which reduces fragmentation and
increases locality --
@@ -42,7 +44,7 @@ It also has an easy way to override the default allocator in [Windows](#override
similar to randomized algorithms like skip lists where adding
a random oracle removes the need for a more complex algorithm.
- __eager page reset__: when a "page" becomes empty (with increased chance
due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged")
due to free list sharding) the memory is marked to the OS as unused (reset or decommitted)
reducing (real) memory pressure and fragmentation, especially in long running
programs.
- __secure__: _mimalloc_ can be built in secure mode, adding guard pages,
@@ -52,13 +54,12 @@ It also has an easy way to override the default allocator in [Windows](#override
- __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions.
A heap can be destroyed at once instead of deallocating each object separately.
- __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation
times (_wcat_), bounded space overhead (~0.2% meta-data, with low internal fragmentation),
and has no internal points of contention using only atomic operations.
times (_wcat_) (up to OS primitives), bounded space overhead (~0.2% meta-data, with low
internal fragmentation), and has no internal points of contention using only atomic operations.
- __fast__: In our benchmarks (see [below](#performance)),
_mimalloc_ outperforms other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc),
and often uses less memory. A nice property
is that it does consistently well over a wide range of benchmarks. There is also good huge OS page
support for larger server programs.
and often uses less memory. A nice property is that it does consistently well over a wide range
of benchmarks. There is also good huge OS page support for larger server programs.
The [documentation](https://microsoft.github.io/mimalloc) gives a full overview of the API.
You can read more on the design of _mimalloc_ in the [technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) which also has detailed benchmark results.


@@ -91,7 +91,10 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
mi_assert(heap != NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
#if MI_DEBUG
const uintptr_t tid = _mi_thread_id();
mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
#endif
mi_assert(size <= MI_SMALL_SIZE_MAX);
#if (MI_PADDING)
if (size == 0) {
@@ -916,20 +919,46 @@ static bool mi_try_new_handler(bool nothrow) {
}
#endif
static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
static mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) {
void* p = NULL;
while(p == NULL && mi_try_new_handler(nothrow)) {
p = mi_malloc(size);
p = mi_heap_malloc(heap,size);
}
return p;
}
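
The loop above follows the usual C++ operator-new protocol: on allocation failure, keep invoking the installed new handler (which may release memory, install a different handler, or throw) and retry. A standalone sketch of that protocol for comparison (not mimalloc internals; `try_alloc` is a made-up name and plain `malloc` stands in for the heap allocation):

#include <cstdio>
#include <cstdlib>
#include <new>

// If allocation fails, invoke the installed C++ new handler and try again;
// with no handler installed, either return NULL (nothrow) or throw std::bad_alloc.
static void* try_alloc(std::size_t size, bool nothrow) {
  void* p = std::malloc(size);
  while (p == nullptr) {
    std::new_handler h = std::get_new_handler();   // C++11
    if (h == nullptr) {
      if (nothrow) return nullptr;
      throw std::bad_alloc();
    }
    h();                       // the handler may free memory, install another handler, or throw
    p = std::malloc(size);     // retry (mi_heap_try_new retries mi_heap_malloc on the given heap)
  }
  return p;
}

int main() {
  void* p = try_alloc(64, /*nothrow=*/true);
  std::printf("%p\n", p);
  std::free(p);
}
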
mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
void* p = mi_malloc(size);
if mi_unlikely(p == NULL) return mi_try_new(size,false);
static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
return mi_heap_try_new(mi_get_default_heap(), size, nothrow);
}
mi_decl_nodiscard mi_decl_restrict inline void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
void* p = mi_heap_malloc(heap,size);
if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false);
return p;
}
mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
return mi_heap_alloc_new(mi_get_default_heap(), size);
}
mi_decl_nodiscard mi_decl_restrict inline void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
size_t total;
if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
else {
return mi_heap_alloc_new(heap,total);
}
}
mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
return mi_heap_alloc_new_n(mi_get_default_heap(), count, size);
}
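
mi_heap_alloc_new_n guards the `count * size` multiplication against overflow before allocating, so an array `new` with a huge count cannot silently wrap to a small allocation. mi_count_size_overflow itself is internal and not shown in this diff; the sketch below only illustrates the kind of check involved (the name and the zero-size handling are assumptions):

#include <cassert>
#include <cstddef>
#include <cstdint>

// returns true if count*size would overflow size_t; otherwise stores the product in *total
static bool count_size_overflow(std::size_t count, std::size_t size, std::size_t* total) {
  if (count == 0 || size == 0) { *total = 0; return false; }
  if (count > SIZE_MAX / size) return true;    // the multiplication would wrap
  *total = count * size;
  return false;
}

int main() {
  std::size_t total;
  assert(!count_size_overflow(1000, sizeof(double), &total) && total == 1000 * sizeof(double));
  assert(count_size_overflow(SIZE_MAX, 2, &total));
  return 0;
}
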
mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
void* p = mi_malloc(size);
if mi_unlikely(p == NULL) return mi_try_new(size, true);
@@ -954,17 +983,6 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz
return p;
}
mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
size_t total;
if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
else {
return mi_new(total);
}
}
mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
void* q;
do {


@@ -771,11 +771,81 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
Segment allocation
----------------------------------------------------------- */
static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delay, mi_arena_id_t req_arena_id,
size_t* psegment_slices, size_t* ppre_size, size_t* pinfo_slices,
mi_commit_mask_t* pcommit_mask, mi_commit_mask_t* pdecommit_mask,
bool* is_zero, bool* pcommit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
// Allocate the segment from the OS
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
bool is_pinned = false;
size_t memid = 0;
size_t align_offset = 0;
size_t alignment = MI_SEGMENT_ALIGN;
if (page_alignment > 0) {
// mi_assert_internal(huge_page != NULL);
mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN);
alignment = page_alignment;
const size_t info_size = (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE;
align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN );
const size_t extra = align_offset - info_size;
// recalculate due to potential guard pages
*psegment_slices = mi_segment_calculate_slices(required + extra, ppre_size, pinfo_slices);
//segment_size += _mi_align_up(align_offset - info_size, MI_SEGMENT_SLICE_SIZE);
//segment_slices = segment_size / MI_SEGMENT_SLICE_SIZE;
}
const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
mi_segment_t* segment = NULL;
// get from cache?
if (page_alignment == 0) {
segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, pcommit_mask, pdecommit_mask, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
}
// get from OS
if (segment==NULL) {
segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, pcommit, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
if (segment == NULL) return NULL; // failed to allocate
if (*pcommit) {
mi_commit_mask_create_full(pcommit_mask);
}
else {
mi_commit_mask_create_empty(pcommit_mask);
}
}
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
mi_assert_internal(commit_needed>0);
mi_commit_mask_t commit_needed_mask;
mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
if (!mi_commit_mask_all_set(pcommit_mask, &commit_needed_mask)) {
// at least commit the info slices
mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE);
bool ok = _mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, is_zero, tld->stats);
if (!ok) return NULL; // failed to commit
mi_commit_mask_set(pcommit_mask, &commit_needed_mask);
}
mi_track_mem_undefined(segment,commit_needed);
segment->memid = memid;
segment->mem_is_pinned = is_pinned;
segment->mem_is_large = mem_large;
segment->mem_is_committed = mi_commit_mask_is_full(pcommit_mask);
segment->mem_alignment = alignment;
segment->mem_align_offset = align_offset;
mi_segments_track_size((long)(segment_size), tld);
_mi_segment_map_allocated_at(segment);
return segment;
}
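
The commit mask manipulated above records which MI_COMMIT_SIZE-sized chunks of the segment are committed; before the segment struct is touched, at least the chunks covering the `info_slices` metadata must be committed. A simplified standalone sketch of that bookkeeping, using std::bitset in place of mi_commit_mask_t and purely illustrative sizes (not mimalloc code):

#include <bitset>
#include <cstddef>
#include <cstdio>

constexpr std::size_t kCommitSize       = 64 * 1024;  // illustrative commit granularity
constexpr std::size_t kChunksPerSegment = 512;        // illustrative segment size / chunk

using commit_mask = std::bitset<kChunksPerSegment>;

// Check whether the chunks covering the first `info_bytes` of the segment are all committed.
static bool info_chunks_committed(const commit_mask& committed, std::size_t info_bytes) {
  const std::size_t needed = (info_bytes + kCommitSize - 1) / kCommitSize;  // like _mi_divide_up
  commit_mask required;
  for (std::size_t i = 0; i < needed; i++) required.set(i);
  return (committed & required) == required;           // analogue of mi_commit_mask_all_set
}

int main() {
  commit_mask committed;                               // nothing committed yet (lazy commit)
  if (!info_chunks_committed(committed, 3 * kCommitSize)) {
    // an OS commit of the first 3 chunks would happen here; then record it in the mask:
    for (std::size_t i = 0; i < 3; i++) committed.set(i);
  }
  std::printf("info committed: %d\n", (int)info_chunks_committed(committed, 3 * kCommitSize));
  return 0;
}
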
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
{
mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
mi_assert_internal((segment==NULL) || (segment!=NULL && required==0));
// calculate needed sizes first
size_t info_slices;
size_t pre_size;
@@ -786,114 +856,42 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, siz
_mi_current_thread_count() > 1 && // do not delay for the first N threads
tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
bool commit = eager || (required > 0);
// Try to get from our cache first
bool is_zero = false;
const bool commit_info_still_good = (segment != NULL);
bool commit = eager || (required > 0);
bool is_zero = false;
mi_commit_mask_t commit_mask;
mi_commit_mask_t decommit_mask;
if (segment != NULL) {
commit_mask = segment->commit_mask;
decommit_mask = segment->decommit_mask;
}
else {
mi_commit_mask_create_empty(&commit_mask);
mi_commit_mask_create_empty(&decommit_mask);
}
if (segment==NULL) {
// Allocate the segment from the OS
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
bool is_pinned = false;
size_t memid = 0;
size_t align_offset = 0;
size_t alignment = MI_SEGMENT_ALIGN;
if (page_alignment > 0) {
mi_assert_internal(huge_page != NULL);
mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN);
alignment = page_alignment;
const size_t info_size = info_slices * MI_SEGMENT_SLICE_SIZE;
align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN );
const size_t extra = align_offset - info_size;
// recalculate due to potential guard pages
segment_slices = mi_segment_calculate_slices(required + extra, &pre_size, &info_slices);
//segment_size += _mi_align_up(align_offset - info_size, MI_SEGMENT_SLICE_SIZE);
//segment_slices = segment_size / MI_SEGMENT_SLICE_SIZE;
}
const size_t segment_size = segment_slices * MI_SEGMENT_SLICE_SIZE;
mi_commit_mask_create_empty(&commit_mask);
mi_commit_mask_create_empty(&decommit_mask);
// get from cache
if (page_alignment == 0) {
segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
}
// get from OS
if (segment==NULL) {
segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, &commit, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld);
if (segment == NULL) return NULL; // failed to allocate
if (commit) {
mi_commit_mask_create_full(&commit_mask);
}
else {
mi_commit_mask_create_empty(&commit_mask);
}
}
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
mi_assert_internal(commit_needed>0);
mi_commit_mask_t commit_needed_mask;
mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
if (!mi_commit_mask_all_set(&commit_mask, &commit_needed_mask)) {
// at least commit the info slices
mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= info_slices*MI_SEGMENT_SLICE_SIZE);
bool ok = _mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, &is_zero, tld->stats);
if (!ok) return NULL; // failed to commit
mi_commit_mask_set(&commit_mask, &commit_needed_mask);
}
mi_track_mem_undefined(segment,commit_needed);
segment->memid = memid;
segment->mem_is_pinned = is_pinned;
segment->mem_is_large = mem_large;
segment->mem_is_committed = mi_commit_mask_is_full(&commit_mask);
segment->mem_alignment = alignment;
segment->mem_align_offset = align_offset;
mi_segments_track_size((long)(segment_size), tld);
_mi_segment_map_allocated_at(segment);
}
// zero the segment info? -- not always needed as it is zero initialized from the OS
// Allocate the segment from the OS
mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id,
&segment_slices, &pre_size, &info_slices, &commit_mask, &decommit_mask,
&is_zero, &commit, tld, os_tld);
if (segment == NULL) return NULL;
// zero the segment info? -- not always needed as it may be zero initialized from the OS
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
if (!is_zero) {
ptrdiff_t ofs = offsetof(mi_segment_t, next);
size_t prefix = offsetof(mi_segment_t, slices) - ofs;
memset((uint8_t*)segment+ofs, 0, prefix + sizeof(mi_slice_t)*(segment_slices+1)); // one more
}
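
The memset above clears only part of the segment struct, from the `next` field through the slice entries in use, so the fields laid out before it keep the values set in mi_segment_os_alloc. A tiny standalone illustration of this offsetof-based partial zeroing (the struct and field names below are made up):

#include <cstddef>
#include <cstring>

struct segment_like {
  std::size_t   memid;      // already initialized, must be preserved
  bool          committed;  // already initialized, must be preserved
  segment_like* next;       // everything from here on gets cleared
  int           slices[8];
};

int main() {
  segment_like s;
  s.memid = 42; s.committed = true;
  const std::size_t ofs = offsetof(segment_like, next);
  std::memset((char*)&s + ofs, 0, sizeof(segment_like) - ofs);
  return (s.memid == 42 && s.next == nullptr) ? 0 : 1;
}
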
if (!commit_info_still_good) {
segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large);
if (segment->allow_decommit) {
segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
segment->decommit_mask = decommit_mask;
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
#if MI_DEBUG>2
const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
mi_commit_mask_t commit_needed_mask;
mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
mi_assert_internal(!mi_commit_mask_any_set(&segment->decommit_mask, &commit_needed_mask));
#endif
}
else {
mi_assert_internal(mi_commit_mask_is_empty(&decommit_mask));
segment->decommit_expire = 0;
mi_commit_mask_create_empty( &segment->decommit_mask );
mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask));
}
}
segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large);
if (segment->allow_decommit) {
segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
segment->decommit_mask = decommit_mask;
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
#if MI_DEBUG>2
const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
mi_commit_mask_t commit_needed_mask;
mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
mi_assert_internal(!mi_commit_mask_any_set(&segment->decommit_mask, &commit_needed_mask));
#endif
}
// initialize segment info
const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
segment->segment_slices = segment_slices;
@@ -945,12 +943,6 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, siz
}
// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) {
return mi_segment_init(NULL, required, page_alignment, req_arena_id, tld, os_tld, huge_page);
}
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
MI_UNUSED(force);
mi_assert_internal(segment != NULL);
@@ -1532,7 +1524,6 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
mi_assert_internal(segment->used==1);
mi_assert_internal(mi_page_block_size(page) >= size);
segment->thread_id = 0; // huge segments are immediately abandoned
#if MI_DEBUG > 3
if (page_alignment > 0) {
size_t psize;
uint8_t* p = _mi_segment_page_start(segment, page, &psize);
@@ -1546,7 +1537,6 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
_mi_os_decommit(decommit_start, decommit_size, os_tld->stats);
}
}
#endif
// for huge pages we initialize the xblock_size as we may
// overallocate to accommodate large alignments.
size_t psize;


@@ -38,6 +38,8 @@ static void tsan_numa_test(); // issue #414
static void strdup_test(); // issue #445
static void bench_alloc_large(void); // issue #xxx
static void test_stl_allocators();
int main() {
mi_stats_reset(); // ignore earlier allocations
@@ -50,6 +52,8 @@ int main() {
tsan_numa_test();
strdup_test();
test_stl_allocators();
test_mt_shutdown();
//fail_aslr();
bench_alloc_large();
@@ -127,6 +131,43 @@ static bool test_stl_allocator2() {
return vec.size() == 0;
}
static bool test_stl_allocator3() {
std::vector<int, mi_heap_stl_allocator<int> > vec;
vec.push_back(1);
vec.pop_back();
return vec.size() == 0;
}
static bool test_stl_allocator4() {
std::vector<some_struct, mi_heap_stl_allocator<some_struct> > vec;
vec.push_back(some_struct());
vec.pop_back();
return vec.size() == 0;
}
static bool test_stl_allocator5() {
std::vector<int, mi_heap_destroy_stl_allocator<int> > vec;
vec.push_back(1);
vec.pop_back();
return vec.size() == 0;
}
static bool test_stl_allocator6() {
std::vector<some_struct, mi_heap_destroy_stl_allocator<some_struct> > vec;
vec.push_back(some_struct());
vec.pop_back();
return vec.size() == 0;
}
static void test_stl_allocators() {
test_stl_allocator1();
test_stl_allocator2();
test_stl_allocator3();
test_stl_allocator4();
test_stl_allocator5();
test_stl_allocator6();
}
// issue 445
static void strdup_test() {
#ifdef _MSC_VER