diff --git a/doc/doxyfile b/doc/doxyfile
index 55cae8bf..d03a70f5 100644
--- a/doc/doxyfile
+++ b/doc/doxyfile
@@ -466,7 +466,7 @@ LOOKUP_CACHE_SIZE = 0
 # than 0 to get more control over the balance between CPU load and processing
 # speed. At this moment only the input processing can be done using multiple
 # threads. Since this is still an experimental feature the default is set to 1,
-# which efficively disables parallel processing. Please report any issues you
+# which effectively disables parallel processing. Please report any issues you
 # encounter. Generating dot graphs in parallel is controlled by the
 # DOT_NUM_THREADS setting.
 # Minimum value: 0, maximum value: 32, default value: 1.
diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h
index 9faaa0e7..d79eb2f8 100644
--- a/doc/mimalloc-doc.h
+++ b/doc/mimalloc-doc.h
@@ -441,7 +441,7 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
 /// @param pages The number of 1GiB pages to reserve.
 /// @param numa_nodes The number of nodes do evenly divide the pages over, or 0 for using the actual number of NUMA nodes.
 /// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout.
-/// @returns 0 if successfull, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out.
+/// @returns 0 if successful, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out.
 ///
 /// The reserved memory is used by mimalloc to satisfy allocations.
 /// May quit before \a timeout_msecs are expired if it estimates it will take more than
@@ -455,7 +455,7 @@ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t
 /// @param pages The number of 1GiB pages to reserve.
 /// @param numa_node The NUMA node where the memory is reserved (start at 0).
 /// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout.
-/// @returns 0 if successfull, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out.
+/// @returns 0 if successful, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out.
 ///
 /// The reserved memory is used by mimalloc to satisfy allocations.
 /// May quit before \a timeout_msecs are expired if it estimates it will take more than
@@ -468,7 +468,7 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
 /// Is the C runtime \a malloc API redirected?
 /// @returns \a true if all malloc API calls are redirected to mimalloc.
 ///
-/// Currenty only used on Windows.
+/// Currently only used on Windows.
 bool mi_is_redirected();
 
 /// Return process information (time and memory usage).
@@ -558,7 +558,7 @@ mi_heap_t* mi_heap_new();
 
 /// Delete a previously allocated heap.
 /// This will release resources and migrate any
-/// still allocated blocks in this heap (efficienty)
+/// still allocated blocks in this heap (efficiently)
 /// to the default heap.
 ///
 /// If \a heap is the default heap, the default
@@ -888,7 +888,7 @@ void mi_free_aligned(void* p, size_t alignment);
 ///
 /// Note: use the `mimalloc-new-delete.h` header to override the \a new
 /// and \a delete operators globally. The wrappers here are mostly
-/// for convience for library writers that need to interface with
+/// for convenience for library writers that need to interface with
 /// mimalloc from C++.
 ///
 /// \{
diff --git a/docs/bench.html b/docs/bench.html
index d54f5fd6..213ff24b 100644
--- a/docs/bench.html
+++ b/docs/bench.html
@@ -100,7 +100,7 @@ $(document).ready(function(){initNavTree('bench.html',''); initResizable(); });

 We tested mimalloc against many other top allocators over a wide range of benchmarks, ranging from various real world programs to synthetic benchmarks that see how the allocator behaves under more extreme circumstances.
 
 In our benchmarks, mimalloc always outperforms all other leading allocators (jemalloc, tcmalloc, Hoard, etc) (Jan 2021), and usually uses less memory (up to 25% more in the worst case). A nice property is that it does consistently well over the wide range of benchmarks.
 
-See the Performance section in the mimalloc repository for benchmark results, or the the technical report for detailed benchmark results.
+See the Performance section in the mimalloc repository for benchmark results, or the technical report for detailed benchmark results.
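
The huge-page reservation functions documented in the `doc/mimalloc-doc.h` hunks above return 0 on success, \a ENOMEM when memory runs out, and \a ETIMEDOUT when the timeout expires. Below is a minimal sketch of a caller checking those codes, assuming only the signatures shown in this patch; the page count, the NUMA-node argument, and the timeout are illustrative values, not recommendations.

```c
// Sketch only: reserve 4 x 1GiB huge OS pages spread over all NUMA nodes,
// giving up after 2 seconds, per the doc comments patched above.
#include <errno.h>
#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  int err = mi_reserve_huge_os_pages_interleave(4 /*pages*/, 0 /*all NUMA nodes*/, 2000 /*timeout in ms*/);
  if (err == 0) {
    printf("reserved 4 GiB of huge OS pages for mimalloc\n");
  }
  else if (err == ENOMEM) {
    printf("not enough memory to reserve huge pages\n");
  }
  else if (err == ETIMEDOUT) {
    printf("timed out while reserving huge pages\n");
  }
  // Subsequent allocations are satisfied from the reserved memory when possible.
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}
```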

diff --git a/include/mimalloc/atomic.h b/include/mimalloc/atomic.h
index f4bde7f4..807c4da8 100644
--- a/include/mimalloc/atomic.h
+++ b/include/mimalloc/atomic.h
@@ -133,7 +133,9 @@ static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
 #elif defined(_MSC_VER)
 
 // MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics.
+#ifndef WIN32_LEAN_AND_MEAN
 #define WIN32_LEAN_AND_MEAN
+#endif
 #include <windows.h>
 #include <intrin.h>
 #ifdef _WIN64
@@ -327,7 +329,9 @@ static inline void mi_atomic_yield(void) {
   std::this_thread::yield();
 }
 #elif defined(_WIN32)
+#ifndef WIN32_LEAN_AND_MEAN
 #define WIN32_LEAN_AND_MEAN
+#endif
 #include <windows.h>
 static inline void mi_atomic_yield(void) {
   YieldProcessor();
diff --git a/include/mimalloc/prim.h b/include/mimalloc/prim.h
index ebb31df2..f8a40323 100644
--- a/include/mimalloc/prim.h
+++ b/include/mimalloc/prim.h
@@ -222,7 +222,9 @@ static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
 // Get a unique id for the current thread.
 #if defined(_WIN32)
 
+#ifndef WIN32_LEAN_AND_MEAN
 #define WIN32_LEAN_AND_MEAN
+#endif
 #include <windows.h>
 static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
   // Windows: works on Intel and ARM in both 32- and 64-bit
diff --git a/include/mimalloc/track.h b/include/mimalloc/track.h
index 9545f750..a659d940 100644
--- a/include/mimalloc/track.h
+++ b/include/mimalloc/track.h
@@ -82,7 +82,9 @@ defined, undefined, or not accessible at all:
 #define MI_TRACK_HEAP_DESTROY 1
 #define MI_TRACK_TOOL "ETW"
 
+#ifndef WIN32_LEAN_AND_MEAN
 #define WIN32_LEAN_AND_MEAN
+#endif
 #include <windows.h>
 #include "../src/prim/windows/etw.h"
diff --git a/readme.md b/readme.md
index 2772bcb7..4dea1086 100644
--- a/readme.md
+++ b/readme.md
@@ -91,7 +91,7 @@ Note: the `v2.x` version has a new algorithm for managing internal mimalloc page
   abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`).
   Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes.
 * 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support.
-  Support abitrary large alignments (in particular for `std::pmr` pools).
+  Support arbitrary large alignments (in particular for `std::pmr` pools).
   Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev).
   Heap walks now visit all object (including huge objects). Support Windows nano server containers (by Johannes Schindelin,@dscho).
   Various small bug fixes.
@@ -224,7 +224,7 @@ target_link_libraries(myapp PUBLIC mimalloc-static)
 to link with the static library. See `test\CMakeLists.txt` for an example.
 
 For best performance in C++ programs, it is also recommended to override the
-global `new` and `delete` operators. For convience, mimalloc provides
+global `new` and `delete` operators. For convenience, mimalloc provides
 [`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project.
 
 In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` interface.
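
The three header changes above all apply the same fix: only define `WIN32_LEAN_AND_MEAN` when the embedding project (or an earlier header) has not already defined it, so that the subsequent `#include <windows.h>` never triggers a macro-redefinition warning. A minimal sketch of the guarded pattern, taken directly from the hunks above:

```c
// Guarded define: respect a pre-existing WIN32_LEAN_AND_MEAN from the host
// project, otherwise request the trimmed-down Windows headers ourselves.
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
```

Separately, the readme hunk above reiterates the recommended C++ setup: include `mimalloc-new-delete.h` in exactly one source file of the project to override the global `new` and `delete` operators.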
diff --git a/src/arena.c b/src/arena.c
index db94bd1e..9934ec28 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -633,12 +633,12 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
 
   // checks
   if (arena == NULL) {
-    _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+    _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
     return;
   }
   mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx));
   if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) {
-    _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
+    _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
     return;
   }
 
diff --git a/src/bitmap.c b/src/bitmap.c
index 67953abb..4b6be66b 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 /* ----------------------------------------------------------------------------
 Concurrent bitmap that can set/reset sequences of bits atomically,
-represeted as an array of fields where each field is a machine word (`size_t`)
+represented as an array of fields where each field is a machine word (`size_t`)
 
 There are two api's; the standard one cannot have sequences that cross between
 the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
diff --git a/src/bitmap.h b/src/bitmap.h
index d01c15be..d8316b83 100644
--- a/src/bitmap.h
+++ b/src/bitmap.h
@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 
 /* ----------------------------------------------------------------------------
 Concurrent bitmap that can set/reset sequences of bits atomically,
-represeted as an array of fields where each field is a machine word (`size_t`)
+represented as an array of fields where each field is a machine word (`size_t`)
 
 There are two api's; the standard one cannot have sequences that cross between
 the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
diff --git a/src/free.c b/src/free.c
index d42e3f3d..b9cb6346 100644
--- a/src/free.c
+++ b/src/free.c
@@ -380,7 +380,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
   if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&   // quick check: aligned pointer?
       (n==NULL || mi_is_in_same_page(block, n)))  // quick check: in same page or NULL?
   {
-    // Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free?
+    // Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free?
     // (continue in separate function to improve code generation)
     is_double_free = mi_check_is_double_freex(page, block);
   }
diff --git a/src/heap.c b/src/heap.c
index a6477a56..2fb04f7a 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -426,7 +426,7 @@ void mi_heap_delete(mi_heap_t* heap)
   if (heap==NULL || !mi_heap_is_initialized(heap)) return;
 
   if (!mi_heap_is_backing(heap)) {
-    // tranfer still used pages to the backing heap
+    // transfer still used pages to the backing heap
     mi_heap_absorb(heap->tld->heap_backing, heap);
   }
   else {
diff --git a/src/page.c b/src/page.c
index bc9837f4..2ea25469 100644
--- a/src/page.c
+++ b/src/page.c
@@ -463,7 +463,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
       if (index < heap->page_retired_min) heap->page_retired_min = index;
       if (index > heap->page_retired_max) heap->page_retired_max = index;
       mi_assert_internal(mi_page_all_free(page));
-      return; // dont't free after all
+      return; // don't free after all
     }
   }
   _mi_page_free(page, pq, false);
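
The corrected comment in `mi_heap_delete` ("transfer still used pages to the backing heap") matches the documented behavior fixed in the `doc/mimalloc-doc.h` hunk: deleting a heap migrates any still-allocated blocks to the default heap instead of freeing them (unlike `mi_heap_destroy`). A minimal sketch of that behavior, assuming only the documented heap API; the buffer size and contents are illustrative.

```c
// Sketch only: blocks allocated from a deleted heap are migrated to the
// default (backing) heap, so they stay valid and are later freed with mi_free.
#include <mimalloc.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();              // create a private heap
  char* buf = (char*)mi_heap_malloc(heap, 32);  // allocate from that heap
  mi_heap_delete(heap);                         // still-used pages transfer to the backing heap
  buf[0] = 'x';                                 // the block remains valid after the delete
  mi_free(buf);                                 // freed through the default heap
  return 0;
}
```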