Mirror of https://github.com/microsoft/mimalloc.git (synced 2024-12-27 13:33:18 +08:00)
Merge pull request #455 from paulip1792/reserve-hugepages-by-numa-node
add option to reserve huge os pages at a specific numa node.
This commit is contained in: commit 1ebb74cb7a
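This change wires the exported `mi_reserve_huge_os_pages_at` API (called by the new code path in `mi_process_init` below) up to an option and an environment variable. For orientation, here is a minimal sketch of calling that API directly from an application; the page count and NUMA node are illustrative, not taken from the PR:

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // Try to reserve four 1GiB huge OS pages on NUMA node 0,
  // allowing roughly 500ms per page before giving up (as the PR's init code does).
  size_t pages = 4;
  int err = mi_reserve_huge_os_pages_at(pages, /*numa_node*/ 0, /*timeout_msecs*/ pages * 500);
  if (err != 0) {
    fprintf(stderr, "huge OS page reservation failed (error %d)\n", err);
  }
  void* p = mi_malloc(1024);  // later allocations can be served from the reserved area
  mi_free(p);
  return 0;
}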
@@ -782,6 +782,7 @@ typedef enum mi_option_e {
   mi_option_eager_region_commit,       ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
   mi_option_large_os_pages,            ///< Use large OS pages (2MiB in size) if possible
   mi_option_reserve_huge_os_pages,     ///< The number of huge OS pages (1GiB in size) to reserve at the start of the program.
+  mi_option_reserve_huge_os_pages_at,  ///< Reserve huge OS pages at node N.
   mi_option_segment_cache,             ///< The number of segments per thread to keep cached.
   mi_option_page_reset,                ///< Reset page memory after \a mi_option_reset_delay milliseconds when it becomes free.
   mi_option_segment_reset,             ///< Experimental
@@ -1053,6 +1054,8 @@ or via environment variables.
   `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB)
   of a thread to not allocate in the huge OS pages; this prevents threads that are short lived
   and allocate just a little to take up space in the huge OS page area (which cannot be reset).
+- `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N`: where `N` is the NUMA node; this reserves the huge pages at a specific NUMA node
+  (`N` is -1 by default, which reserves the huge pages evenly among the given number of NUMA nodes, or among those detected as available).

 Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write
 for all pages in the original process including the huge OS pages. When any memory is now written in that area, the
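The same configuration can in principle be applied programmatically through the options API; the sketch below assumes it runs before mimalloc has initialized (i.e. before the first allocation through mimalloc). Otherwise the reservation in `mi_process_init` has already happened, and the environment variables or a direct `mi_reserve_huge_os_pages_at` call are the reliable route:

#include <mimalloc.h>

int main(void) {
  // Roughly equivalent to MIMALLOC_RESERVE_HUGE_OS_PAGES=4 plus
  // MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=1; only effective if mimalloc
  // has not initialized yet when these calls run.
  mi_option_set(mi_option_reserve_huge_os_pages, 4);     // reserve four 1GiB pages...
  mi_option_set(mi_option_reserve_huge_os_pages_at, 1);  // ...on NUMA node 1
  void* p = mi_malloc(64);  // triggers initialization here if it has not run yet
  mi_free(p);
  return 0;
}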
@@ -306,6 +306,7 @@ typedef enum mi_option_e {
   mi_option_reset_decommits,
   mi_option_large_os_pages,           // implies eager commit
   mi_option_reserve_huge_os_pages,
+  mi_option_reserve_huge_os_pages_at,
   mi_option_reserve_os_memory,
   mi_option_segment_cache,
   mi_option_page_reset,
@@ -302,6 +302,8 @@ or via environment variables:
   `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB)
   of a thread to not allocate in the huge OS pages; this prevents threads that are short lived
   and allocate just a little to take up space in the huge OS page area (which cannot be reset).
+- `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N`: where `N` is the NUMA node; this reserves the huge pages at a specific NUMA node
+  (`N` is -1 by default, which reserves the huge pages evenly among the given number of NUMA nodes, or among those detected as available).

 Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write
 for all pages in the original process including the huge OS pages. When any memory is now written in that area, the
@@ -494,7 +494,12 @@ void mi_process_init(void) mi_attr_noexcept {

   if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
     size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
-    mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
+    long reserve_at = mi_option_get(mi_option_reserve_huge_os_pages_at);
+    if (reserve_at != -1) {
+      mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500);
+    } else {
+      mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
+    }
   }
   if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
     long ksize = mi_option_get(mi_option_reserve_os_memory);
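The `pages*500` argument is the timeout in milliseconds, i.e. roughly half a second per requested 1GiB page before the reservation gives up. When calling the reservation functions directly, their return value can be checked (0 on success, an error code such as ENOMEM otherwise). Below is a sketch with a hypothetical helper, not part of the PR, that mirrors the selection logic added above:

#include <errno.h>
#include <stdio.h>
#include <mimalloc.h>

// Hypothetical helper: reserve `pages` huge pages on `node`, or interleave
// across the detected NUMA nodes when node == -1 (the option's default).
static int reserve_huge_pages(size_t pages, long node) {
  size_t timeout_msecs = pages * 500;  // ~500ms per 1GiB page, as in mi_process_init
  int err = (node != -1)
      ? mi_reserve_huge_os_pages_at(pages, (int)node, timeout_msecs)
      : mi_reserve_huge_os_pages_interleave(pages, 0, timeout_msecs);
  if (err == ENOMEM)   fprintf(stderr, "not enough 1GiB huge pages available\n");
  else if (err != 0)   fprintf(stderr, "huge page reservation failed (error %d)\n", err);
  return err;
}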
@@ -76,6 +76,7 @@ static mi_option_desc_t options[_mi_option_last] =
 #endif
   { 0, UNINIT, MI_OPTION(large_os_pages) },             // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },      // per 1GiB huge pages
+  { -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) },  // reserve huge pages at node N
   { 0, UNINIT, MI_OPTION(reserve_os_memory) },
   { 0, UNINIT, MI_OPTION(segment_cache) },              // cache N segments per thread
   { 1, UNINIT, MI_OPTION(page_reset) },                 // reset page memory on free
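Since the new entry defaults to -1, the node-specific path is only taken when a node is set explicitly; otherwise the existing interleaved reservation is used. The effective values (including any environment overrides) can be read back at runtime, for example:

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // Read back the effective option values; -1 for the node means "interleave".
  long pages = mi_option_get(mi_option_reserve_huge_os_pages);
  long node  = mi_option_get(mi_option_reserve_huge_os_pages_at);
  printf("reserve_huge_os_pages=%ld, reserve_huge_os_pages_at=%ld\n", pages, node);
  return 0;
}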