Merge pull request #455 from paulip1792/reserve-hugepages-by-numa-node

Add option to reserve huge OS pages at a specific NUMA node.
Daan 2021-11-14 14:56:13 -08:00 committed by GitHub
commit 1ebb74cb7a
5 changed files with 13 additions and 1 deletion


@@ -782,6 +782,7 @@ typedef enum mi_option_e {
   mi_option_eager_region_commit,       ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
   mi_option_large_os_pages,            ///< Use large OS pages (2MiB in size) if possible
   mi_option_reserve_huge_os_pages,     ///< The number of huge OS pages (1GiB in size) to reserve at the start of the program.
+  mi_option_reserve_huge_os_pages_at,  ///< Reserve huge OS pages at node N.
   mi_option_segment_cache,             ///< The number of segments per thread to keep cached.
   mi_option_page_reset,                ///< Reset page memory after \a mi_option_reset_delay milliseconds when it becomes free.
   mi_option_segment_reset,             ///< Experimental
@@ -1053,6 +1054,8 @@ or via environment variables.
   `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB)
   of a thread to not allocate in the huge OS pages; this prevents threads that are short lived
   and allocate just a little to take up space in the huge OS page area (which cannot be reset).
+- `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N`: where `N` is the NUMA node at which to reserve the huge OS pages.
+  (`N` is -1 by default, in which case the huge pages are reserved evenly among the given number of NUMA nodes, or among the available ones as detected.)
 Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write
 for all pages in the original process including the huge OS pages. When any memory is now written in that area, the
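For context, the reservation that these options and environment variables trigger is also available directly through mimalloc's public API. The sketch below is illustrative only (the page count, node number, and timeout are made up, not taken from the change); it reserves the huge pages on a single NUMA node and notes the interleaved variant as the alternative.

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      // Reserve 4 huge (1GiB) OS pages on NUMA node 0, waiting at most 2000 ms.
      if (mi_reserve_huge_os_pages_at(4, 0, 2000) != 0) {
        fprintf(stderr, "could not reserve huge OS pages at node 0\n");
      }
      // Alternative: spread the reservation evenly over the detected NUMA nodes
      // (passing 0 as the node count means "use the available nodes"):
      //   mi_reserve_huge_os_pages_interleave(4, 0, 2000);

      void* p = mi_malloc(64 * 1024 * 1024);  // can now be served from the reserved huge-page area
      mi_free(p);
      return 0;
    }

Both calls return 0 on success; in this sketch a failed reservation is just logged and the program continues with regular OS pages.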


@@ -306,6 +306,7 @@ typedef enum mi_option_e {
   mi_option_reset_decommits,
   mi_option_large_os_pages,           // implies eager commit
   mi_option_reserve_huge_os_pages,
+  mi_option_reserve_huge_os_pages_at,
   mi_option_reserve_os_memory,
   mi_option_segment_cache,
   mi_option_page_reset,
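Since the option is part of the public `mi_option_e` enum, it can also be set programmatically instead of via the environment. A minimal sketch with illustrative values follows; note that the option-driven reservation happens during mimalloc's process initialization, which may already have run before `main` depending on how the library is linked, in which case calling `mi_reserve_huge_os_pages_at` directly (as in the earlier sketch) is the more reliable route.

    #include <mimalloc.h>

    // Programmatic equivalent of MIMALLOC_RESERVE_HUGE_OS_PAGES=2 and
    // MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=1 (both values are illustrative).
    static void configure_huge_page_reservation(void) {
      mi_option_set(mi_option_reserve_huge_os_pages, 2);     // reserve 2 x 1GiB huge OS pages...
      mi_option_set(mi_option_reserve_huge_os_pages_at, 1);  // ...all of them on NUMA node 1
    }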


@@ -302,6 +302,8 @@ or via environment variables:
   `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB)
   of a thread to not allocate in the huge OS pages; this prevents threads that are short lived
   and allocate just a little to take up space in the huge OS page area (which cannot be reset).
+- `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N`: where `N` is the NUMA node at which to reserve the huge OS pages.
+  (`N` is -1 by default, in which case the huge pages are reserved evenly among the given number of NUMA nodes, or among the available ones as detected.)
 Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write
 for all pages in the original process including the huge OS pages. When any memory is now written in that area, the


@@ -494,7 +494,12 @@ void mi_process_init(void) mi_attr_noexcept {
   if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
     size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
-    mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
+    long reserve_at = mi_option_get(mi_option_reserve_huge_os_pages_at);
+    if (reserve_at != -1) {
+      mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500);
+    } else {
+      mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
+    }
   }
   if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
     long ksize = mi_option_get(mi_option_reserve_os_memory);
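The dispatch above can be read as a small standalone helper over the public API; the following sketch is only for clarity (it is not part of the change) and reuses the `pages*500` millisecond timeout heuristic from the hunk.

    #include <mimalloc.h>

    // Reserve `pages` huge (1GiB) OS pages: pinned to one NUMA node when
    // numa_node != -1, otherwise interleaved over the detected nodes.
    static void reserve_huge_pages(size_t pages, long numa_node) {
      if (pages == 0) return;
      if (numa_node != -1) {
        mi_reserve_huge_os_pages_at(pages, (int)numa_node, pages * 500);
      } else {
        mi_reserve_huge_os_pages_interleave(pages, 0, pages * 500);
      }
    }

The per-page timeout bounds how long startup may block while the OS searches for contiguous 1GiB pages; with many pages requested, reservation can otherwise take noticeable time.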


@@ -76,6 +76,7 @@ static mi_option_desc_t options[_mi_option_last] =
 #endif
   { 0, UNINIT, MI_OPTION(large_os_pages) },            // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },     // per 1GiB huge pages
+  { -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
   { 0, UNINIT, MI_OPTION(reserve_os_memory) },
   { 0, UNINIT, MI_OPTION(segment_cache) },             // cache N segments per thread
   { 1, UNINIT, MI_OPTION(page_reset) },                // reset page memory on free
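The `-1` default is what keeps the new path opt-in: unless the option (or the corresponding environment variable) is set, `mi_process_init` continues to use the interleaved reservation. A small sketch of how that default surfaces through the options API, for illustration only:

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      // With MIMALLOC_RESERVE_HUGE_OS_PAGES_AT unset this prints -1, i.e. no
      // specific node: huge pages are spread over the NUMA nodes instead.
      long node = mi_option_get(mi_option_reserve_huge_os_pages_at);
      printf("reserve_huge_os_pages_at = %ld\n", node);
      return 0;
    }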