llama : allow using mmap without PrefetchVirtualMemory, apply GGML_WIN_VER to llama.cpp sources (#14013)

This commit is contained in:
Diego Devesa
2025-06-05 02:57:42 -07:00
committed by GitHub
parent d01d112abb
commit 3a077146a4
4 changed files with 7 additions and 3 deletions

View File

@@ -159,6 +159,11 @@ if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML)
     # ... otherwise assume ggml is added by a parent CMakeLists.txt
 endif()

+if (MINGW)
+    # Target Windows 8 for PrefetchVirtualMemory
+    add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
+endif()
+
 #
 # build the library
 #

View File

@@ -137,7 +137,7 @@ set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")

-if (WIN32)
+if (MINGW)
     set(GGML_WIN_VER "0x602" CACHE STRING "ggml: Windows version")
 endif()

View File

@@ -125,7 +125,6 @@ if (NOT MSVC)
 endif()

 if (MINGW)
-    # Target Windows 8 for PrefetchVirtualMemory
     add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
 endif()


View File

@@ -401,7 +401,7 @@ struct llama_mmap::impl {
             }
         }
 #else
-        throw std::runtime_error("PrefetchVirtualMemory unavailable");
+        LLAMA_LOG_DEBUG("skipping PrefetchVirtualMemory because _WIN32_WINNT < 0x602\n");
 #endif
     }
 }