Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-26 19:55:04 +00:00)
cmake : enable separable compilation for CUDA
ggml-ci
@@ -279,6 +279,10 @@ if (LLAMA_CUBLAS)
     if (LLAMA_CUDA_FORCE_MMQ)
         add_compile_definitions(GGML_CUDA_FORCE_MMQ)
     endif()
+
+    # required for dynamic parallelism
+    set(CMAKE_CUDA_SEPARABLE_COMPILATION ON)
+
     add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
     add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
     if (DEFINED LLAMA_CUDA_DMMV_Y)
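For readers applying the same setting outside this repository, the sketch below shows how CMake exposes separable compilation for CUDA; the project and target names are illustrative and not taken from this commit. The variable makes nvcc emit relocatable device code (-rdc=true), which is required for device-side kernel launches (CUDA dynamic parallelism).

    cmake_minimum_required(VERSION 3.18)
    project(rdc_example LANGUAGES CXX CUDA)

    # Global form, as used in the diff above: initializes the
    # CUDA_SEPARABLE_COMPILATION property of every CUDA target
    # created after this point.
    set(CMAKE_CUDA_SEPARABLE_COMPILATION ON)

    add_executable(rdc_example main.cu)   # hypothetical target

    # Equivalent per-target form:
    set_target_properties(rdc_example PROPERTIES
        CUDA_SEPARABLE_COMPILATION ON)

Setting the cache-level variable globally, as the diff does, means every CUDA target defined afterwards picks up the flag without per-target edits.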