Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-07-27 03:33:46 -04:00)
cuda : fix CUDA_FLAGS not being applied (#10403)
@@ -75,7 +75,6 @@ if (BLAS_FOUND)
     message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}")
 
-    #add_compile_options(${BLAS_LINKER_FLAGS})
     target_compile_options(ggml-blas PRIVATE ${BLAS_LINKER_FLAGS})
 
     if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${GGML_BLAS_VENDOR} MATCHES "Generic" OR ${GGML_BLAS_VENDOR} MATCHES "Intel"))

@@ -149,7 +149,7 @@ if (CUDAToolkit_FOUND)
         list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED})
     endif()
 
-    add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>")
+    target_compile_options(ggml-cuda PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>")
 else()
     message(FATAL_ERROR "CUDA Toolkit not found")
 endif()
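In substance, the CUDA hunk replaces a directory-scoped add_compile_options() call with target_compile_options() on the ggml-cuda target (the BLAS hunk only drops a leftover commented-out add_compile_options line). add_compile_options() affects only targets defined after the call in the current directory, so flags appended once the target already exists are silently ignored, which is presumably why CUDA_FLAGS were not being applied; target_compile_options() attaches the flags to the target itself. The $<COMPILE_LANGUAGE:CUDA> generator expression is kept, so the flags still apply only to CUDA sources. A minimal standalone sketch of the scoping difference (the demo target and warning flags are illustrative, not taken from the commit):

    # Sketch only: 'demo' and the flags below are hypothetical examples.
    cmake_minimum_required(VERSION 3.14)
    project(flag-scope-demo LANGUAGES CXX)

    add_library(demo STATIC demo.cpp)   # target is created here

    # Directory-scoped: recorded on the directory property and picked up only
    # by targets defined *after* this call, so 'demo' above never sees -Wall.
    add_compile_options(-Wall)

    # Target-scoped: attached to 'demo' itself, so the flag is applied to its
    # sources no matter where in the file the target was defined.
    target_compile_options(demo PRIVATE -Wextra)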