mirror of https://github.com/ggml-org/llama.cpp.git
arg : bring back missing ifdef (#9411)
* arg : bring back missing ifdef
* replace with llama_supports_gpu_offload
@@ -56,14 +56,6 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-#if (defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL))
-#define GGML_USE_CUDA_SYCL
-#endif
-
-#if (defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)) || defined(GGML_USE_VULKAN)
-#define GGML_USE_CUDA_SYCL_VULKAN
-#endif
-
 #if defined(LLAMA_USE_CURL)
 #ifdef __linux__
 #include <linux/limits.h>
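For context, the second commit-message bullet names the runtime API that supersedes the deleted compile-time gates: llama_supports_gpu_offload() from llama.h reports whether the linked ggml backend can offload layers to a GPU. Below is a minimal sketch, not the commit's actual code, of how an argument-handling path might swap the removed GGML_USE_CUDA_SYCL #ifdef for that runtime check; the handler name, option name, and warning text are illustrative assumptions.

// Minimal sketch (not the commit's code): replacing a compile-time gate
// with the runtime check llama_supports_gpu_offload() from llama.h.
#include "llama.h"

#include <cstdio>

// Hypothetical handler for a GPU-related CLI option such as --split-mode.
static void handle_gpu_option(const char * opt) {
    // Old approach, removed by this commit: the whole branch disappeared
    // from non-CUDA/SYCL builds at compile time.
    //
    //   #ifdef GGML_USE_CUDA_SYCL
    //       apply_option(opt);
    //   #endif
    //
    // New approach: compile the code unconditionally and ask the backend
    // at runtime whether GPU offload is actually available.
    if (!llama_supports_gpu_offload()) {
        fprintf(stderr, "warning: %s has no effect, this build has no GPU offload support\n", opt);
        return;
    }
    // ... apply the option to the GPU-enabled backend ...
}

int main() {
    handle_gpu_option("--split-mode");
    return 0;
}

One upside of the runtime check, and a plausible motivation for the replacement, is that the same binary behaves correctly whichever backend it was built against, instead of the option list changing shape per build configuration.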