kompute : llama-bench support and ggml_cpu_has_kompute() (#5226)
 ggml.h | 1 +
 1 file changed, 1 insertion(+)
@@ -2266,6 +2266,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_cublas  (void);
     GGML_API int ggml_cpu_has_clblast (void);
     GGML_API int ggml_cpu_has_vulkan  (void);
+    GGML_API int ggml_cpu_has_kompute (void);
     GGML_API int ggml_cpu_has_gpublas (void);
     GGML_API int ggml_cpu_has_sse3    (void);
     GGML_API int ggml_cpu_has_ssse3   (void);