From fb26e95ae7a09b4abd9d434e4e04ad3e0df22abd Mon Sep 17 00:00:00 2001
From: Gabe Goodhart
Date: Wed, 28 May 2025 06:48:53 -0600
Subject: [PATCH] refactor: rename *_is_hybrid -> *_is_hybrid_recurrent

The implementation of the hybrid cache intentionally does not specify the
types of the child caches, so there was a naming mismatch with these
predicate functions that used "hybrid" to imply "hybrid recurrent."

Branch: HybridCache

Signed-off-by: Gabe Goodhart
---
 include/llama.h     | 2 +-
 src/llama-arch.cpp  | 2 +-
 src/llama-arch.h    | 2 +-
 src/llama-model.cpp | 4 ++--
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/llama.h b/include/llama.h
index 168059cdc..10f58b278 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -573,7 +573,7 @@ extern "C" {
     LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
 
     // Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
-    LLAMA_API bool llama_model_is_hybrid(const struct llama_model * model);
+    LLAMA_API bool llama_model_is_hybrid_recurrent(const struct llama_model * model);
 
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize(
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 0bc60565d..cc1e3beba 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -1831,7 +1831,7 @@ bool llm_arch_is_recurrent(const llm_arch & arch) {
     }
 }
 
-bool llm_arch_is_hybrid(const llm_arch & arch) {
+bool llm_arch_is_hybrid_recurrent(const llm_arch & arch) {
     // TODO: There are currently no hybrid models! Once there are, this will be
     //       the place to identify them
     switch (arch) {
diff --git a/src/llama-arch.h b/src/llama-arch.h
index 82b57d2df..4c1c03d6b 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -442,4 +442,4 @@ llm_arch llm_arch_from_string(const std::string & name);
 const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);
 
 bool llm_arch_is_recurrent(const llm_arch& arch);
-bool llm_arch_is_hybrid(const llm_arch& arch);
+bool llm_arch_is_hybrid_recurrent(const llm_arch& arch);
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 39cf6631c..a49a2c09f 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -14384,8 +14384,8 @@ bool llama_model_is_recurrent(const llama_model * model) {
     return llm_arch_is_recurrent(model->arch);
 }
 
-bool llama_model_is_hybrid(const llama_model * model) {
-    return llm_arch_is_hybrid(model->arch);
+bool llama_model_is_hybrid_recurrent(const llama_model * model) {
+    return llm_arch_is_hybrid_recurrent(model->arch);
 }
 
 const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
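
Usage sketch (not part of the patch): with the rename applied, a caller can
distinguish purely recurrent, hybrid-recurrent, and attention-only models via
the public predicates in llama.h. This is a minimal sketch, assuming the
current public loader entry points (llama_model_default_params,
llama_model_load_from_file, llama_model_free); the model path is a
placeholder.

    #include <cstdio>
    #include "llama.h"

    int main() {
        llama_backend_init();

        llama_model_params mparams = llama_model_default_params();
        // placeholder path: substitute a real GGUF model file
        llama_model * model = llama_model_load_from_file("model.gguf", mparams);
        if (model == NULL) {
            fprintf(stderr, "failed to load model\n");
            return 1;
        }

        if (llama_model_is_recurrent(model)) {
            // e.g. Mamba, RWKV: recurrent/state-space layers only
            printf("purely recurrent model\n");
        } else if (llama_model_is_hybrid_recurrent(model)) {
            // e.g. Jamba, Bamba: attention layers interleaved with recurrent ones
            printf("hybrid-recurrent model\n");
        } else {
            printf("attention-only model\n");
        }

        llama_model_free(model);
        llama_backend_free();
        return 0;
    }

Note that, per the TODO in llm_arch_is_hybrid_recurrent, the predicate
returns false for every architecture until hybrid-recurrent models are added
to its switch.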