From 2c90da4c7ec694797f524042aaafbb047a7e65ff Mon Sep 17 00:00:00 2001
From: zhangkaihuo
Date: Fri, 30 May 2025 16:31:48 +0800
Subject: [PATCH] llama : use llm_build_granite for minicpm (#13911)

---
 src/llama-model.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index ecaae6bf0..a1aa51412 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -13260,7 +13260,6 @@ llm_graph_result_ptr llama_model::build_graph(
     switch (arch) {
         case LLM_ARCH_LLAMA:
-        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_llama>(*this, params, gf);
             } break;
@@ -13501,6 +13500,7 @@ llm_graph_result_ptr llama_model::build_graph(
             } break;
         case LLM_ARCH_GRANITE:
         case LLM_ARCH_GRANITE_MOE:
+        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_granite>(*this, params, gf);
             } break;