llama : use llm_build_granite for minicpm (#13911)

Author: zhangkaihuo
Date: 2025-05-30 16:31:48 +08:00
Committed by: GitHub
Parent: ec9e0301fe
Commit: 2c90da4c7e

@@ -13260,7 +13260,6 @@ llm_graph_result_ptr llama_model::build_graph(
     switch (arch) {
         case LLM_ARCH_LLAMA:
-        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_llama>(*this, params, gf);
             } break;
@@ -13501,6 +13500,7 @@ llm_graph_result_ptr llama_model::build_graph(
             } break;
         case LLM_ARCH_GRANITE:
         case LLM_ARCH_GRANITE_MOE:
+        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_granite>(*this, params, gf);
             } break;
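
For context, llama_model::build_graph dispatches on the model architecture to pick a graph-builder class; this commit moves LLM_ARCH_MINICPM from the llm_build_llama branch to the llm_build_granite branch. Below is a minimal, self-contained sketch of that dispatch pattern: the enum values and builder class names mirror the diff, but the simplified constructors, name() helper, and main() driver are hypothetical scaffolding, not the actual llama.cpp code.

// Minimal sketch (assumed scaffolding, not llama.cpp itself) of the
// per-architecture graph-builder dispatch touched by this commit.
#include <cstdio>
#include <memory>

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_MINICPM,
    LLM_ARCH_GRANITE,
    LLM_ARCH_GRANITE_MOE,
};

struct llm_graph_context {
    virtual ~llm_graph_context() = default;
    virtual const char * name() const = 0;
};

// Stand-ins for the real builder classes referenced in the diff.
struct llm_build_llama : llm_graph_context {
    const char * name() const override { return "llm_build_llama"; }
};
struct llm_build_granite : llm_graph_context {
    const char * name() const override { return "llm_build_granite"; }
};

// After this commit, MINICPM falls through to the Granite builder branch
// instead of the plain LLaMA one.
static std::unique_ptr<llm_graph_context> build_graph(llm_arch arch) {
    switch (arch) {
        case LLM_ARCH_LLAMA:
            return std::make_unique<llm_build_llama>();
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
        case LLM_ARCH_MINICPM:
            return std::make_unique<llm_build_granite>();
    }
    return nullptr;
}

int main() {
    // Expected output: "MINICPM -> llm_build_granite"
    printf("MINICPM -> %s\n", build_graph(LLM_ARCH_MINICPM)->name());
    return 0;
}

The diff itself only shows the dispatch change; presumably the Granite builder is a better fit because it already supports the extra scaling hooks MiniCPM relies on, but that rationale is not stated in the commit.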