Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-06-27 20:05:20 +00:00
llama : use llm_build_granite for minicpm (#13911)
@@ -13260,7 +13260,6 @@ llm_graph_result_ptr llama_model::build_graph(
     switch (arch) {
         case LLM_ARCH_LLAMA:
-        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_llama>(*this, params, gf);
             } break;
@@ -13501,6 +13500,7 @@ llm_graph_result_ptr llama_model::build_graph(
             } break;
         case LLM_ARCH_GRANITE:
         case LLM_ARCH_GRANITE_MOE:
+        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_granite>(*this, params, gf);
             } break;
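For readers skimming the change: build_graph() dispatches on the model architecture and constructs the matching graph-builder object, so this commit simply moves LLM_ARCH_MINICPM out of the llm_build_llama branch and into the branch shared by the Granite architectures. Below is a minimal, self-contained sketch of that dispatch pattern; the enum values echo the real ones, but the types, constructor arguments, and the free build_graph() function are simplified stand-ins, not the actual llama.cpp API.

// Simplified sketch of the architecture dispatch touched by this commit.
// Types and signatures are illustrative stand-ins, not the real llama.cpp code.
#include <memory>
#include <stdexcept>

enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_GRANITE,
    LLM_ARCH_GRANITE_MOE,
    LLM_ARCH_MINICPM,
};

struct llm_graph {                         // stand-in for the built graph result
    virtual ~llm_graph() = default;
};
struct llm_build_llama   : llm_graph {};   // builds a LLaMA-style compute graph
struct llm_build_granite : llm_graph {};   // builds a Granite-style compute graph

std::unique_ptr<llm_graph> build_graph(llm_arch arch) {
    std::unique_ptr<llm_graph> llm;
    switch (arch) {
        case LLM_ARCH_LLAMA:
            {
                llm = std::make_unique<llm_build_llama>();
            } break;
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
        case LLM_ARCH_MINICPM:   // after this commit, MiniCPM shares the Granite branch
            {
                llm = std::make_unique<llm_build_granite>();
            } break;
        default:
            throw std::runtime_error("unknown architecture");
    }
    return llm;
}

int main() {
    auto g = build_graph(LLM_ARCH_MINICPM); // now constructs the Granite-style builder
    (void) g;
    return 0;
}

In the real source the builder constructors take the model, the graph params, and the ggml graph (the `*this, params, gf` arguments visible in the diff); routing MiniCPM through llm_build_granite lets it reuse the Granite graph construction instead of the plain LLaMA one.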