mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-28 04:15:21 +00:00)
llama : use llm_build_granite for minicpm (#13911)
@@ -13260,7 +13260,6 @@ llm_graph_result_ptr llama_model::build_graph(
     switch (arch) {
         case LLM_ARCH_LLAMA:
-        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_llama>(*this, params, gf);
             } break;
@@ -13501,6 +13500,7 @@ llm_graph_result_ptr llama_model::build_graph(
             } break;
         case LLM_ARCH_GRANITE:
         case LLM_ARCH_GRANITE_MOE:
+        case LLM_ARCH_MINICPM:
             {
                 llm = std::make_unique<llm_build_granite>(*this, params, gf);
             } break;
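For context, here is a minimal standalone sketch of the dispatch pattern this commit changes: LLM_ARCH_MINICPM is moved out of the plain Llama case group and into the Granite group, so MiniCPM graph construction goes through the Granite builder. The enum, builder classes, and build_graph signature below are simplified stand-ins, not the actual llama.cpp source.

// Standalone sketch (simplified stand-ins, not llama.cpp itself) of the
// case grouping after this commit: MINICPM falls through to the Granite builder.
#include <cstdio>
#include <memory>

enum llm_arch { LLM_ARCH_LLAMA, LLM_ARCH_MINICPM, LLM_ARCH_GRANITE, LLM_ARCH_GRANITE_MOE };

struct llm_graph         { virtual ~llm_graph() = default; virtual const char * name() const = 0; };
struct llm_build_llama   : llm_graph { const char * name() const override { return "llama";   } };
struct llm_build_granite : llm_graph { const char * name() const override { return "granite"; } };

static std::unique_ptr<llm_graph> build_graph(llm_arch arch) {
    switch (arch) {
        case LLM_ARCH_LLAMA:
            return std::make_unique<llm_build_llama>();
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
        case LLM_ARCH_MINICPM:            // grouped with Granite after this commit
            return std::make_unique<llm_build_granite>();
    }
    return nullptr;
}

int main() {
    std::printf("%s\n", build_graph(LLM_ARCH_MINICPM)->name()); // prints "granite"
}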