From fbdfefe74e736f1a3687283c25ac21b11ba07b2e Mon Sep 17 00:00:00 2001
From: Xuan-Son Nguyen
Date: Sat, 22 Mar 2025 23:28:19 +0100
Subject: [PATCH] llama : gemma3 : use output tensor if it exists in model
 weight (#12506)

* llama : gemma3 : use output tensor if it exists in model weight

* also add to the llm_tensor_names
---
 gguf-py/gguf/constants.py | 1 +
 src/llama-arch.cpp        | 1 +
 src/llama-model.cpp       | 7 ++++++-
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index cc48913d9..13cca7ab0 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -1113,6 +1113,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
     ],
     MODEL_ARCH.GEMMA3: [
         MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_Q_NORM,
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 9debb56cc..8664f8963 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -778,6 +778,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
         {
             { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
             { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT,      "output" },
             { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
             { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
             { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 26ac5e99b..0ae754154 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -2571,7 +2571,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                 // output
                 output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
 
                 for (int i = 0; i < n_layer; ++i) {
                     auto & layer = layers[i];
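
The llama-model.cpp hunk boils down to an optional-tensor fallback: the loader first asks for a dedicated output.weight with TENSOR_NOT_REQUIRED, and only if that lookup returns NULL does it duplicate token_embd.weight as before (tied embeddings). Below is a minimal, self-contained C++ sketch of that pattern; Tensor, TensorMap and load_optional are hypothetical stand-ins for illustration, not llama.cpp API.

// Sketch of the fallback pattern from the patch: prefer a dedicated
// output (lm_head) tensor when the model file provides one, otherwise
// reuse the input token embedding (tied weights).
// Tensor, TensorMap and load_optional are hypothetical stand-ins.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Tensor {
    std::string        name;
    std::vector<float> data; // weights would live here
};

using TensorMap = std::map<std::string, Tensor>;

// Return the named tensor, or nullptr if the file does not contain it
// (mirrors create_tensor(..., TENSOR_NOT_REQUIRED) returning NULL).
static const Tensor * load_optional(const TensorMap & weights, const std::string & name) {
    auto it = weights.find(name);
    return it == weights.end() ? nullptr : &it->second;
}

int main() {
    // Pretend this came from a GGUF file that only stores the tied token embedding;
    // newer Gemma 3 exports may also contain a separate "output.weight".
    TensorMap weights = {
        { "token_embd.weight", { "token_embd.weight", { 0.1f, 0.2f } } },
    };

    // 1) Prefer a dedicated output tensor if the model file has one.
    const Tensor * output = load_optional(weights, "output.weight");

    // 2) Otherwise fall back to the input embedding (tied weights). In llama.cpp the
    //    TENSOR_DUPLICATED flag additionally duplicates it so it can be offloaded;
    //    here we simply reuse the same tensor.
    if (output == nullptr) {
        output = load_optional(weights, "token_embd.weight");
        std::printf("no output.weight found, reusing %s\n", output->name.c_str());
    } else {
        std::printf("using dedicated %s\n", output->name.c_str());
    }
    return 0;
}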