Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-29 12:35:16 +00:00)
fix errors in conversion.
convert_hf_to_gguf.py
@@ -6305,8 +6305,11 @@ class SmolLM3Model(LlamaModel):
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
 
-        if self.model.config.no_rope_layers is not None:
-            self.gguf_writer.add_array("smollm3.no_rope_layers", self.model.config.no_rope_layers, gguf.GGUFValueType.INT32)
+        # if self.model.config.no_rope_layers is not None:
+        #     self.gguf_writer.add_array("smollm3.no_rope_layers", self.model.config.no_rope_layers, gguf.GGUFValueType.INT32)
+        no_rope_layers = self.hparams.get("no_rope_layers")
+        if no_rope_layers is not None:
+            self.gguf_writer.add_array("smollm3.no_rope_layers", no_rope_layers)
 
 
 ###### CONVERSION LOGIC ######
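Why this likely fixes the conversion error: the model classes in convert_hf_to_gguf.py keep the parsed config.json in the self.hparams dict rather than a loaded transformers model, so the old self.model.config attribute access would presumably fail at conversion time; the new code also drops the explicit gguf.GGUFValueType.INT32 argument, letting the writer infer the element type. A minimal, self-contained sketch of the fixed lookup pattern, with a made-up hparams dict standing in for the parsed config (the "no_rope_layers" value here is illustrative, not from the source):

# Sketch of the hparams-based lookup used after the fix.
hparams = {
    "num_hidden_layers": 36,            # illustrative values only
    "no_rope_layers": [0, 0, 0, 1],     # hypothetical per-layer flags
}

# dict.get() returns None instead of raising when the key is absent,
# so architectures whose config.json omits the field are skipped cleanly.
no_rope_layers = hparams.get("no_rope_layers")
if no_rope_layers is not None:
    # In the converter this is self.gguf_writer.add_array(...); here we
    # just print the key/value pair that would be written.
    print("smollm3.no_rope_layers =", no_rope_layers)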
gguf-py/gguf/constants.py
@@ -2114,6 +2114,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.ATTN_V,
         MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
         MODEL_TENSOR.FFN_NORM,
         MODEL_TENSOR.FFN_GATE,
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
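For context, MODEL_TENSORS declares which tensor types are valid for each architecture, and the converter consults that list when mapping tensors; a type missing from the list cannot be mapped. A toy sketch of that gating, using stand-in definitions rather than the real gguf-py enums:

from enum import Enum, auto

# Stand-ins for gguf-py's MODEL_TENSOR enum and per-architecture registry.
class ToyTensor(Enum):
    ATTN_OUT = auto()
    ATTN_ROT_EMBD = auto()
    FFN_UP = auto()

# Before the fix the architecture's list omits ATTN_ROT_EMBD ...
allowed_before = [ToyTensor.ATTN_OUT, ToyTensor.FFN_UP]
# ... and after the fix it is registered.
allowed_after = allowed_before + [ToyTensor.ATTN_ROT_EMBD]

def check(tensor: ToyTensor, allowed: list[ToyTensor]) -> str:
    # The real converter errors out on a tensor type that is not
    # registered for the target architecture.
    return "ok" if tensor in allowed else "rejected"

print(check(ToyTensor.ATTN_ROT_EMBD, allowed_before))  # rejected
print(check(ToyTensor.ATTN_ROT_EMBD, allowed_after))   # ok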