Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-15 20:53:00 -04:00)
Add LLaDA 8b Diffusion model (#14771)
* Add support for Llada-8b: diffusion model
* Add README
* Fix README and convert_hf_to_gguf
* convert_hf_to_gguf.py: address review comments
* Make everything in a single example
* Remove model-specific sampling
* Remove unused argmax
* Remove braced initializers, improve README.md a bit
* Add diffusion-specific gguf params in set_vocab, remove setting rope_theta and rms_norm_eps
* Remove adding the mask token
* Move add_add_bos_token to set_vocab
* Use add_bool in gguf_writer.py
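The last few items concern how conversion metadata is written: diffusion-specific GGUF parameters are emitted from set_vocab, BOS handling goes through add_add_bos_token there as well, and the values are written with GGUFWriter's generic add_bool helper rather than new model-specific setters. The snippet below is a minimal, hypothetical sketch of that flow using the public gguf-py API; the architecture string "llada", the hyperparameter values, and the key name "diffusion.shift_logits" are placeholders for illustration, not taken from the PR.

import gguf

# Hypothetical converter output path and architecture name.
writer = gguf.GGUFWriter("llada-8b.gguf", arch="llada")

# Standard hyperparameters (values are illustrative only).
writer.add_block_count(32)
writer.add_context_length(4096)
writer.add_embedding_length(4096)

# Diffusion-specific parameters written as plain booleans via add_bool();
# "diffusion.shift_logits" is a made-up key used only for this sketch.
writer.add_bool("diffusion.shift_logits", True)

# BOS handling moved into set_vocab; the underlying writer call is generic.
writer.add_add_bos_token(True)

# Emit header and key/value metadata (no tensors in this sketch).
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.close()

Relying on the existing add_bool helper keeps gguf_writer.py free of one-off setters for a single model family, which appears to be the intent behind the "use add_bool" item above.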
@@ -32,6 +32,7 @@ class TensorNameMap:
             "model.word_embeddings",             # bailingmoe
             "language_model.model.embed_tokens", # llama4
             "encoder",                           # neobert
+            "model.transformer.wte",             # llada
         ),

         # Token type embeddings
@@ -71,6 +72,7 @@ class TensorNameMap:
             "head",                     # rwkv
             "head.out",                 # wavtokenizer
             "lm_head",                  # llama4
+            "model.transformer.ff_out", # llada
         ),

         # Output norm
@@ -94,6 +96,7 @@ class TensorNameMap:
             "model.ln_out",              # rwkv7
             "backbone.final_layer_norm", # wavtokenizer
             "model.norm",                # llama4
+            "model.transformer.ln_f",    # llada
         ),

         # Rope frequencies
@@ -139,6 +142,7 @@ class TensorNameMap:
             "model.layers.{bid}.input_layernorm",       # llama4
             "transformer_encoder.{bid}.attention_norm", # neobert
             "model.layers.{bid}.operator_norm",         # lfm2
+            "model.transformer.blocks.{bid}.attn_norm", # llada
         ),

         # Attention norm 2
@@ -183,6 +187,7 @@ class TensorNameMap:
             "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok
             "transformer.h.{bid}.attn.attention.q_proj",                  # exaone
             "model.layers.{bid}.self_attn.q_proj",                        # llama4
+            "model.transformer.blocks.{bid}.q_proj",                      # llada
         ),

         # Attention key
@@ -199,6 +204,7 @@ class TensorNameMap:
             "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok
             "transformer.h.{bid}.attn.attention.k_proj",                # exaone
             "model.layers.{bid}.self_attn.k_proj",                      # llama4
+            "model.transformer.blocks.{bid}.k_proj",                    # llada
         ),

         # Attention value
@@ -214,6 +220,7 @@ class TensorNameMap:
             "transformer.decoder_layer.{bid}.multi_head_attention.value",# Grok
             "transformer.h.{bid}.attn.attention.v_proj",                  # exaone
             "model.layers.{bid}.self_attn.v_proj",                        # llama4
+            "model.transformer.blocks.{bid}.v_proj",                      # llada
         ),

         # Attention output
@@ -246,6 +253,7 @@ class TensorNameMap:
             "transformer.h.{bid}.attn.attention.out_proj", # exaone
             "model.layers.{bid}.self_attn.o_proj",         # llama4
             "transformer_encoder.{bid}.wo",                # neobert
+            "model.transformer.blocks.{bid}.attn_out",     # llada
         ),

         # Attention output norm
@@ -291,6 +299,7 @@ class TensorNameMap:
             "model.layers.{bid}.post_attention_layernorm", # llama4
             "transformer_encoder.{bid}.ffn_norm",          # neobert
             "model.layers.layers.{bid}.pre_mlp_norm",      # plamo2
+            "model.transformer.blocks.{bid}.ff_norm",      # llada
         ),

         # Post feed-forward norm
@@ -364,6 +373,7 @@ class TensorNameMap:
             "model.layers.{bid}.feed_forward.up_proj", # llama4 jamba granite-hybrid
             "transformer_encoder.{bid}.ffn.w12",       # neobert
             "model.layers.{bid}.block_sparse_moe.up",  # smallthinker
+            "model.transformer.blocks.{bid}.up_proj",  # llada
         ),

         MODEL_TENSOR.FFN_UP_EXP: (
@@ -405,6 +415,7 @@ class TensorNameMap:
             "transformer.h.{bid}.mlp.c_fc_0",            # exaone
             "model.layers.{bid}.feed_forward.gate_proj", # llama4 jamba granite-hybrid
             "model.layers.{bid}.block_sparse_moe.gate",  # smallthinker
+            "model.transformer.blocks.{bid}.ff_proj",    # llada
         ),

         MODEL_TENSOR.FFN_GATE_EXP: (
@@ -454,6 +465,7 @@ class TensorNameMap:
             "model.layers.{bid}.feed_forward.down_proj", # llama4 jamba granite-hybrid
             "transformer_encoder.{bid}.ffn.w3",          # neobert
             "model.layers.{bid}.block_sparse_moe.down",  # smallthinker
+            "model.transformer.blocks.{bid}.ff_out",     # llada
         ),

         MODEL_TENSOR.FFN_DOWN_EXP: (
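For context, the new "llada" entries above are consumed through the same lookup path as every other architecture: the converter builds a TensorNameMap for the target architecture and resolves each Hugging Face tensor name to its canonical GGUF name. The snippet below is a small sketch of that lookup; MODEL_ARCH.LLADA and the block count are assumptions for illustration, since the architecture enum is added elsewhere in the PR and its exact name may differ.

import gguf

# Build the name map for the (assumed) LLaDA architecture with 32 blocks.
name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLADA, 32)

# "model.transformer.blocks.0.q_proj.weight" matches the llada entry added
# above under the attention-query tensor, so it resolves to the canonical
# GGUF tensor name.
new_name = name_map.get_name("model.transformer.blocks.0.q_proj.weight",
                             try_suffixes=(".weight", ".bias"))
print(new_name)  # expected: blk.0.attn_q.weight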