mirror of https://github.com/ggml-org/llama.cpp.git
* kv-cache : prepare for SWA ggml-ci
* kv-cache : initial iSWA implementation ggml-ci
* kv-cache : rework error recovery logic ggml-ci
* models : fix Phi-3 SWA parameters ggml-ci
* model : adjust Granite to rope factor changes ggml-ci
* server : check if context can do shifts ggml-ci
* iswa : for now, always enable shifts (experiment) ggml-ci
* kv-cache : simplify SWA logic ggml-ci
* kv-cache : apply defrag when we fail to find slots for the batch ggml-ci
* llama : update docs about llama_decode ggml-ci
* kv-cache : update warning logs when no space for the batch is available ggml-ci
* llama : add llama_kv_self_seq_pos_min()
* kv-cache : keep track of partial SWA computes and print warnings
* server : disallow use cases involving partial SWA context ggml-ci
* llama : add param to control SWA cache size ggml-ci
* minor : clean-up ggml-ci
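A minimal sketch of how the new llama_kv_self_seq_pos_min() can back the "disallow partial SWA context" check in a server; the helper below is ours, not part of the actual server code, and the assumption is that the function returns the smallest position still present in the KV cache for the given sequence:

    #include "llama.h"

    // with SWA, positions below pos_min were evicted by the sliding window;
    // a cached prompt prefix can only be reused while position 0 is still present
    static bool sketch_prefix_is_intact(struct llama_context * ctx, llama_seq_id seq) {
        const llama_pos pos_min = llama_kv_self_seq_pos_min(ctx, seq);
        return pos_min == 0;
    }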
171 lines · 5.1 KiB · C++
#pragma once

#include "llama.h"

#include <array>
#include <type_traits> // for std::is_trivially_copyable in the static_assert below

// bump if necessary
#define LLAMA_MAX_LAYERS  512
#define LLAMA_MAX_EXPERTS 256 // DeepSeekV3

enum llama_expert_gating_func_type {
    LLAMA_EXPERT_GATING_FUNC_TYPE_NONE    = 0,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
};
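
// A minimal scalar sketch of what the two gating modes compute over the raw
// expert scores of one token; illustrative only - the real implementation
// builds the equivalent ggml graph ops, and the sketch_ name is not part of
// the API.

#include <cmath>

static inline void sketch_apply_gating(enum llama_expert_gating_func_type type, float * scores, int n_expert) {
    switch (type) {
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: {
            // normalize the scores so they sum to 1 across the experts
            float max_s = scores[0];
            for (int i = 1; i < n_expert; i++) max_s = scores[i] > max_s ? scores[i] : max_s;
            float sum = 0.0f;
            for (int i = 0; i < n_expert; i++) { scores[i] = expf(scores[i] - max_s); sum += scores[i]; }
            for (int i = 0; i < n_expert; i++) scores[i] /= sum;
        } break;
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
            // DeepSeek-V3 style: each expert is gated independently
            for (int i = 0; i < n_expert; i++) scores[i] = 1.0f / (1.0f + expf(-scores[i]));
            break;
        case LLAMA_EXPERT_GATING_FUNC_TYPE_NONE:
        default:
            break;
    }
}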

enum llama_swa_type {
    LLAMA_SWA_TYPE_NONE     = 0,
    LLAMA_SWA_TYPE_STANDARD = 1,
    LLAMA_SWA_TYPE_CHUNKED  = 2,
};
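
// A sketch of the masking rule each mode implies, for a causal query at
// position p1 attending to a key at position p0 (p0 <= p1). This mirrors the
// intended semantics; the authoritative mask construction lives in the
// KV cache code, and the sketch_ name is not part of the API.

static inline bool sketch_swa_is_masked(enum llama_swa_type type, uint32_t n_swa, int32_t p0, int32_t p1) {
    switch (type) {
        case LLAMA_SWA_TYPE_STANDARD:
            // keys that fell out of the sliding window are masked out
            return p1 - p0 >= (int32_t) n_swa;
        case LLAMA_SWA_TYPE_CHUNKED:
            // llama4-style: keys before the start of the query's chunk are masked out
            return p0 < (p1 / (int32_t) n_swa) * (int32_t) n_swa;
        case LLAMA_SWA_TYPE_NONE:
        default:
            return false;
    }
}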

struct llama_hparams_posnet {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams_convnext {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams {
    bool vocab_only;
    bool rope_finetuned;
    bool use_par_res;
    bool swin_norm;

    uint32_t n_ctx_train; // context size the model was trained on
    uint32_t n_embd;
    uint32_t n_embd_features = 0;
    uint32_t n_layer;
    uint32_t n_rot;
    uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
    uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
    uint32_t n_expert = 0;
    uint32_t n_expert_used = 0;
    uint32_t n_rel_attn_bkts = 0;

    // note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
    uint32_t n_embd_head_k_mla = 0;
    uint32_t n_embd_head_v_mla = 0;
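    // (e.g. for DeepSeek-V3 - an assumption for illustration: 128 non-rope
    //  dims + 64 rope dims give n_embd_head_k_mla = 192, n_embd_head_v_mla = 128)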

    // for WavTokenizer
    struct llama_hparams_posnet   posnet;
    struct llama_hparams_convnext convnext;

    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;

    uint32_t n_layer_dense_lead = 0;
    uint32_t n_lora_q           = 0;
    uint32_t n_lora_kv          = 0;
    uint32_t n_ff_exp           = 0;
    uint32_t n_ff_shexp         = 0;
    uint32_t n_expert_shared    = 0;
    uint32_t n_norm_groups      = 0;

    float    expert_weights_scale = 0.0;
    bool     expert_weights_norm  = false;
    uint32_t expert_gating_func   = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
    uint32_t moe_every_n_layers   = 0;

    float f_norm_eps;
    float f_norm_rms_eps;
    float f_norm_group_eps;

    float f_attn_logit_softcapping  = 50.0f;
    float f_final_logit_softcapping = 30.0f;

    // for RWKV
    uint32_t rescale_every_n_layers = 0;
    uint32_t time_mix_extra_dim     = 0;
    uint32_t time_decay_extra_dim   = 0;
    uint32_t wkv_head_size          = 0;
    uint32_t token_shift_count      = 2;
    uint32_t n_lora_decay           = 0;
    uint32_t n_lora_iclr            = 0;
    uint32_t n_lora_value_res_mix   = 0;
    uint32_t n_lora_gate            = 0;

    float    rope_attn_factor = 1.0f;
    float    rope_freq_base_train;
    float    rope_freq_base_train_swa;
    float    rope_freq_scale_train;
    float    rope_freq_scale_train_swa;
    uint32_t n_ctx_orig_yarn;
    float    rope_yarn_log_mul;

    std::array<int, 4> rope_sections;

    // Sliding Window Attention (SWA)
    llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

    uint32_t n_swa = 0;         // the size of the sliding window (0 - no SWA)
    uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention
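    // e.g. with n_swa_pattern == 4 (a hypothetical value), layers 0, 1, 2 use
    // the sliding window and every 4th layer (il % 4 == 3) keeps full attention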

    // for State Space Models
    uint32_t ssm_d_conv  = 0;
    uint32_t ssm_d_inner = 0;
    uint32_t ssm_d_state = 0;
    uint32_t ssm_dt_rank = 0;

    bool ssm_dt_b_c_rms = false;

    float f_clamp_kqv      = 0.0f;
    float f_max_alibi_bias = 0.0f;
    float f_logit_scale    = 0.0f;

    // Additional scale factors (Granite/Granite MoE)
    float f_residual_scale  = 0.0f;
    float f_embedding_scale = 0.0f;
    float f_attention_scale = 0.0f;

    bool causal_attn   = true;
    bool use_alibi     = false;
    bool attn_soft_cap = false;
    bool use_kq_norm   = true;

    // llama4
    uint32_t n_moe_layer_step        = 0;
    uint32_t n_no_rope_layer_step    = 4;
    uint32_t n_attn_temp_floor_scale = 8192;
    float    f_attn_temp_scale       = 0.1;
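    // (sketch, assumption) the last two combine as a per-position temperature
    // on the attention logits of the NoPE layers:
    //   attn_temp(pos) = 1.0f + f_attn_temp_scale * logf(1.0f + floorf((pos + 1.0f) / n_attn_temp_floor_scale))
    // while n_no_rope_layer_step == 4 means every 4th layer skips RoPE entirely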

    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;

    enum llama_pooling_type      pooling_type            = LLAMA_POOLING_TYPE_NONE;
    enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;

    uint32_t n_head(uint32_t il = 0) const;

    uint32_t n_head_kv(uint32_t il = 0) const;

    uint32_t n_ff(uint32_t il = 0) const;

    uint32_t n_gqa(uint32_t il = 0) const;

    // dimension of key embeddings across all k-v heads
    uint32_t n_embd_k_gqa(uint32_t il = 0) const;

    // dimension of value embeddings across all k-v heads
    uint32_t n_embd_v_gqa(uint32_t il = 0) const;

    // dimension of the rolling state embeddings
    // corresponds to Mamba's conv_states size or RWKV's token_shift states size
    uint32_t n_embd_k_s() const;

    // dimension of the recurrent state embeddings
    uint32_t n_embd_v_s() const;
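    // (sketch of the expected sizes; the authoritative definitions live in
    //  llama-hparams.cpp: Mamba uses (ssm_d_conv - 1)*ssm_d_inner for K and
    //  ssm_d_state*ssm_d_inner for V, while RWKV uses token_shift_count*n_embd
    //  and wkv_head_size*n_embd respectively)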

    bool is_swa(uint32_t il) const;
};

static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
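
// A minimal sketch of the per-layer accessor semantics declared above;
// illustrative only - the authoritative definitions live in llama-hparams.cpp,
// and the sketch_ names are not part of the API.

static inline uint32_t sketch_n_head   (const llama_hparams & hp, uint32_t il) { return hp.n_head_arr   [il]; }
static inline uint32_t sketch_n_head_kv(const llama_hparams & hp, uint32_t il) { return hp.n_head_kv_arr[il]; }

// grouped-query attention factor: how many q heads share a single k-v head
static inline uint32_t sketch_n_gqa(const llama_hparams & hp, uint32_t il) {
    const uint32_t n_head_kv = sketch_n_head_kv(hp, il);
    return n_head_kv == 0 ? 0 : sketch_n_head(hp, il) / n_head_kv;
}

// total K (resp. V) dimensions stored per token in the cache for layer il
static inline uint32_t sketch_n_embd_k_gqa(const llama_hparams & hp, uint32_t il) {
    return hp.n_embd_head_k * sketch_n_head_kv(hp, il);
}
static inline uint32_t sketch_n_embd_v_gqa(const llama_hparams & hp, uint32_t il) {
    return hp.n_embd_head_v * sketch_n_head_kv(hp, il);
}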