#include "llama-hparams.h"

#include "ggml.h"

void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
    for (uint32_t il = 0; il < n_layer; ++il) {
        swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
    }
}
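
// Worked example (values chosen for illustration): with n_pattern == 3 the
// assignment above yields swa_layers == { swa, swa, dense, swa, swa, dense, ... }:
//   il == 0: 0 % 3 == 0 <  2 -> swa
//   il == 1: 1 % 3 == 1 <  2 -> swa
//   il == 2: 2 % 3 == 2, not < 2 -> dense
// n_pattern == 0 marks every layer as SWA, n_pattern == 1 marks every layer as dense.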

bool llama_hparams::is_swa_any() const {
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (swa_layers[il]) {
            return true;
        }
    }

    return false;
}

uint32_t llama_hparams::n_head(uint32_t il) const {
    if (il < n_layer) {
        return n_head_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_head_kv(uint32_t il) const {
    if (il < n_layer) {
        return n_head_kv_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_ff(uint32_t il) const {
    if (il < n_layer) {
        return n_ff_arr[il];
    }

    GGML_ABORT("fatal error");
}
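
// Usage sketch (hypothetical caller, for illustration): the per-layer accessors
// above are intended to be queried with the layer index while building each
// layer, and abort instead of reading out of bounds when il >= n_layer:
//
//   for (uint32_t il = 0; il < hparams.n_layer; ++il) {
//       const uint32_t n_head    = hparams.n_head(il);
//       const uint32_t n_head_kv = hparams.n_head_kv(il);
//       const uint32_t n_ff      = hparams.n_ff(il);
//       // ... build the attention and FFN blocks of layer il ...
//   }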

uint32_t llama_hparams::n_gqa(uint32_t il) const {
    const uint32_t n_head    = this->n_head(il);
    const uint32_t n_head_kv = this->n_head_kv(il);

    if (n_head_kv == 0) {
        return 0;
    }

    return n_head/n_head_kv;
}
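
// For illustration: with n_head(il) == 32 and n_head_kv(il) == 8 the
// grouped-query attention factor is 32/8 == 4, i.e. 4 query heads share each
// KV head; a layer without attention (n_head_kv == 0) reports 0.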

uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_k * n_head_kv;
}

uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_v * n_head_kv;
}
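
// For illustration: with n_embd_head_k == n_embd_head_v == 128 and
// n_head_kv(il) == 8, both n_embd_k_gqa(il) and n_embd_v_gqa(il) are
// 128*8 == 1024, i.e. the per-token row size of the K and V caches for that layer.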

uint32_t llama_hparams::n_embd_r() const {
    if (wkv_head_size != 0) {
        // for RWKV models
        return token_shift_count * n_embd;
    }

    // TODO: maybe support other convolution strides than 1
    // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
    // Corresponds to Mamba's conv_states size
    return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * (ssm_d_inner + 2*ssm_n_group*ssm_d_state);
}
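
// For illustration, with hypothetical Mamba-2 style values ssm_d_conv == 4,
// ssm_d_inner == 8192, ssm_n_group == 8, ssm_d_state == 128:
//   (4 - 1) * (8192 + 2*8*128) == 3 * 10240 == 30720
// elements of convolution state are kept per sequence: ssm_d_conv - 1 columns
// for each of the ssm_d_inner + 2*ssm_n_group*ssm_d_state channels.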

uint32_t llama_hparams::n_embd_s() const {
    if (wkv_head_size != 0) {
        // corresponds to RWKV's wkv_states size
        return n_embd * wkv_head_size;
    }

    // corresponds to Mamba's ssm_states size
    return ssm_d_state * ssm_d_inner;
}
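
// For illustration, with hypothetical values:
//   RWKV:  n_embd == 2048,    wkv_head_size == 64  -> 2048*64 == 131072
//   Mamba: ssm_d_state == 16, ssm_d_inner == 4096  -> 16*4096 == 65536
// elements of recurrent state are kept per sequence.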

bool llama_hparams::is_recurrent(uint32_t il) const {
    return recurrent_layer_arr[il];
}
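
// Note: recurrent_layer_arr is per-layer so that hybrid architectures can mix
// recurrent (e.g. Mamba/RWKV-style) blocks with regular attention blocks;
// for purely recurrent models every entry is true.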

uint32_t llama_hparams::n_pos_per_embd() const {
    return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
}
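
// Note: with M-RoPE each token carries multiple position components (e.g. the
// temporal and spatial indices used by multimodal rotary embeddings), hence 4
// positions per embedding instead of the single linear position used otherwise.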

bool llama_hparams::is_swa(uint32_t il) const {
    if (il < n_layer) {
        return swa_layers[il];
    }

    GGML_ABORT("fatal error");
}