Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-04 00:08:38 -04:00)
* kv-cache : prepare K/V buffers for separation ggml-ci
* batched-bench : fix oob write ggml-ci
* llama : add "virtual sequences" ggml-ci
* llama : use "stream" vs "virtual sequence" ggml-ci
* graph : fix stream splitting when KV cache is not used ggml-ci
* kv-cache : add multi-stream save/load support ggml-ci
* llama : add "--attn-streams" flag ggml-ci
* kv-cache : fix handling when find_slot fails ggml-ci
* kv-cache : restore find_slot impl ggml-ci
* kv-cache : add comments
* kv-cache : add bounds checks for sequence id ggml-ci
* cont : add n_seq_max to batch allocr ggml-ci
* kv-cache : perform stream copies lazily after llama_synchronize ggml-ci
* kv-cache : avoid throwing exceptions across the C boundary ggml-ci
* CUDA: 4D FlashAttention support (#14628)
  * CUDA: 4D FlashAttention support
  * CUDA: fix WMMA FA kernel
* llama : rename attn_streams -> kv_unified ggml-ci
* common : rename kv_split -> kv_unified ggml-ci

Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
150 lines
3.4 KiB
C++
#include "llama-hparams.h"
|
|
|
|
#include "ggml.h"
|
|
|
|
void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
    for (uint32_t il = 0; il < n_layer; ++il) {
        swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
    }
}
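For illustration, this is the layer pattern the loop above produces for a hypothetical 6-layer model (the layer count is an assumption, not something read from this file):

// sketch, assuming n_layer == 6 (1 = SWA layer, 0 = dense layer):
//   set_swa_pattern(0) -> swa_layers = {1, 1, 1, 1, 1, 1}   all layers use SWA
//   set_swa_pattern(1) -> swa_layers = {0, 0, 0, 0, 0, 0}   all layers are dense
//   set_swa_pattern(3) -> swa_layers = {1, 1, 0, 1, 1, 0}   every 3rd layer is dense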
bool llama_hparams::is_swa_any() const {
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (swa_layers[il]) {
            return true;
        }
    }

    return false;
}
uint32_t llama_hparams::n_head(uint32_t il) const {
    if (il < n_layer) {
        return n_head_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_head_kv(uint32_t il) const {
    if (il < n_layer) {
        return n_head_kv_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_ff(uint32_t il) const {
    if (il < n_layer) {
        return n_ff_arr[il];
    }

    GGML_ABORT("fatal error");
}
uint32_t llama_hparams::n_gqa(uint32_t il) const {
    const uint32_t n_head    = this->n_head(il);
    const uint32_t n_head_kv = this->n_head_kv(il);

    if (n_head_kv == 0) {
        return 0;
    }

    return n_head/n_head_kv;
}
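A worked example of the ratio computed above, using hypothetical head counts (not taken from any particular model):

// sketch with assumed values: 32 query heads sharing 8 K/V heads
//   n_gqa(il) = n_head(il) / n_head_kv(il) = 32 / 8 = 4 query heads per K/V head
// a layer with n_head_kv(il) == 0 (e.g. an attention-free/recurrent layer) yields n_gqa(il) == 0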
uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_k * n_head_kv;
}

uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_v * n_head_kv;
}
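Continuing with hypothetical numbers, these two getters give the per-token width of the K and V cache rows for a layer:

// sketch with assumed values: n_embd_head_k == n_embd_head_v == 128, n_head_kv(il) == 8
//   n_embd_k_gqa(il) = 128 * 8 = 1024 K elements cached per token for this layer
//   n_embd_v_gqa(il) = 128 * 8 = 1024 V elements cached per token for this layer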
bool llama_hparams::is_n_embd_k_gqa_variable() const {
    const uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_k_gqa(il)) {
            return true;
        }
    }

    return false;
}

bool llama_hparams::is_n_embd_v_gqa_variable() const {
    const uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_v_gqa(il)) {
            return true;
        }
    }

    return false;
}
uint32_t llama_hparams::n_embd_k_gqa_max() const {
    uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_k_gqa(il));
    }

    return val;
}

uint32_t llama_hparams::n_embd_v_gqa_max() const {
    uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_v_gqa(il));
    }

    return val;
}
uint32_t llama_hparams::n_embd_r() const {
    if (wkv_head_size != 0) {
        // for RWKV models
        return token_shift_count * n_embd;
    }

    if (n_shortconv_l_cache != 0) {
        // for LFM2 models
        return n_embd * (n_shortconv_l_cache - 1);
    }

    // TODO: maybe support other convolution strides than 1
    // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
    // Corresponds to Mamba's conv_states size
    return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * (ssm_d_inner + 2*ssm_n_group*ssm_d_state);
}
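A worked instance of the Mamba-style branch at the end of n_embd_r(), with assumed SSM hyperparameters (the values are illustrative only):

// sketch, assuming ssm_d_conv == 4, ssm_d_inner == 2048, ssm_n_group == 1, ssm_d_state == 16:
//   n_embd_r() = (4 - 1) * (2048 + 2*1*16)
//              = 3 * 2080
//              = 6240 elements of rolling (conv) state per sequence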
uint32_t llama_hparams::n_embd_s() const {
    if (wkv_head_size != 0) {
        // corresponds to RWKV's wkv_states size
        return n_embd * wkv_head_size;
    }

    // corresponds to Mamba's ssm_states size
    return ssm_d_state * ssm_d_inner;
}
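And the corresponding recurrent-state sizes from n_embd_s(), again with assumed, purely illustrative values:

// sketch, RWKV branch:  n_embd == 2048, wkv_head_size == 64   -> n_embd_s() = 2048 * 64 = 131072
// sketch, Mamba branch: ssm_d_state == 16, ssm_d_inner == 2048 -> n_embd_s() = 16 * 2048 = 32768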
bool llama_hparams::is_recurrent(uint32_t il) const {
    return recurrent_layer_arr[il];
}

uint32_t llama_hparams::n_pos_per_embd() const {
    return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
}

bool llama_hparams::is_swa(uint32_t il) const {
    if (il < n_layer) {
        return swa_layers[il];
    }

    GGML_ABORT("fatal error");
}