context : add cache-less llama_context

ggml-ci
Georgi Gerganov
2025-02-20 15:18:45 +02:00
parent 072280ea6b
commit b1554be1d7
8 changed files with 1073 additions and 355 deletions


@ -952,7 +952,7 @@ struct common_init_result common_init_from_params(common_params & params) {
}
if (params.ctx_shift && !llama_kv_self_can_shift(lctx)) {
LOG_WRN("%s: KV cache shifting is not supported for this model, disabling KV cache shifting\n", __func__);
LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__);
params.ctx_shift = false;
}
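For reference, a minimal hedged sketch of the same fallback performed from application code; it assumes only the llama_kv_self_can_shift() call already used in the hunk above:

```cpp
// Sketch only: mirror the common_init_from_params fallback in user code.
// With the new cache-less llama_context, shifting is expected to be
// reported as unsupported, so the option is simply turned off.
#include "llama.h"

bool resolve_ctx_shift(llama_context * lctx, bool requested_ctx_shift) {
    if (requested_ctx_shift && !llama_kv_self_can_shift(lctx)) {
        return false; // same behavior as the LOG_WRN branch above
    }
    return requested_ctx_shift;
}
```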

File diff suppressed because it is too large


@ -20,6 +20,7 @@ class llama_io_write_i;
using llama_loras = std::unordered_map<struct llama_adapter_lora *, float>;
// basic transformer without KV cache
struct llama_context : public llama_graph_i {
llama_context(
const llama_model & model,
@ -38,17 +39,19 @@ struct llama_context : public llama_graph_i {
virtual uint32_t n_ctx_per_seq() const;
virtual uint32_t n_batch() const;
virtual uint32_t n_ubatch() const;
virtual uint32_t n_seq_max() const = 0;
virtual uint32_t n_seq_max() const;
virtual uint32_t n_threads() const;
virtual uint32_t n_threads_batch() const;
virtual int32_t max_nodes() const;
virtual llama_kv_cache * get_kv_self() = 0;
virtual const llama_kv_cache * get_kv_self() const = 0;
// returns nullptr
virtual llama_kv_cache * get_kv_self();
virtual const llama_kv_cache * get_kv_self() const;
virtual void kv_self_update() = 0;
// noop
virtual void kv_self_update();
virtual enum llama_pooling_type pooling_type() const;
@ -109,8 +112,6 @@ struct llama_context : public llama_graph_i {
ggml_cgraph * gf,
bool batched);
virtual void input_set(const llama_ubatch & ubatch);
// Make sure enough space is available for outputs.
// Returns max number of outputs for which space was reserved.
virtual int32_t output_reserve(int32_t n_outputs);
@ -128,7 +129,7 @@ struct llama_context : public llama_graph_i {
// return positive int on warning
// return negative int on error
//
virtual int encode(llama_batch & inp_batch) = 0;
virtual int encode(llama_batch & inp_batch);
// decode a batch of tokens by evaluating the transformer
// in case of unsuccessful decoding (error or warning),
@ -142,7 +143,7 @@ struct llama_context : public llama_graph_i {
// return positive int on warning
// return negative int on error
//
virtual int decode(llama_batch & inp_batch) = 0;
virtual int decode(llama_batch & inp_batch);
//
// graph build API (generic)
@ -204,6 +205,31 @@ struct llama_context : public llama_graph_i {
ggml_context * ctx0,
int32_t n_tokens);
virtual void build_attn_inp(
ggml_context * ctx0,
int32_t n_tokens,
bool causal,
bool swa,
bool worst_case);
virtual ggml_tensor * build_attn(
ggml_context * ctx0,
ggml_cgraph * gf,
ggml_tensor * wo,
ggml_tensor * wo_b,
ggml_tensor * q_cur,
ggml_tensor * k_cur,
ggml_tensor * v_cur,
int32_t n_tokens,
float kq_scale,
int il,
bool worst_case);
// perf
virtual llama_perf_context_data perf_get_data() const;
virtual void perf_reset();
// state save/load
virtual size_t state_get_size();
@ -238,13 +264,7 @@ struct llama_context : public llama_graph_i {
const llama_token * tokens,
size_t n_token_count);
// perf
virtual llama_perf_context_data perf_get_data() const;
virtual void perf_reset();
protected:
// state save/load
virtual size_t state_get_data(llama_io_write_i & io);
@ -253,14 +273,21 @@ protected:
virtual size_t state_seq_get_data(llama_io_write_i & io, llama_seq_id seq_id);
virtual size_t state_seq_set_data(llama_io_read_i & io, llama_seq_id seq_id);
// input tensors
// input
struct ggml_tensor * inp_tokens; // I32 [n_batch]
struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
struct ggml_tensor * inp_pos; // I32 [n_batch]
struct ggml_tensor * inp_out_ids; // I32 [n_outputs]
struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
struct ggml_tensor * inp_cls; // I32 [n_batch]
virtual void input_set(const llama_ubatch & ubatch);
// base input tensors
ggml_tensor * inp_tokens; // I32 [n_batch]
ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
ggml_tensor * inp_pos; // I32 [n_batch]
ggml_tensor * inp_out_ids; // I32 [n_outputs]
ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
ggml_tensor * inp_cls; // I32 [n_batch]
// KQ mask input tensors
ggml_tensor * inp_kq_mask; // F32 [n_tokens, n_batch]
ggml_tensor * inp_kq_mask_cnv; // [n_tokens, n_batch]
// members
@ -337,8 +364,6 @@ public:
virtual ~llama_context_kv_self();
virtual uint32_t n_seq_max() const override;
virtual llama_kv_cache * get_kv_self() override;
virtual const llama_kv_cache * get_kv_self() const override;
@ -346,8 +371,6 @@ public:
virtual ggml_cgraph * graph_init() override;
virtual void input_set(const llama_ubatch & ubatch) override;
virtual int encode(llama_batch & inp_batch) override;
virtual int decode(llama_batch & inp_batch) override;
@ -357,17 +380,7 @@ public:
// certain implementations could require a padding for the context size
uint32_t get_ctx_padding(const llama_cparams & cparams) const;
// === KV cache ===
llama_kv_cache kv_self;
ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
ggml_tensor * inp_KQ_mask_cnv; // [kv_size, n_batch]
ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
ggml_tensor * inp_KQ_mask_swa_cnv; // [kv_size, n_batch]
ggml_tensor * inp_k_shift; // I32 [kv_size]
virtual ggml_tensor * build_inp_k_shift(ggml_context * ctx0) override;
virtual ggml_tensor * build_inp_self_k_shift(ggml_context * ctx0) override;
virtual void build_attn_inp(
ggml_context * ctx0,
@ -389,11 +402,6 @@ public:
int il,
bool worst_case) override;
virtual ggml_tensor * build_attn_soft_max(
ggml_context * ctx0,
ggml_tensor * kq,
float kq_scale) override;
virtual void build_kv_self_shift(
ggml_context * ctx0,
ggml_cgraph * gf) override;
@ -414,14 +422,14 @@ public:
struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc]
struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch]
struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
struct ggml_tensor * inp_kq_mask_cross; // F32 [n_outputs_enc, n_batch]
virtual ggml_tensor * build_inp_embd_enc(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) override;
virtual ggml_tensor * build_inp_KQ_mask_cross(
virtual ggml_tensor * build_inp_kq_mask_cross(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) override;
@ -432,6 +440,16 @@ protected:
virtual size_t state_seq_get_data(llama_io_write_i & io, llama_seq_id seq_id) override;
virtual size_t state_seq_set_data(llama_io_read_i & io, llama_seq_id seq_id) override;
virtual void input_set(const llama_ubatch & ubatch) override;
llama_kv_cache kv_self;
ggml_tensor * inp_self_kq_mask; // F32 [kv_size, n_batch]
ggml_tensor * inp_self_kq_mask_cnv; // [kv_size, n_batch]
ggml_tensor * inp_self_kq_mask_swa; // F32 [kv_size, n_batch]
ggml_tensor * inp_self_kq_mask_swa_cnv; // [kv_size, n_batch]
ggml_tensor * inp_self_k_shift; // I32 [kv_size]
};
// a recurrent transformer (i.e. RWKV, Mamba)
@ -447,8 +465,6 @@ public:
virtual ggml_cgraph * graph_init() override;
virtual void input_set(const llama_ubatch & ubatch) override;
virtual ggml_tensor * build_inp_s_copy(
ggml_context * ctx0,
bool worst_case) override;
@ -506,6 +522,8 @@ public:
bool worst_case) override;
protected:
virtual void input_set(const llama_ubatch & ubatch) override;
struct ggml_tensor * inp_s_copy; // I32 [kv_size]
struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
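Taken together, the header changes above turn several pure-virtual methods into overridable defaults, which is what lets the base llama_context be instantiated for cache-less (encoder-style) models while llama_context_kv_self keeps the KV-cache state. A minimal sketch of that pattern, with simplified stand-in names rather than the actual llama.cpp declarations:

```cpp
// Simplified illustration of the "safe default instead of pure virtual" pattern
// used by the refactor; kv_cache and the context types here are stand-ins.
#include <cstdio>

struct kv_cache { int used = 0; };

struct context_base {
    virtual ~context_base() = default;
    // returns nullptr: the base (cache-less) context owns no KV cache
    virtual kv_cache * get_kv_self() { return nullptr; }
    // noop: nothing to shift or defragment without a cache
    virtual void kv_self_update() {}
};

struct context_kv_self : context_base {
    kv_cache kv;
    kv_cache * get_kv_self() override { return &kv; }
    void kv_self_update() override { /* apply K-shift / defrag here */ }
};

int main() {
    context_base    cacheless;   // previously this type was abstract
    context_kv_self with_cache;
    std::printf("cache-less owns a cache: %d\n", cacheless.get_kv_self()  != nullptr);
    std::printf("kv-self    owns a cache: %d\n", with_cache.get_kv_self() != nullptr);
    return 0;
}
```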


@ -2,6 +2,84 @@
#include "llama-impl.h"
ggml_tensor * llama_graph_i::build_attn(
ggml_context * ctx0,
ggml_cgraph * gf,
ggml_tensor * wo,
ggml_tensor * wo_b,
ggml_tensor * q_cur,
ggml_tensor * k_cur,
ggml_tensor * v_cur,
int32_t n_tokens,
float kq_scale,
int il,
bool worst_case) {
GGML_UNUSED(ctx0);
GGML_UNUSED(gf);
GGML_UNUSED(wo);
GGML_UNUSED(wo_b);
GGML_UNUSED(q_cur);
GGML_UNUSED(k_cur);
GGML_UNUSED(v_cur);
GGML_UNUSED(n_tokens);
GGML_UNUSED(kq_scale);
GGML_UNUSED(il);
GGML_UNUSED(worst_case);
LLAMA_LOG_ERROR("%s: not implemented\n", __func__);
return nullptr;
}
void llama_graph_i::build_kv_self_shift(
ggml_context * ctx0,
ggml_cgraph * gf) {
GGML_UNUSED(ctx0);
GGML_UNUSED(gf);
LLAMA_LOG_ERROR("%s: not implemented\n", __func__);
}
void llama_graph_i::build_kv_self_defrag(
ggml_context * ctx0,
ggml_cgraph * gf) {
GGML_UNUSED(ctx0);
GGML_UNUSED(gf);
LLAMA_LOG_ERROR("%s: not implemented\n", __func__);
}
ggml_tensor * llama_graph_i::build_inp_self_k_shift(
ggml_context * ctx0) {
GGML_UNUSED(ctx0);
LLAMA_LOG_ERROR("%s: not implemented\n", __func__);
return nullptr;
}
ggml_tensor * llama_graph_i::build_inp_embd_enc(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) {
GGML_UNUSED(ctx0);
GGML_UNUSED(n_tokens);
GGML_UNUSED(worst_case);
LLAMA_LOG_ERROR("%s: not implemented\n", __func__);
return nullptr;
}
ggml_tensor * llama_graph_i::build_inp_kq_mask_cross(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) {
GGML_UNUSED(ctx0);
GGML_UNUSED(n_tokens);
GGML_UNUSED(worst_case);
LLAMA_LOG_ERROR("%s: not implemented\n", __func__);
return nullptr;
}
ggml_tensor * llama_graph_i::build_inp_s_copy (
ggml_context * ctx0,
bool worst_case) {


@ -99,34 +99,29 @@ public:
int32_t n_tokens,
float kq_scale,
int il,
bool worst_case) = 0;
virtual ggml_tensor * build_attn_soft_max(
ggml_context * ctx0,
ggml_tensor * kq,
float kq_scale) = 0;
bool worst_case);
virtual void build_kv_self_shift(
ggml_context * ctx0,
ggml_cgraph * gf) = 0;
ggml_cgraph * gf);
// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
virtual void build_kv_self_defrag(
ggml_context * ctx0,
ggml_cgraph * gf) = 0;
ggml_cgraph * gf);
virtual ggml_tensor * build_inp_k_shift(
ggml_context * ctx0) = 0;
virtual ggml_tensor * build_inp_self_k_shift(
ggml_context * ctx0);
virtual ggml_tensor * build_inp_embd_enc(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) = 0;
bool worst_case);
virtual ggml_tensor * build_inp_KQ_mask_cross(
virtual ggml_tensor * build_inp_kq_mask_cross(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) = 0;
bool worst_case);
virtual ggml_tensor * build_inp_s_copy(
ggml_context * ctx0,


@ -1079,14 +1079,26 @@ bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t cell_count)
//
int32_t llama_kv_cache_n_tokens(const llama_kv_cache * kv) {
if (!kv) {
return 0;
}
return kv->n_tokens();
}
int32_t llama_kv_cache_used_cells(const llama_kv_cache * kv) {
if (!kv) {
return 0;
}
return kv->used;
}
void llama_kv_cache_clear(llama_kv_cache * kv) {
if (!kv) {
return;
}
kv->clear();
}
@ -1095,6 +1107,10 @@ bool llama_kv_cache_seq_rm(
llama_seq_id seq_id,
llama_pos p0,
llama_pos p1) {
if (!kv) {
return true;
}
return kv->seq_rm(seq_id, p0, p1);
}
@ -1104,10 +1120,18 @@ void llama_kv_cache_seq_cp(
llama_seq_id seq_id_dst,
llama_pos p0,
llama_pos p1) {
if (!kv) {
return;
}
kv->seq_cp(seq_id_src, seq_id_dst, p0, p1);
}
void llama_kv_cache_seq_keep(llama_kv_cache * kv, llama_seq_id seq_id) {
if (!kv) {
return;
}
kv->seq_keep(seq_id);
}
@ -1117,6 +1141,10 @@ void llama_kv_cache_seq_add(
llama_pos p0,
llama_pos p1,
llama_pos delta) {
if (!kv) {
return;
}
kv->seq_add(seq_id, p0, p1, delta);
}
@ -1126,18 +1154,34 @@ void llama_kv_cache_seq_div(
llama_pos p0,
llama_pos p1,
int d) {
if (!kv) {
return;
}
kv->seq_div(seq_id, p0, p1, d);
}
llama_pos llama_kv_cache_seq_pos_max(llama_kv_cache * kv, llama_seq_id seq_id) {
if (!kv) {
return 0;
}
return kv->seq_pos_max(seq_id);
}
void llama_kv_cache_defrag(llama_kv_cache * kv) {
if (!kv) {
return;
}
kv->defrag();
}
bool llama_kv_cache_can_shift(const llama_kv_cache * kv) {
if (!kv) {
return false;
}
return kv->can_shift;
}
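Because every helper above now checks for a null cache, code that queries KV state can be written without a separate "does this context own a cache?" branch. A hedged usage sketch, assuming the helpers are declared in the internal llama-kv-cache.h header:

```cpp
// Sketch only: kv may be nullptr (cache-less context) or point to a real cache;
// both accessors fall back to 0 in the nullptr case.
#include "llama-kv-cache.h" // assumed to declare the helpers shown above
#include <cstdio>

void report_kv_usage(const llama_kv_cache * kv) {
    std::printf("kv tokens: %d, used cells: %d\n",
                llama_kv_cache_n_tokens(kv), llama_kv_cache_used_cells(kv));
}
```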


@ -3956,8 +3956,8 @@ struct llm_build_context {
}
// TODO: tmp
struct ggml_tensor * build_inp_KQ_mask_cross() {
ggml_tensor * cur = lgf->build_inp_KQ_mask_cross(ctx0, n_tokens, worst_case);
struct ggml_tensor * build_inp_kq_mask_cross() {
ggml_tensor * cur = lgf->build_inp_kq_mask_cross(ctx0, n_tokens, worst_case);
cb(cur, "KQ_mask_cross", -1);
return cur;
@ -5568,7 +5568,6 @@ struct llm_build_context {
// self-attention
if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) {
Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq);
cb(Qcur, "Qcur", il);
if (model.layers[il].attn_q_norm) {
Qcur = build_norm(Qcur,
@ -5578,7 +5577,6 @@ struct llm_build_context {
}
Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk);
cb(Kcur, "Kcur", il);
if (model.layers[il].attn_k_norm) {
Kcur = build_norm(Kcur,
@ -5586,11 +5584,12 @@ struct llm_build_context {
model.layers[il].attn_k_norm_b,
LLM_NORM, il);
}
Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv);
cb(Vcur, "Vcur", il);
Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
} else {
// compute Q and K and RoPE them
cur = build_lora_mm(model.layers[il].wqkv, cur);
@ -5600,10 +5599,6 @@ struct llm_build_context {
Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
cb(Qcur, "Qcur", il);
cb(Kcur, "Kcur", il);
cb(Vcur, "Vcur", il);
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
@ -5617,40 +5612,17 @@ struct llm_build_context {
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
}
struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));
cb(Qcur, "Qcur", il);
cb(Kcur, "Kcur", il);
cb(Vcur, "Vcur", il);
struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
cb(kq, "kq", il);
//kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias);
kq = lgf->build_attn_soft_max(ctx0, kq, 1.0f/sqrtf(float(n_embd_head)));
cb(kq, "kq_soft_max_ext", il);
struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens)));
cb(v, "v", il);
struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_tokens, n_embd_head, n_head_kv), kq);
cb(kqv, "kqv", il);
struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
cb(kqv_merged, "kqv_merged", il);
cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
cb(cur, "kqv_merged_cont", il);
ggml_build_forward_expand(gf, cur);
cur = build_lora_mm(model.layers[il].wo, cur);
if (model.layers[il].bo) {
cb(cur, "kqv_wo", il);
}
if (model.layers[il].bo) {
cur = ggml_add(ctx0, cur, model.layers[il].bo);
}
cur = build_attn(gf,
model.layers[il].wo, model.layers[il].bo,
Qcur, Kcur, Vcur, n_tokens, 1.0f/sqrtf(float(n_embd_head)), il);
cb(cur, "kqv_out", il);
if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
@ -9652,7 +9624,7 @@ struct llm_build_context {
// struct ggml_tensor * pos_bucket_enc = build_pos_bucket(false);
// // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
// struct ggml_tensor * KQ_mask_enc = build_inp_KQ_mask(false);
// struct ggml_tensor * KQ_mask_enc = build_inp_kq_mask(false);
// for (int il = 0; il < n_layer; ++il) {
// struct ggml_tensor * inpSA = inpL;
@ -9781,8 +9753,8 @@ struct llm_build_context {
// struct ggml_tensor * embd_enc = build_inp_embd_enc();
// struct ggml_tensor * pos_bucket_dec = build_pos_bucket(true);
// struct ggml_tensor * KQ_mask_dec = build_inp_KQ_mask();
// struct ggml_tensor * KQ_mask_cross = build_inp_KQ_mask_cross();
// struct ggml_tensor * KQ_mask_dec = build_inp_kq_mask();
// struct ggml_tensor * KQ_mask_cross = build_inp_kq_mask_cross();
// for (int il = 0; il < n_layer; ++il) {
// struct ggml_tensor * inpSA = inpL;


@ -328,6 +328,11 @@ struct llama_context * llama_init_from_model(
try {
// TODO: make static method of llama_context
switch (model->arch) {
case LLM_ARCH_BERT:
case LLM_ARCH_JINA_BERT_V2:
case LLM_ARCH_NOMIC_BERT:
ctx = new llama_context(*model, params);
break;
case LLM_ARCH_RWKV6:
case LLM_ARCH_RWKV6QWEN2:
case LLM_ARCH_MAMBA:
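The new cases above route BERT-family architectures to the cache-less base context. A hedged end-to-end sketch of the effect via the public C API (function names assumed from the current llama.h; the model path is illustrative):

```cpp
// Sketch only: loading an encoder-style embedding model now yields the
// cache-less llama_context, and KV-cache-dependent features degrade gracefully.
#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init();

    llama_model * model = llama_model_load_from_file(
        "nomic-embed-text-v1.5.Q8_0.gguf", llama_model_default_params()); // illustrative path
    if (!model) {
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.embeddings = true; // encoder models are typically used for embeddings

    llama_context * ctx = llama_init_from_model(model, cparams);

    // as in the common.cpp hunk at the top of this commit, context shifting
    // is reported as unsupported for a cache-less context
    std::printf("kv shift supported: %d\n", llama_kv_self_can_shift(ctx));

    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}
```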