llama : use n_swa + n_ubatch cells for SWA cache (#13833)

* llama : use n_swa + n_ubatch cells for SWA cache

ggml-ci

* llama : add warning about multi-sequence SWA contexts
Georgi Gerganov
2025-05-31 15:57:44 +03:00
committed by GitHub
parent c7e0a2054b
commit 3600cc2886
6 changed files with 24 additions and 11 deletions

@@ -13230,7 +13230,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
                         params.swa_full,
                         cparams.n_ctx,
                         cparams.n_seq_max,
-                        cparams.n_batch,
+                        cparams.n_ubatch,
                         padding);
             } else {
                 GGML_ASSERT(!hparams.is_swa_any());
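
The hunk above swaps cparams.n_batch for cparams.n_ubatch among the arguments used to size the SWA cache. A minimal sketch of the sizing idea behind the commit title follows; the helper name swa_cache_size and the padding round-up are illustrative assumptions, not the actual llama.cpp implementation:

#include <cstdint>

// Sketch: a sliding-window attention (SWA) cache only has to keep the last
// n_swa tokens of the window plus the tokens of the micro-batch currently
// being decoded. Sizing it as n_swa + n_ubatch cells is therefore enough,
// and is smaller than the previous n_swa + n_batch whenever a logical batch
// is split into smaller micro-batches.
static uint32_t swa_cache_size(uint32_t n_swa, uint32_t n_ubatch, uint32_t padding) {
    const uint32_t n_cells = n_swa + n_ubatch;
    // round the cell count up to the cache padding granularity
    // (assumed behavior, mirroring the padding argument passed above)
    return ((n_cells + padding - 1) / padding) * padding;
}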
@@ -13593,6 +13593,10 @@ int32_t llama_model_n_head_kv(const llama_model * model) {
     return model->hparams.n_head_kv();
 }
 
+int32_t llama_model_n_swa(const llama_model * model) {
+    return model->hparams.n_swa;
+}
+
 // deprecated
 int32_t llama_n_ctx_train(const llama_model * model) {
     return llama_model_n_ctx_train(model);
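
The second hunk exposes the window size through a new public getter, llama_model_n_swa. A hedged usage sketch; only the getter itself comes from this commit, and treating a return value of 0 as "no SWA" is an assumption:

#include "llama.h"
#include <cstdio>

// Sketch: report the sliding-window size of a loaded model.
void report_swa(const llama_model * model) {
    const int32_t n_swa = llama_model_n_swa(model);
    if (n_swa > 0) {
        printf("sliding window: %d tokens\n", (int) n_swa);
    } else {
        // assumed convention: 0 means the model does not use SWA
        printf("no sliding window attention\n");
    }
}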