Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-06-28 20:25:20 +00:00
llama : expose llama_model_n_head_kv in the API (#11997)
It's useful to be able to query this from the library layer, as it's a key parameter of the model (e.g. to figure out how much KV cache memory is needed).
@@ -3838,6 +3838,10 @@ int32_t llama_model_n_head(const struct llama_model * model) {
     return model->hparams.n_head();
 }
 
+int32_t llama_model_n_head_kv(const struct llama_model * model) {
+    return model->hparams.n_head_kv();
+}
+
 // deprecated
 int32_t llama_n_ctx_train(const struct llama_model * model) {
     return llama_model_n_ctx_train(model);
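Below is a minimal sketch of how the newly exposed llama_model_n_head_kv can be combined with the other llama_model_* getters to estimate KV cache memory, the use case mentioned in the commit message. It assumes a uniform head size of n_embd / n_head, an f16 K/V cache, and a hypothetical model path "model.gguf"; it is a back-of-the-envelope estimate, not the library's own cache accounting.

#include <stdio.h>
#include <stdint.h>
#include "llama.h"

// Rough f16 KV cache size estimate (a sketch, not the library's own accounting).
// Assumption: uniform head size head_dim = n_embd / n_head; models with unusual
// K/V head dimensions need the per-layer hyperparameters instead.
static size_t estimate_kv_cache_bytes(const struct llama_model * model, uint32_t n_ctx) {
    const int32_t n_layer   = llama_model_n_layer(model);
    const int32_t n_embd    = llama_model_n_embd(model);
    const int32_t n_head    = llama_model_n_head(model);
    const int32_t n_head_kv = llama_model_n_head_kv(model); // exposed by this commit

    const int32_t head_dim       = n_embd / n_head; // assumption: uniform head size
    const size_t  bytes_per_elem = 2;                // f16 entries

    // K and V each store n_head_kv * head_dim values per token per layer
    return (size_t) 2 * bytes_per_elem * n_head_kv * head_dim * n_layer * n_ctx;
}

int main(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    // "model.gguf" is a placeholder path, not part of the commit
    struct llama_model * model = llama_model_load_from_file("model.gguf", mparams);
    if (model == NULL) {
        llama_backend_free();
        return 1;
    }

    const size_t kv_bytes = estimate_kv_cache_bytes(model, 4096);
    printf("estimated f16 KV cache for n_ctx=4096: %.1f MiB\n", kv_bytes / (1024.0 * 1024.0));

    llama_model_free(model);
    llama_backend_free();
    return 0;
}

For a GQA model the estimate shrinks proportionally to n_head_kv / n_head compared with full multi-head attention, which is exactly why exposing n_head_kv (and not just n_head) matters for sizing the cache.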