Mirror of https://github.com/ggml-org/llama.cpp.git, last synced 2025-07-18 08:37:43 +00:00.
context : add get_ctx_padding()
ggml-ci
This commit is contained in:
@ -64,6 +64,10 @@ llama_pos llama_context::pos_max() const {
|
||||
return kv_self.pos_max();
|
||||
}
|
||||
|
||||
// Returns the context-size padding for the given context params.
// Pure delegation: forwards to the KV cache member (kv_self.get_padding),
// which owns the actual padding policy — presumably the alignment granularity
// the KV cache requires for n_ctx; confirm against llama_kv_cache::get_padding.
// Does not modify any state (const member function).
uint32_t llama_context::get_ctx_padding(const llama_cparams & cparams) const {
    return kv_self.get_padding(cparams);
}
|
||||
|
||||
// TODO: improve
|
||||
void llama_context::reset() {
|
||||
inp_tokens = nullptr;
|
||||
|
Reference in New Issue
Block a user