tools : tmp adjustments (TMP)

ggml-ci
Author: Georgi Gerganov
Date:   2025-06-24 15:02:58 +03:00
Commit: 132143938f
Parent: 52b9007176

2 changed files with 15 additions and 9 deletions


@@ -235,7 +235,7 @@ int main(int argc, char ** argv) {
     // the max batch size is as large as the context to handle cases where we get very long input prompt from multiple
     // users. regardless of the size, the main loop will chunk the batch into a maximum of params.n_batch tokens at a time
-    llama_batch batch = llama_batch_init(n_ctx, 0, 1);
+    llama_batch batch = llama_batch_init(n_ctx*n_clients, 0, 1);
     int32_t n_total_prompt = 0;
     int32_t n_total_gen = 0;
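The allocation above only sizes the batch for the worst case (room for n_ctx tokens across the client sequences); as the comment notes, decoding is still issued in slices of at most params.n_batch tokens. Below is a minimal sketch of that chunking pattern, assuming ctx, batch and n_batch are in scope as set up above; it is a simplified fragment, not the exact loop in this file, which additionally handles decode failures (the field order follows struct llama_batch in llama.h):

    // submit the prepared batch in views of at most n_batch tokens
    for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
        const int32_t n_cur = std::min(n_batch, batch.n_tokens - i);

        llama_batch batch_view = {
            n_cur,
            batch.token    + i,
            nullptr,               // no embeddings, tokens are used instead
            batch.pos      + i,
            batch.n_seq_id + i,
            batch.seq_id   + i,
            batch.logits   + i,
        };

        if (llama_decode(ctx, batch_view) != 0) {
            // the full example retries with a smaller n_batch; the sketch just stops
            break;
        }
    }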
@@ -289,8 +289,11 @@ int main(int argc, char ** argv) {
             // all sequences have ended - clear the entire KV cache
             for (int i = 1; i <= n_clients; ++i) {
                 llama_memory_seq_rm(mem, i, -1, -1);
-                // but keep the system prompt
-                llama_memory_seq_cp(mem, 0, i, -1, -1);
+                if (is_sp_shared) {
+                    // but keep the system prompt
+                    llama_memory_seq_cp(mem, 0, i, -1, -1);
+                }
             }
             LOG_INF("%s: clearing the KV cache\n", __func__);
@@ -449,8 +452,11 @@ int main(int argc, char ** argv) {
            }
            // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-           llama_memory_seq_rm(mem, client.id + 1, -1, -1);
-           llama_memory_seq_cp(mem, 0, client.id + 1, -1, -1);
+           llama_memory_seq_rm(mem, client.id + 1, -1, -1);
+           if (is_sp_shared) {
+               llama_memory_seq_cp(mem, 0, client.id + 1, -1, -1);
+           }
            const auto t_main_end = ggml_time_us();
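Both memory hunks apply the same idea: sequence 0 holds the system prompt, and it is only re-attached to a client's sequence when the prompt is actually shared (is_sp_shared), instead of being re-evaluated from scratch. A condensed sketch of the per-client reset, assuming client_id is in [0, n_clients) and mem was obtained from the context (e.g. via llama_get_memory):

    // drop the client's entire sequence (prompt + generated tokens) from the KV cache
    llama_memory_seq_rm(mem, client_id + 1, -1, -1);

    if (is_sp_shared) {
        // sequence 0 holds the shared system prompt - re-link it so the client
        // does not need to evaluate it again on its next request
        llama_memory_seq_cp(mem, 0, client_id + 1, -1, -1);
    }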