Mirror of https://github.com/ggml-org/llama.cpp.git
batched-bench : fix unified KV cache handling + pp timing (#15562)
* batched-bench : fix unified KV cache handling + pp timing
* cont : run dummy token only with split KV cache
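In short (my reading of the diff, which touches tools/batched-bench/batched-bench.cpp): first, when the prompt is shared (-pps) and the KV cache is unified, the shared prompt occupies its context cells only once, so the required context is pp + pl*tg rather than pl*pp + pl*tg (first hunk; a worked example follows it). Second, the end-of-prompt-processing timestamp t_pp_end is moved to after the sequence copies, so that work is charged to prompt processing, and the dummy-token decode that applies the copy now runs only for a split KV cache (second hunk; see the note after it).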
@@ -124,7 +124,7 @@ int main(int argc, char ** argv) {
             const int tg = n_tg[i_tg];
             const int pl = n_pl[i_pl];
 
-            const int n_ctx_req = is_pp_shared ? pp + pl*tg : pl*(pp + tg);
+            const int n_ctx_req = is_pp_shared ? (params.kv_unified ? pp : pl*pp) + pl*tg : pl*(pp + tg);
 
             if (n_ctx_req > n_kv_max) {
                 continue;
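A quick sanity check of the corrected formula (an illustration, not part of the commit; the values of pp, tg, and pl are arbitrary):

// Illustration only: context requirement for a shared prompt under a
// unified vs. a split KV cache, with pp = 512, tg = 128, pl = 4.
#include <cstdio>

int main() {
    const int pp = 512; // shared prompt length
    const int tg = 128; // tokens generated per sequence
    const int pl = 4;   // number of parallel sequences

    // unified KV cache: the shared prompt occupies its cells once
    const int unified = pp + pl*tg;    // 512 + 4*128 = 1024

    // split KV cache: each sequence keeps its own copy of the prompt
    const int split   = pl*pp + pl*tg; // 4*512 + 4*128 = 2560

    printf("n_ctx_req (unified) = %d\n", unified);
    printf("n_ctx_req (split)   = %d\n", split);
    return 0;
}

Before the fix, the shared-prompt path always used the split-cache value, so unified-cache runs were rejected as exceeding n_kv_max even when they actually fit.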
@@ -147,13 +147,24 @@ int main(int argc, char ** argv) {
                 return 1;
             }
 
-            const auto t_pp_end = ggml_time_us();
-
             if (is_pp_shared) {
                 for (int32_t i = 1; i < pl; ++i) {
                     llama_memory_seq_cp(mem, 0, i, -1, -1);
                 }
+
+                if (!params.kv_unified) {
+                    // run one dummy token to apply the memory copy
+                    common_batch_clear(batch);
+                    common_batch_add(batch, get_token_rand(), pp + 0, { 0 }, true);
+                    if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
+                        LOG_ERR("%s: llama_decode() failed\n", __func__);
+                        return 1;
+                    }
+                    llama_memory_seq_rm(mem, 0, pp, -1);
+                }
             }
 
+            const auto t_pp_end = ggml_time_us();
+
             const auto t_tg_start = ggml_time_us();
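Two notes on this hunk. The dummy-token decode exists because, as the new comment says, with a split KV cache one token has to be decoded to actually apply the llama_memory_seq_cp copies; the token is placed at position pp in sequence 0 and removed again with llama_memory_seq_rm, so the benchmark state is unchanged. With a unified cache the copy needs no such flush, hence the !params.kv_unified guard. Moving t_pp_end below the whole block means the copy (and, for split caches, the flush) is counted in the reported prompt-processing time instead of falling unaccounted between the pp and tg timers.

To exercise both paths, a run along these lines should work (a sketch: -pps shares the prompt across sequences, and -kvu/--kv-unified, assuming your build has the flag, selects the unified KV cache):

llama-batched-bench -m model.gguf -c 4096 -b 2048 -npp 512 -ntg 128 -npl 1,2,4 -pps -kvu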