llama : use "stream" vs "virtual sequence"

ggml-ci
Author: Georgi Gerganov
Date:   2025-07-04 12:55:43 +03:00
parent  38479e2642
commit  7b00429295

11 changed files with 197 additions and 177 deletions


@@ -61,7 +61,7 @@ int main(int argc, char ** argv) {
     const int32_t n_kv_max = llama_n_ctx(ctx);
 
-    llama_batch batch = llama_batch_init(n_kv_max*8, 0, 1); // TODO: tmp!!!
+    llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
 
     // decode in batches of ctx_params.n_batch tokens
     auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch) {
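
The first hunk drops the temporary 8x over-allocation and sizes the benchmark batch to the KV cache capacity (n_kv_max tokens). That batch is then fed to the model in chunks of n_batch tokens by the decode_helper lambda mentioned above. A minimal sketch of such a chunked decode, assuming the public llama_batch layout from llama.h; the helper name and error handling are illustrative, not taken from this commit:

    #include <algorithm>
    #include "llama.h"

    // Sketch: submit `batch` to the context in chunks of at most n_batch tokens,
    // building views into the original arrays instead of copying them.
    static bool decode_in_chunks(llama_context * ctx, const llama_batch & batch, int32_t n_batch) {
        for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
            const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);

            llama_batch view = {
                /*.n_tokens =*/ n_tokens,
                /*.token    =*/ batch.token    + i,
                /*.embd     =*/ nullptr,
                /*.pos      =*/ batch.pos      + i,
                /*.n_seq_id =*/ batch.n_seq_id + i,
                /*.seq_id   =*/ batch.seq_id   + i,
                /*.logits   =*/ batch.logits   + i,
            };

            if (llama_decode(ctx, view) != 0) {
                return false; // decode failed (e.g. KV cache full)
            }
        }
        return true;
    }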
@@ -119,22 +119,18 @@ int main(int argc, char ** argv) {
         const int n_ctx_req = is_pp_shared ? pp + pl*tg : pl*(pp + tg);
 
-        //if (n_ctx_req > n_kv_max) {
-        //    continue;
-        //}
+        if (n_ctx_req > n_kv_max) {
+            continue;
+        }
 
         common_batch_clear(batch);
 
         for (int j = 0; j < (is_pp_shared ? 1 : pl); ++j) {
             for (int i = 0; i < pp; ++i) {
-                common_batch_add(batch, 0, i, { j }, false);
+                common_batch_add(batch, 0, i, { j }, i == pp - 1);
             }
         }
 
-        if (batch.n_tokens > 0) {
-            batch.logits[batch.n_tokens - 1] = true;
-        }
-
         const auto t_pp_start = ggml_time_us();
 
         llama_memory_clear(mem, false);
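
The second hunk re-enables the context-size guard and moves the "output logits for this token" decision into the common_batch_add call itself: the last prompt token of each sequence (i == pp - 1) is flagged directly, so the post-hoc batch.logits[batch.n_tokens - 1] = true; patch-up is no longer needed and every stream requests logits for its own final prompt token. For reference, a rough sketch of what such an add helper does with that flag, loosely modeled on the common-code helper; names and details are assumed, not copied from this commit:

    #include <vector>
    #include "llama.h"

    // Sketch: append one token to a llama_batch, recording its position, the
    // sequences (streams) it belongs to, and whether logits are requested for it.
    static void batch_add(llama_batch & batch, llama_token id, llama_pos pos,
                          const std::vector<llama_seq_id> & seq_ids, bool logits) {
        const int32_t n = batch.n_tokens;

        batch.token   [n] = id;
        batch.pos     [n] = pos;
        batch.n_seq_id[n] = (int32_t) seq_ids.size();
        for (size_t s = 0; s < seq_ids.size(); ++s) {
            batch.seq_id[n][s] = seq_ids[s];
        }
        // the caller above passes `i == pp - 1`, so the last prompt token of
        // every sequence requests logits instead of only the batch's last token
        batch.logits[n] = logits;

        batch.n_tokens++;
    }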