From aed4a8e980d76f246aaa83ba4f79e4f000c47f53 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Sun, 16 Feb 2025 11:36:50 +0100
Subject: [PATCH] fix server

---
 examples/server/server.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index caf412341..029bd9777 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -3088,7 +3088,7 @@ struct server_context {
                     const bool need_embd = slot.task_type == SERVER_TASK_TYPE_EMBEDDING && llama_pooling_type(slot.ctx) == LLAMA_POOLING_TYPE_NONE;
 
                     std::array seq_id = { slot.id };
-                    llama_batch_ext_add_text_token(batch.get(), prompt_tokens[slot.n_past], slot.n_past, seq_id.data(), seq_id.size(), true);
+                    llama_batch_ext_add_text_token(batch.get(), prompt_tokens[slot.n_past], slot.n_past, seq_id.data(), seq_id.size(), need_embd);
                     if (slot.params.cache_prompt) {
                         slot.cache_tokens.push_back(prompt_tokens[slot.n_past]);