examples : allow extracting embeddings from decoder contexts (#13797)

ggml-ci
Georgi Gerganov
2025-05-26 14:03:54 +03:00
committed by GitHub
parent 22229314fc
commit 79c137f776
4 changed files with 10 additions and 16 deletions


@@ -41,8 +41,8 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * output
     // run model
     LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
-    if (llama_encode(ctx, batch) < 0) {
-        LOG_ERR("%s : failed to encode\n", __func__);
+    if (llama_decode(ctx, batch) < 0) {
+        LOG_ERR("%s : failed to process\n", __func__);
     }
     for (int i = 0; i < batch.n_tokens; i++) {
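
For context: with this change the example evaluates the batch with llama_decode() on a decoder context and then reads the embeddings back through the usual llama.h getters. The sketch below illustrates that readback step. It is a minimal illustration, not the example's exact code; it assumes ctx, a populated batch, and n_embd are already set up, that the context was created with embeddings enabled, and read_embeddings is a hypothetical helper name.

#include "llama.h"
#include <cstdio>

// Sketch: read embeddings back after llama_decode() on a decoder context.
// Hypothetical helper, not part of llama.cpp.
static void read_embeddings(llama_context * ctx, const llama_batch & batch, int n_embd) {
    for (int i = 0; i < batch.n_tokens; i++) {
        if (!batch.logits[i]) {
            continue; // no output was requested for this token
        }

        const float * embd = nullptr;

        if (llama_pooling_type(ctx) == LLAMA_POOLING_TYPE_NONE) {
            // no pooling: one embedding per output token
            embd = llama_get_embeddings_ith(ctx, i);
        } else {
            // pooled: one embedding per sequence
            embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
        }

        if (embd != nullptr) {
            printf("dim = %d, embd[0..2] = %f %f %f\n", n_embd, embd[0], embd[1], embd[2]);
        }
    }
}

The point of the sketch is only that llama_get_embeddings_ith / llama_get_embeddings_seq are used the same way regardless of whether the batch was processed with llama_encode or llama_decode; the example itself copies the vectors into the caller's output buffer instead of printing them.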