server : do not speculate during prompt processing
ggml-ci
@@ -2322,6 +2322,10 @@ struct server_context {
                 continue;
             }
 
+            if (slot.state != SLOT_STATE_GENERATING) {
+                continue;
+            }
+
             llama_token id = slot.sampled;
 
             struct common_speculative_params params_spec;
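For context, a minimal standalone sketch of the pattern this guard implements follows. The slot struct, the state names beyond SLOT_STATE_GENERATING, and the can_speculate() stand-in are simplified assumptions for illustration, not the server's real definitions. The idea, per the commit title, is that a slot still ingesting its prompt should be skipped by the speculative-decoding pass, since slot.sampled is presumably only meaningful once the slot has started generating.

#include <cstdio>
#include <vector>

// Hypothetical, simplified slot lifecycle: a slot first processes the
// prompt, then generates tokens one by one.
enum slot_state {
    SLOT_STATE_IDLE,
    SLOT_STATE_PROCESSING_PROMPT,
    SLOT_STATE_GENERATING,
};

struct slot {
    slot_state state   = SLOT_STATE_IDLE;
    int        sampled = -1; // last sampled token; assumed valid only while generating

    // stand-in for the real capability check in the server
    bool can_speculate() const { return true; }
};

int main() {
    std::vector<slot> slots(2);
    slots[0].state   = SLOT_STATE_PROCESSING_PROMPT; // still ingesting the prompt
    slots[1].state   = SLOT_STATE_GENERATING;
    slots[1].sampled = 42;

    for (size_t i = 0; i < slots.size(); ++i) {
        slot & s = slots[i];

        if (!s.can_speculate()) {
            continue;
        }

        // the guard added by this commit: do not draft speculatively
        // until the slot has actually entered the generating state
        if (s.state != SLOT_STATE_GENERATING) {
            continue;
        }

        printf("slot %zu: draft speculatively from token %d\n", i, s.sampled);
    }

    return 0;
}

Running this prints a draft line only for slot 1; slot 0 is skipped by the new check exactly as the diff intends.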