llama : support optional tensors (#4283)
@@ -1469,7 +1469,7 @@ struct llama_server_context
 
     int split_multiprompt_task(task_server& multiprompt_task)
     {
-        auto prompt_count = multiprompt_task.data.at("prompt").size();
+        int prompt_count = multiprompt_task.data.at("prompt").size();
         assert(prompt_count > 1);
 
         int multitask_id = id_gen++;
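A plausible rationale for the `auto` → `int` change (an inference on my part, not stated in the commit): `size()` on the JSON value returns an unsigned size type, so `auto` deduces an unsigned count, which invites signed/unsigned comparison warnings and wrap-around surprises in later arithmetic. A minimal, self-contained C++ sketch of the pitfall, using `std::vector` in place of the json object:

    // Hypothetical demonstration, not code from the commit: shows why
    // deducing a count with `auto` from size() can misbehave, since
    // size() returns an unsigned std::size_t.
    #include <cassert>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> prompts = {1, 2, 3};

        auto prompt_count_u = prompts.size();                    // deduced as std::size_t (unsigned)
        int  prompt_count   = static_cast<int>(prompts.size()); // explicit signed count

        // Unsigned arithmetic wraps: size_t(3) - 4 is a huge positive value.
        std::cout << (prompt_count_u - 4 > 0) << '\n';  // prints 1 (wrap-around)
        std::cout << (prompt_count   - 4 > 0) << '\n';  // prints 0 (as intended)

        assert(prompt_count > 1);  // mirrors the assert in split_multiprompt_task
        return 0;
    }

With a signed `int` count, comparisons and subtractions behave as the surrounding server code expects, at the (acceptable here) cost of a narrowing conversion.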