Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-07-02 05:15:47 +00:00)
server : Add verbose output to OAI compatible chat endpoint. (#12246)
Add verbose output to server_task_result_cmpl_final::to_json_oaicompat_chat_stream, making it conform with server_task_result_cmpl_final::to_json_oaicompat_chat, as well as the other to_json methods.
@@ -830,6 +830,11 @@ struct server_task_result_cmpl_final : server_task_result {
             ret.push_back({"timings", timings.to_json()});
         }
 
+        // extra fields for debugging purposes
+        if (verbose) {
+            ret["__verbose"] = to_json_non_oaicompat();
+        }
+
         return ret;
     }
 };
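
For context, here is a minimal standalone sketch of the pattern the patch applies, assuming only that the server builds its responses with nlohmann::json (which llama.cpp does). The payload fields and the stand-in for to_json_non_oaicompat() below are illustrative placeholders, not the server's actual values.

#include <iostream>
#include <nlohmann/json.hpp>

// llama.cpp's server uses nlohmann's ordered JSON type for its payloads.
using json = nlohmann::ordered_json;

int main() {
    const bool verbose = true;  // in the server this comes from the request / settings

    // A simplified OAI-compatible "chat.completion.chunk" payload.
    json delta;
    delta["content"] = "Hello";

    json choice;
    choice["index"] = 0;
    choice["delta"] = delta;

    json ret;
    ret["id"]      = "chatcmpl-123";
    ret["object"]  = "chat.completion.chunk";
    ret["choices"] = json::array({choice});

    // Hypothetical stand-in for to_json_non_oaicompat(): the full internal
    // (non-OAI) result, useful for debugging.
    json non_oaicompat;
    non_oaicompat["model"]            = "llama";
    non_oaicompat["tokens_predicted"] = 5;

    // The pattern added by the patch: expose the internal result under
    // "__verbose" only when verbose output is requested.
    if (verbose) {
        ret["__verbose"] = non_oaicompat;
    }

    std::cout << ret.dump(2) << std::endl;
    return 0;
}

With verbose enabled, each streamed chunk carries the extra "__verbose" object alongside the standard OAI-compatible fields, matching what to_json_oaicompat_chat already does for non-streaming responses.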