mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-06-28 12:25:03 +00:00
server : proper error handling for missing elements in messages array (OpenAI compatible backend) (#13540)
This commit is contained in:
committed by
GitHub
parent
b2838049cc
commit
c753d7bed0
@@ -643,6 +643,18 @@ static json oaicompat_completion_params_parse(
        throw std::runtime_error("Expected 'messages' to be an array");
    }
    for (auto & msg : messages) {
        std::string role = json_value(msg, "role", std::string());
        if (role != "assistant" && !msg.contains("content")) {
            throw std::runtime_error("All non-assistant messages must contain 'content'");
        }
        if (role == "assistant") {
            if (!msg.contains("content") && !msg.contains("tool_calls")) {
                throw std::runtime_error("Assistant message must contain either 'content' or 'tool_calls'!");
            }
            if (!msg.contains("content")) {
                continue; // avoid errors with no content
            }
        }
        json & content = msg.at("content");
        if (content.is_string() || content.is_null()) {
            continue;
|
Reference in New Issue
Block a user