Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-06-29 20:45:04 +00:00
server: effectively remove 'image_url'/'input_audio' JSON objects from 'llama_params' in multimodal model mode (#13853)
[fix]: effectively remove 'image_url'/'input_audio' from 'llama_params' in multimodal model mode
@@ -573,7 +573,7 @@ struct oaicompat_parser_options {
 // used by /chat/completions endpoint
 static json oaicompat_chat_params_parse(
-    const json & body, /* openai api json semantics */
+    json & body, /* openai api json semantics */
     const oaicompat_parser_options & opt,
     std::vector<raw_buffer> & out_files)
 {
@@ -624,7 +624,7 @@ static json oaicompat_chat_params_parse(
     if (!body.contains("messages")) {
         throw std::runtime_error("'messages' is required");
     }
-    json messages = body.at("messages");
+    json & messages = body.at("messages");
     if (!messages.is_array()) {
         throw std::runtime_error("Expected 'messages' to be an array");
     }
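
The substance of the change is that `body` is now taken by non-const reference and `messages` is bound as a reference into it, so that when the parser strips `image_url`/`input_audio` content parts they are removed from the JSON that later becomes `llama_params`, rather than from a throwaway copy. Below is a minimal, self-contained sketch of that copy-vs-reference pitfall, assuming `json` is nlohmann::json as in the server code; the request shape and variable names are illustrative only, not the server's actual data.

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main() {
    // Hypothetical chat request body, only for illustration; the real parser
    // handles the full OpenAI chat-completions schema.
    json body = json::parse(R"({
        "messages": [
            {
                "role": "user",
                "content": [
                    { "type": "image_url", "image_url": { "url": "data:image/png;base64,..." } }
                ]
            }
        ]
    })");

    // Before the fix: `messages` is a copy, so clearing the content parts
    // leaves `body` (and therefore what becomes llama_params) untouched.
    json messages_copy = body.at("messages");
    messages_copy[0]["content"].clear();
    std::cout << body["messages"][0]["content"].size() << '\n';   // prints 1

    // After the fix: a reference into `body`, so the 'image_url'/'input_audio'
    // objects are actually removed from the request that is forwarded on.
    json & messages_ref = body.at("messages");
    messages_ref[0]["content"].clear();
    std::cout << body["messages"][0]["content"].size() << '\n';   // prints 0

    return 0;
}

The real parser erases individual parts rather than clearing the whole content array; the sketch only shows why the mutation must go through a reference, which is also why the `body` parameter had to lose its `const` qualifier.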