Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-29 04:35:05 +00:00)
server: handle echo=false on /v1/completions (#12060)
@@ -521,8 +521,13 @@ static json oaicompat_completion_params_parse(const json & body) {
         throw std::runtime_error("Only one completion choice is allowed");
     }
 
+    // Handle "echo" field
+    if (json_value(body, "echo", false)) {
+        throw std::runtime_error("Only no echo is supported");
+    }
+
     // Params supported by OAI but unsupported by llama.cpp
-    static const std::vector<std::string> unsupported_params { "best_of", "echo", "suffix" };
+    static const std::vector<std::string> unsupported_params { "best_of", "suffix" };
     for (const auto & param : unsupported_params) {
         if (body.contains(param)) {
             throw std::runtime_error("Unsupported param: " + param);
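After this change, only echo=true is rejected: a request that sets echo to false, or omits it entirely (the OpenAI default), no longer trips the unsupported-params check, since "echo" was removed from that list and is now checked explicitly. Below is a minimal, self-contained sketch of that behavior. The json_value stand-in here is a hypothetical re-implementation for illustration only (assuming nlohmann::json), not the server's actual helper.

// Minimal sketch of the new echo handling; not the server's code.
#include <nlohmann/json.hpp>
#include <iostream>
#include <stdexcept>
#include <string>

using json = nlohmann::json;

// Hypothetical stand-in for the server's json_value helper (assumption:
// it returns body[key] if the key is present, otherwise the default value).
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    return body.contains(key) ? body.at(key).get<T>() : default_value;
}

int main() {
    // echo omitted or set to false (the OpenAI default): the request passes the check.
    // echo set to true: still rejected with "Only no echo is supported".
    for (const char * raw : {
            R"({"prompt": "Hello"})",
            R"({"prompt": "Hello", "echo": false})",
            R"({"prompt": "Hello", "echo": true})" }) {
        json body = json::parse(raw);
        try {
            if (json_value(body, "echo", false)) {
                throw std::runtime_error("Only no echo is supported");
            }
            std::cout << "accepted: " << raw << "\n";
        } catch (const std::exception & e) {
            std::cout << "rejected: " << raw << " (" << e.what() << ")\n";
        }
    }
}

Keeping the explicit check preserves a clear error message for echo=true, while best_of and suffix continue to be rejected through the generic unsupported-params loop.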