server : allow setting --reverse-prompt arg (#14799)
Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
@@ -1612,7 +1612,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.antiprompt.emplace_back(value);
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-sp", "--special"},
         string_format("special tokens output enabled (default: %s)", params.special ? "true" : "false"),

@@ -253,6 +253,7 @@ struct server_task {
         defaults.sampling = params_base.sampling;
         defaults.speculative = params_base.speculative;
         defaults.n_keep = params_base.n_keep;
+        defaults.antiprompt = params_base.antiprompt;

         // enabling this will output extra debug information in the HTTP responses from the server
         params.verbose = params_base.verbosity > 9;

@@ -490,6 +491,10 @@ struct server_task {
                     }
                 }
             }
+            // set reverse prompt from cli args if not set in the request
+            if (params.antiprompt.empty()) {
+                params.antiprompt = defaults.antiprompt;
+            }
         }

         {
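With this change, --reverse-prompt is registered for the server example as well: values given on the command line become the default antiprompt, and a request that does not supply its own stop strings falls back to them. A minimal usage sketch (the model path is a placeholder; llama-server and -m are assumed to be the usual server binary and model flag):

    llama-server -m model.gguf --reverse-prompt "User:"

Requests that do set their own stop strings still take precedence, since the fallback only applies when params.antiprompt is empty.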