mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-06-26 19:55:04 +00:00
llama : better rwkv chat template and add missing inputs.use_jinja
setting (#14336)
* llama-cli : add missing `inputs.use_jinja` setting
  Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
* llama : better legacy chat template for rwkv
  Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
---------
Signed-off-by: Molly Sophia <mollysophia379@gmail.com>
This commit is contained in:
@@ -528,12 +528,17 @@ int32_t llm_chat_apply_template(
|
||||
}
|
||||
} else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) {
|
||||
// this template requires the model to have "\n\n" as EOT token
|
||||
for (auto message : chat) {
|
||||
std::string role(message->role);
|
||||
if (role == "user") {
|
||||
ss << "User: " << message->content << "\n\nAssistant:";
|
||||
} else {
|
||||
ss << message->content << "\n\n";
|
||||
for (size_t i = 0; i < chat.size(); i++) {
|
||||
std::string role(chat[i]->role);
|
||||
if (role == "system") {
|
||||
ss << "System: " << trim(chat[i]->content) << "\n\n";
|
||||
} else if (role == "user") {
|
||||
ss << "User: " << trim(chat[i]->content) << "\n\n";
|
||||
if (i == chat.size() - 1) {
|
||||
ss << "Assistant:";
|
||||
}
|
||||
} else if (role == "assistant") {
|
||||
ss << "Assistant: " << trim(chat[i]->content) << "\n\n";
|
||||
}
|
||||
}
|
||||
} else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) {
|
||||
|
@@ -292,6 +292,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
if (!params.system_prompt.empty() || !params.prompt.empty()) {
|
||||
common_chat_templates_inputs inputs;
|
||||
inputs.use_jinja = g_params->use_jinja;
|
||||
inputs.messages = chat_msgs;
|
||||
inputs.add_generation_prompt = !params.prompt.empty();
|
||||
|
||||
|
Reference in New Issue
Block a user