Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-20 06:36:48 -04:00).
Add chatml fallback for cpp llama_chat_apply_template
(#8160)
* add chatml fallback for cpp `llama_chat_apply_template`
* remove redundant code
This commit is contained in:
@@ -380,6 +380,8 @@ struct llama_chat_msg {

 bool llama_chat_verify_template(const std::string & tmpl);

 // CPP wrapper for llama_chat_apply_template
+// If the built-in template is not supported, we default to chatml
+// If the custom "tmpl" is not supported, we throw an error
 std::string llama_chat_apply_template(const struct llama_model * model,
         const std::string & tmpl,
         const std::vector<llama_chat_msg> & chat,
|
Reference in New Issue
Block a user