Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-06-27 03:55:20 +00:00.
llama : fix llama_model_chat_template with template name (LLM_KV with suffix) (#14050)
src/llama-arch.cpp
@@ -200,7 +200,6 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TOKENIZER_HF_JSON,         "tokenizer.huggingface.json" },
     { LLM_KV_TOKENIZER_RWKV,            "tokenizer.rwkv.world" },
     { LLM_KV_TOKENIZER_CHAT_TEMPLATE,   "tokenizer.chat_template" },
-    { LLM_KV_TOKENIZER_CHAT_TEMPLATE_N, "tokenizer.chat_template.%s" },
     { LLM_KV_TOKENIZER_FIM_PRE_ID,      "tokenizer.ggml.fim_pre_token_id" },
     { LLM_KV_TOKENIZER_FIM_SUF_ID,      "tokenizer.ggml.fim_suf_token_id" },
     { LLM_KV_TOKENIZER_FIM_MID_ID,      "tokenizer.ggml.fim_mid_token_id" },
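Why the map entry above can be dropped: ::format() is a printf-style helper, and before this change LLM_KV::operator() always passed the architecture name as the first format argument whenever a suffix was present. Since "tokenizer.chat_template.%s" contains only one %s, a lookup by template name produced "tokenizer.chat_template.<arch>" rather than "tokenizer.chat_template.<name>". A standalone sketch of that positional-argument pitfall (the template name "rag" is a made-up example; this is illustrative code, not part of llama.cpp):

    #include <cstdio>

    int main() {
        // Old LLM_KV_TOKENIZER_CHAT_TEMPLATE_N entry: a single %s placeholder.
        const char * fmt    = "tokenizer.chat_template.%s";
        const char * arch   = "llama"; // first argument passed by the old LLM_KV::operator()
        const char * suffix = "rag";   // the requested template name, silently ignored

        char buf[128];
        // The lone %s consumes the first extra argument (arch); suffix is unused.
        std::snprintf(buf, sizeof(buf), fmt, arch, suffix);
        std::printf("%s\n", buf); // prints: tokenizer.chat_template.llama
        return 0;
    }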
src/llama-arch.cpp
@@ -1707,8 +1706,14 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
 LLM_KV::LLM_KV(llm_arch arch, const char * suffix) : arch(arch), suffix(suffix) {}
 
 std::string LLM_KV::operator()(llm_kv kv) const {
-    return suffix ? ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch), suffix)
-                  : ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
+    std::string name = ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
+
+    if (suffix != nullptr) {
+        name += ".";
+        name += suffix;
+    }
+
+    return name;
 }
 
 std::string LLM_TN_IMPL::str() const {
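The rewritten operator() composes the key generically: format the base name first, then append "." plus the suffix when one is given, so any LLM_KV name can take a suffix without needing a dedicated "%s" variant. A minimal standalone sketch of the same composition (kv_name is a hypothetical helper for illustration, not llama.cpp's API):

    #include <cassert>
    #include <string>

    // Mirrors the new LLM_KV::operator() logic: base key first, then an
    // optional ".<suffix>" appended at the end.
    static std::string kv_name(const std::string & base, const char * suffix) {
        std::string name = base;
        if (suffix != nullptr) {
            name += ".";
            name += suffix;
        }
        return name;
    }

    int main() {
        assert(kv_name("tokenizer.chat_template", nullptr) == "tokenizer.chat_template");
        assert(kv_name("tokenizer.chat_template", "rag")   == "tokenizer.chat_template.rag");
        return 0;
    }

This is why both the LLM_KV_TOKENIZER_CHAT_TEMPLATE_N map entry and the enum value below can be removed: the suffixed key is now derived from LLM_KV_TOKENIZER_CHAT_TEMPLATE itself.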
src/llama-arch.h
@@ -196,7 +196,6 @@ enum llm_kv {
     LLM_KV_TOKENIZER_HF_JSON,
     LLM_KV_TOKENIZER_RWKV,
     LLM_KV_TOKENIZER_CHAT_TEMPLATE,
-    LLM_KV_TOKENIZER_CHAT_TEMPLATE_N,
     LLM_KV_TOKENIZER_FIM_PRE_ID,
     LLM_KV_TOKENIZER_FIM_SUF_ID,
     LLM_KV_TOKENIZER_FIM_MID_ID,
src/llama-model.cpp
@@ -13788,7 +13788,7 @@ uint64_t llama_model_size(const llama_model * model) {
 }
 
 const char * llama_model_chat_template(const llama_model * model, const char * name) {
-    const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE_N)
+    const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE)
                           : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE);
     const auto & it = model->gguf_kv.find(key);
     if (it == model->gguf_kv.end()) {
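From the caller's side, llama_model_chat_template() now maps a non-null name to the "tokenizer.chat_template.<name>" GGUF key, and a null name to the default "tokenizer.chat_template"; the lookup above suggests it yields nullptr when the key is absent from the model's metadata. A usage sketch under those assumptions (the template name "rag" and the fallback policy are illustrative, not part of this commit):

    #include "llama.h"

    // Prefer a named chat template if the GGUF provides one, otherwise fall
    // back to the model's default template (which may itself be nullptr).
    static const char * pick_template(const llama_model * model) {
        const char * tmpl = llama_model_chat_template(model, "rag"); // tokenizer.chat_template.rag
        if (tmpl == nullptr) {
            tmpl = llama_model_chat_template(model, nullptr);        // tokenizer.chat_template
        }
        return tmpl;
    }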