diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index bd9cd8144..dd80a4a05 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -4408,9 +4408,6 @@ class Gemma3NModel(Gemma3Model):
     ]
 
     def set_vocab(self):
-        with open(self.dir_model / "chat_template.jinja") as f:
-            # quick hack to make sure chat template is added
-            self.gguf_writer.add_chat_template(f.read())
         super().set_vocab()
 
     def set_gguf_parameters(self):
diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py
index 3f541b0c0..635fcef35 100644
--- a/gguf-py/gguf/vocab.py
+++ b/gguf-py/gguf/vocab.py
@@ -245,9 +245,18 @@ class SpecialVocab:
         if not tokenizer_config:
             return True
         chat_template_alt = None
-        chat_template_file = path / 'chat_template.json'
-        if chat_template_file.is_file():
-            with open(chat_template_file, encoding = 'utf-8') as f:
+        chat_template_json = path / 'chat_template.json'
+        chat_template_jinja = path / 'chat_template.jinja'
+        if chat_template_jinja.is_file():
+            with open(chat_template_jinja, encoding = 'utf-8') as f:
+                chat_template_alt = f.read()
+            if additional_templates := list((path / 'additional_chat_templates').glob('*.jinja')):
+                chat_template_alt = [{'name': 'default', 'template': chat_template_alt}]
+                for template_path in additional_templates:
+                    with open(template_path, encoding = 'utf-8') as fp:
+                        chat_template_alt.append({'name': template_path.stem, 'template': fp.read()})
+        elif chat_template_json.is_file():
+            with open(chat_template_json, encoding = 'utf-8') as f:
                 chat_template_alt = json.load(f).get('chat_template')
         chat_template = tokenizer_config.get('chat_template', chat_template_alt)
         if chat_template is None or isinstance(chat_template, (str, list)):
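
Note (not part of the patch): the following is a minimal standalone sketch of the template
resolution order that the new SpecialVocab logic above follows, so the change is easier to
review in isolation. The helper name `resolve_chat_template` is hypothetical; the file names,
the `additional_chat_templates/*.jinja` handling, and the fallback behaviour mirror the hunk
in gguf-py/gguf/vocab.py.

    from __future__ import annotations

    import json
    from pathlib import Path


    def resolve_chat_template(model_dir: Path, tokenizer_config: dict) -> str | list | None:
        fallback: str | list | None = None
        jinja_file = model_dir / 'chat_template.jinja'
        json_file = model_dir / 'chat_template.json'

        if jinja_file.is_file():
            # Prefer the raw Jinja file; extra named templates can live next to it.
            fallback = jinja_file.read_text(encoding='utf-8')
            extra = sorted((model_dir / 'additional_chat_templates').glob('*.jinja'))
            if extra:
                fallback = [{'name': 'default', 'template': fallback}] + [
                    {'name': p.stem, 'template': p.read_text(encoding='utf-8')} for p in extra
                ]
        elif json_file.is_file():
            # Legacy single-template JSON wrapper.
            fallback = json.loads(json_file.read_text(encoding='utf-8')).get('chat_template')

        # tokenizer_config.json still wins if it carries its own chat_template entry.
        return tokenizer_config.get('chat_template', fallback)

Because chat_template.jinja is now picked up generically by SpecialVocab, the Gemma3N-specific
workaround removed from convert_hf_to_gguf.py (which read that file by hand) is no longer needed.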