Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-02 07:26:38 -04:00)
gguf-py : export chat templates (#4125)
* gguf-py : export chat templates
* llama.cpp : escape new lines in gguf kv info prints
* gguf-py : bump version
* gguf-py : check chat_template type
* gguf-py : initialize chat_template
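For context, a minimal sketch of how a conversion script might embed a chat template with the writer API this commit describes. It assumes gguf-py exposes GGUFWriter.add_chat_template (as the commit title suggests, reportedly storing the value under a dedicated tokenizer KV key); the file names and template source below are illustrative only, not taken from this commit.

import json

from gguf import GGUFWriter

# Read the Jinja-style template from a tokenizer config (illustrative path).
with open("tokenizer_config.json", "r", encoding="utf-8") as f:
    chat_template = json.load(f).get("chat_template")

writer = GGUFWriter("model.gguf", arch="llama")

# Mirror the commit's type check: only export the template when it is a string.
if isinstance(chat_template, str):
    writer.add_chat_template(chat_template)

# Write the header and KV section (no tensors in this sketch), then close.
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.close()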
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gguf"
-version = "0.5.3"
+version = "0.6.0"
 description = "Read and write ML models in GGUF for GGML"
 authors = ["GGML <ggml@ggml.ai>"]
 packages = [