mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-08-15 12:42:40 -04:00
Fix baichuan convert script not detecting model (#3739)
It seems nobody objects.
This commit is contained in:
@@ -110,7 +110,7 @@ print("gguf: loading model "+dir_model.name)

 with open(dir_model / "config.json", "r", encoding="utf-8") as f:
     hparams = json.load(f)
 print("hello print: ",hparams["architectures"][0])
-if hparams["architectures"][0] != "BaichuanForCausalLM":
+if hparams["architectures"][0] != "BaichuanForCausalLM" and hparams["architectures"][0] != "BaiChuanForCausalLM":
     print("Model architecture not supported: " + hparams["architectures"][0])
     sys.exit()
Reference in New Issue
Block a user