export separate lora ggufs instead

This commit is contained in:
Sigbjørn Skjæret
2025-07-06 22:24:46 +02:00
committed by GitHub
parent 9a39ccb7d9
commit 966d0e0e0b
13 changed files with 85 additions and 82 deletions

View File

@@ -2460,7 +2460,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
{"--lora"}, "FNAME",
"path to LoRA adapter (can be repeated to use multiple adapters)",
[](common_params & params, const std::string & value) {
params.lora_adapters.push_back({ std::string(value), 1.0, nullptr });
params.lora_adapters.push_back({ std::string(value), 1.0, "", "", nullptr });
}
// we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
@@ -2468,7 +2468,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
{"--lora-scaled"}, "FNAME", "SCALE",
"path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
[](common_params & params, const std::string & fname, const std::string & scale) {
params.lora_adapters.push_back({ fname, std::stof(scale), nullptr });
params.lora_adapters.push_back({ fname, std::stof(scale), "", "", nullptr });
}
// we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));

View File

@@ -993,6 +993,8 @@ struct common_init_result common_init_from_params(common_params & params) {
}
la.ptr = lora.get();
la.task_name = llama_adapter_lora_task_name(la.ptr);
la.prompt_prefix = llama_adapter_lora_prompt_prefix(la.ptr);
iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters
}

View File

@@ -31,6 +31,9 @@ struct common_adapter_lora_info {
// One user-requested LoRA adapter: CLI inputs plus metadata read back after loading.
std::string path;   // filesystem path to the adapter GGUF (from --lora / --lora-scaled)
float scale;        // user scaling factor; plain --lora uses 1.0
std::string task_name;      // filled via llama_adapter_lora_task_name() once the adapter is loaded
std::string prompt_prefix;  // filled via llama_adapter_lora_prompt_prefix() once the adapter is loaded
struct llama_adapter_lora * ptr;    // raw handle from lora.get(); owning smart pointer is moved into iparams.lora — non-owning here, TODO confirm
};