mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-06-26 19:55:04 +00:00
common : add load_progress_callback (#13617)
This commit is contained in:
@@ -1102,6 +1102,9 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
 
     mparams.tensor_buft_overrides = params.tensor_buft_overrides.data();
 
+    mparams.progress_callback = params.load_progress_callback;
+    mparams.progress_callback_user_data = params.load_progress_callback_user_data;
+
     return mparams;
 }
 
|
@@ -428,6 +428,11 @@ struct common_params {
 
     // common params
     std::string out_file; // output filename for all example programs
 
+    // optional callback for model loading progress and cancellation:
+    // called with a progress value between 0.0 and 1.0.
+    // return false from callback to abort model loading or true to continue
+    llama_progress_callback load_progress_callback = NULL;
+    void * load_progress_callback_user_data = NULL;
 };
 
 // call once at the start of a program if it uses libcommon
|
Reference in New Issue
Block a user