sampling : fix off-by-one in tail-free sampling
ggml-ci
@@ -963,7 +963,7 @@ gpt_params_context gpt_params_parser_init(gpt_params & params, llama_example ex,
         }
     ).set_sparam());
     add_opt(llama_arg(
-        {"--tfs"}, "N",
+        {"--tfs", "--tfs-z"}, "Z",
         format("tail free sampling, parameter z (default: %.1f, 1.0 = disabled)", (double)params.sparams.tfs_z),
         [](gpt_params & params, const std::string & value) {
             params.sparams.tfs_z = std::stof(value);
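The hunk above only touches the CLI surface: it adds a --tfs-z alias for --tfs and renames the value placeholder from "N" to "Z" so it matches the "parameter z" wording in the help text. The off-by-one named in the commit title presumably lives in the sampler itself, which this hunk does not show. As a minimal, self-contained C++ sketch of tail-free sampling (illustrative only; tail_free_sketch is not a llama.cpp function, and the real implementation differs), here are the index bounds where such a bug typically hides:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <functional>
#include <vector>

// Illustrative sketch of tail-free sampling (TFS); not the llama.cpp code.
// Sort probabilities, weight tokens by the normalized absolute second
// differences of the sorted curve, and keep tokens until the cumulative
// weight exceeds z. With n probabilities there are only n-2 second
// differences, and the cutoff must still include the token at which the
// threshold is crossed -- exactly the kind of bounds where an off-by-one
// can hide.
static std::vector<float> tail_free_sketch(std::vector<float> probs, float z) {
    if (z >= 1.0f || probs.size() <= 2) {
        return probs; // z = 1.0 disables TFS, matching the help string above
    }
    std::sort(probs.begin(), probs.end(), std::greater<float>());

    // second differences: d2[i] = p[i] - 2*p[i+1] + p[i+2], for i in [0, n-2)
    std::vector<float> d2(probs.size() - 2);
    float sum = 0.0f;
    for (size_t i = 0; i + 2 < probs.size(); ++i) {
        d2[i] = std::fabs(probs[i] - 2.0f*probs[i + 1] + probs[i + 2]);
        sum  += d2[i];
    }
    if (sum <= 0.0f) {
        return probs; // flat curvature: nothing to trim
    }

    // keep tokens up to and including the first index whose cumulative
    // normalized weight exceeds z
    float  cum  = 0.0f;
    size_t keep = probs.size();
    for (size_t i = 0; i < d2.size(); ++i) {
        cum += d2[i] / sum;
        if (cum > z) {
            keep = i + 1; // the "+ 1" keeps the crossing token itself
            break;
        }
    }
    probs.resize(std::max<size_t>(keep, 1));
    return probs; // caller would renormalize before sampling
}

After this change, the parameter can be set as either --tfs 0.95 or --tfs-z 0.95, and z = 1.0 keeps the full distribution, as the "1.0 = disabled" note in the help string indicates.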