Mirror of https://github.com/ggml-org/llama.cpp.git
Init llama_context_params properly from CLI (#370)
1 changed file: main.cpp (5 lines changed)
@@ -194,7 +194,10 @@ int main(int argc, char ** argv) {
     {
         auto lparams = llama_context_default_params();
 
-        lparams.f16_kv = params.memory_f16;
+        lparams.n_ctx      = params.n_ctx;
+        lparams.n_parts    = params.n_parts;
+        lparams.seed       = params.seed;
+        lparams.f16_kv     = params.memory_f16;
         lparams.logits_all = params.perplexity;
 
         ctx = llama_init_from_file(params.model.c_str(), lparams);
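For context, here is a minimal standalone sketch of the flow this commit fixes: CLI values are parsed into a params struct and then copied into llama_context_params before the model is loaded. The reduced struct and the flag names below are illustrative stand-ins for the real gpt_params/argument parser in the repository; the llama.h calls and field names match the ones visible in the diff above.

```cpp
// Sketch only: shows how CLI-controlled values end up in llama_context_params.
// The struct and flag names are hypothetical; the llama_* API is the one used
// by main.cpp at the time of this commit.
#include <cstdio>
#include <cstdlib>
#include <string>

#include "llama.h"

// Reduced, illustrative version of the params struct filled by the CLI parser.
struct cli_params {
    std::string model      = "models/7B/ggml-model.bin";
    int         n_ctx      = 512;    // context size
    int         n_parts    = -1;     // number of model parts (-1 = auto)
    int         seed       = -1;     // RNG seed (-1 = random)
    bool        memory_f16 = false;  // use f16 for the KV cache
    bool        perplexity = false;  // keep logits for all tokens
};

int main(int argc, char ** argv) {
    cli_params params;

    // Illustrative flag parsing; the real parser lives elsewhere in the repo.
    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];
        if (arg == "-m" && i + 1 < argc)       params.model      = argv[++i];
        else if (arg == "-c" && i + 1 < argc)  params.n_ctx      = atoi(argv[++i]);
        else if (arg == "-s" && i + 1 < argc)  params.seed       = atoi(argv[++i]);
        else if (arg == "--memory_f16")        params.memory_f16 = true;
        else if (arg == "--perplexity")        params.perplexity = true;
    }

    // The point of the commit: copy every CLI-controlled field into
    // llama_context_params instead of relying on the library defaults.
    auto lparams = llama_context_default_params();
    lparams.n_ctx      = params.n_ctx;
    lparams.n_parts    = params.n_parts;
    lparams.seed       = params.seed;
    lparams.f16_kv     = params.memory_f16;
    lparams.logits_all = params.perplexity;

    llama_context * ctx = llama_init_from_file(params.model.c_str(), lparams);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to load model '%s'\n", params.model.c_str());
        return 1;
    }

    // ... tokenize, evaluate, sample ...

    llama_free(ctx);
    return 0;
}
```

Before this change, only f16_kv (and logits_all) were forwarded, so options such as the context size and seed set on the command line were silently ignored by the context.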