embedding : free the batch after execution (#7297)
@@ -211,6 +211,7 @@ int main(int argc, char ** argv) {
 
     // clean up
     llama_print_timings(ctx);
+    llama_batch_free(batch);
     llama_free(ctx);
     llama_free_model(model);
     llama_backend_free();
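In the embedding example, the batch is created earlier in main with llama_batch_init(), which allocates per-token buffers on the heap; before this commit those buffers were never released, so the example leaked them on exit. The commit adds the matching llama_batch_free() call to the cleanup block. Below is a minimal sketch (not taken from the repository) of the allocation/cleanup pairing the commit completes; the model path and batch sizes are placeholders, and the function names follow the llama.cpp C API as of this commit, some of which have been renamed in newer releases.

/*
 * Minimal sketch, not the embedding example itself: pairing llama_batch_init
 * with llama_batch_free during teardown. "model.gguf" and the sizes passed to
 * llama_batch_init are placeholder values.
 */
#include "llama.h"

int main(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams); // placeholder path
    if (model == NULL) {
        llama_backend_free();
        return 1;
    }

    struct llama_context_params cparams = llama_context_default_params();
    struct llama_context * ctx = llama_new_context_with_model(model, cparams);

    // a batch created with llama_batch_init owns heap buffers and must be
    // released with llama_batch_free
    struct llama_batch batch = llama_batch_init(512, 0, 1); // placeholder sizes

    // ... tokenize the prompt, fill the batch, call llama_decode(ctx, batch),
    //     read the resulting embeddings ...

    // clean up, in the same order as the embedding example after this commit
    llama_print_timings(ctx);
    llama_batch_free(batch);   // the call this commit adds
    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();

    return 0;
}

The batch does not hold a reference to the context, so its exact position in the teardown sequence matters less than the fact that it is freed at all; placing it alongside the other cleanup calls simply keeps every llama_*_init paired with its release.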