mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-09-01 12:52:17 -04:00
gguf : fix resource leaks (#6061)
There are several places where a gguf context is allocated. A call to gguf_free is missing in some error paths. Also, on Linux, llama-bench was missing an fclose.
This commit is contained in:
@@ -211,6 +211,7 @@ static bool gguf_ex_read_1(const std::string & fname) {
for (int j = 0; j < ggml_nelements(cur); ++j) {
    if (data[j] != 100 + i) {
        fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
        gguf_free(ctx);
        return false;
    }
}
Reference in New Issue
Block a user