Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-29 20:45:04 +00:00)
quantize : fail fast on write errors (#3521)
@@ -7194,6 +7194,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         }
     }
 
     std::ofstream fout(fname_out, std::ios::binary);
+    fout.exceptions(std::ofstream::failbit); // fail fast on write errors
 
     const size_t meta_size = gguf_get_meta_size(ctx_out);
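A minimal sketch (not part of the commit) of why enabling failbit exceptions makes write errors surface immediately: once failbit is in the stream's exception mask, any failure throws std::ios_base::failure instead of silently setting error flags that the caller might never check. The output path below is hypothetical and chosen only so the open fails.

#include <fstream>
#include <iostream>

int main() {
    try {
        // Hypothetical path used only for illustration; opening it fails,
        // so failbit is already set when the exception mask is installed.
        std::ofstream fout("/nonexistent-dir/out.bin", std::ios::binary);

        // Same call the commit adds: failures now throw rather than being
        // silently recorded in the stream state. Because failbit is already
        // set here, this call throws immediately; otherwise any later failed
        // write would throw at the point of failure.
        fout.exceptions(std::ofstream::failbit);

        const char data[] = "payload";
        fout.write(data, sizeof(data));
    } catch (const std::ios_base::failure & e) {
        std::cerr << "write error detected immediately: " << e.what() << "\n";
        return 1;
    }
    return 0;
}

Without the exception mask, each write would have to be followed by an explicit check of the stream state to catch a partially written or unwritable output file.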