fix: crash when calling llama_state_get_size on a context without a KV cache (#13542)

This commit is contained in:
Gilad S.
2025-05-14 19:18:18 +03:00
committed by GitHub
parent 4696d56749
commit 017f10b5fa

View File

@@ -1704,10 +1704,12 @@ size_t llama_context::state_write_data(llama_io_write_i & io) {
         }
     }

-    LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
     llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-    kv_self->state_write(io);
+
+    if (kv_self != nullptr) {
+        LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
+        kv_self->state_write(io);
+    }

     return io.n_bytes();
 }