From 017f10b5fa630a013ec4f9936e410a60d4f460d5 Mon Sep 17 00:00:00 2001
From: "Gilad S." <7817232+giladgd@users.noreply.github.com>
Date: Wed, 14 May 2025 19:18:18 +0300
Subject: [PATCH] fix: crash when calling `llama_state_get_size` on a context
 without a KV cache (#13542)

---
 src/llama-context.cpp | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index 62246c10d..1b76317da 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -1704,10 +1704,12 @@ size_t llama_context::state_write_data(llama_io_write_i & io) {
         }
     }
 
-    LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
     llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-
-    kv_self->state_write(io);
+
+    if (kv_self != nullptr) {
+        LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
+        kv_self->state_write(io);
+    }
 
     return io.n_bytes();
 }