Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-27 12:05:03 +00:00)
fix: crash when calling llama_state_get_size on a context without a KV cache (#13542)
@@ -1704,10 +1704,12 @@ size_t llama_context::state_write_data(llama_io_write_i & io) {
         }
     }
 
-    LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
+    llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
 
-    llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-    kv_self->state_write(io);
+    if (kv_self != nullptr) {
+        LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__);
+        kv_self->state_write(io);
+    }
 
     return io.n_bytes();
 }
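For reference, a minimal caller-side sketch of the path this commit guards: llama_state_get_size() computes the size by running the same state_write_data() logic with a byte-counting dummy writer, so before this fix it dereferenced the KV cache unconditionally and crashed when the context had none. This is an illustrative repro under assumptions, not code from the PR; function names follow recent llama.h (llama_model_load_from_file, llama_init_from_model), and whichever model/context combination leaves the context without a KV cache is assumed rather than shown.

// Hypothetical repro sketch. The exact model/params that yield a
// cache-less context depend on the build and model; only the call
// sequence that used to crash is shown.
#include "llama.h"
#include <stdio.h>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s model.gguf\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    // Load a model and create a context with default parameters.
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_model_load_from_file(argv[1], mparams);
    if (model == NULL) {
        return 1;
    }

    struct llama_context_params cparams = llama_context_default_params();
    struct llama_context * ctx = llama_init_from_model(model, cparams);
    if (ctx == NULL) {
        llama_model_free(model);
        return 1;
    }

    // Before this fix, this call crashed if the context had no KV cache;
    // after it, the KV section is simply skipped and the size reflects
    // only the remaining state (output ids, logits, embeddings, ...).
    size_t n = llama_state_get_size(ctx);
    printf("state size: %zu bytes\n", n);

    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}

The patch takes the null-check route rather than asserting, so serializing a cache-less context now just omits the KV section and the reported size shrinks accordingly.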