Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-28 02:58:27 -04:00)
memory : migrate from llama_kv_cache to more generic llama_memory (#14006)
* memory : merge llama_kv_cache into llama_memory + new `llama_memory` API

ggml-ci

* context : fix casts

ggml-ci
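This refactor removes the hard dependency of the context code on the concrete `llama_kv_cache` type and routes memory access through a generic `llama_memory` abstraction instead. The sketch below only illustrates that pattern under assumed names (`memory_i`, `kv_cache_unified`, `context` and their members are hypothetical and do not reproduce the actual llama.cpp interfaces): a unified KV cache becomes one implementation behind an abstract memory interface, so the context depends only on the generic type.

```cpp
#include <cstdint>
#include <memory>
#include <vector>

// Illustrative sketch only; names are hypothetical, not the llama.cpp API.
// Generic memory interface: the context talks to this, not to a concrete cache.
struct memory_i {
    virtual ~memory_i() = default;

    virtual void clear() = 0;                                        // drop all cached state
    virtual bool seq_rm(int32_t seq_id, int32_t p0, int32_t p1) = 0; // remove a position range
};

// One concrete implementation: a unified KV cache.
class kv_cache_unified : public memory_i {
public:
    void clear() override { cells.clear(); }

    bool seq_rm(int32_t seq_id, int32_t p0, int32_t p1) override {
        for (auto & c : cells) {
            if (c.seq_id == seq_id && c.pos >= p0 && (p1 < 0 || c.pos < p1)) {
                c.seq_id = -1; // mark the cell as free
            }
        }
        return true;
    }

private:
    struct cell { int32_t pos = -1; int32_t seq_id = -1; };
    std::vector<cell> cells;
};

// The context owns its memory through the generic interface, so other memory
// types (e.g. recurrent state) can be swapped in without touching callers.
struct context {
    std::unique_ptr<memory_i> memory = std::make_unique<kv_cache_unified>();
};
```

The payoff of this structure is that code which previously manipulated the KV cache directly only needs the generic interface, which is what the new `llama_memory` API exposes.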
@@ -17,7 +17,7 @@ struct ggml_tensor;
 struct llama_ubatch;
 struct llama_cparams;
 
-class llama_memory_state_i;
+struct llama_memory_state_i;
 
 class llama_kv_cache_unified_state;
 class llama_kv_cache_unified_iswa_state;
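The only change in this hunk is the tag on the forward declaration of `llama_memory_state_i`, switching from `class` to `struct`, presumably so that the declaration matches the tag used where the type is defined. Mismatched class/struct tags on the same type are allowed by the C++ standard but provoke warnings on some compilers (MSVC C4099, for example). A minimal, llama.cpp-independent illustration (the `my_state` names are made up):

```cpp
// Forward declaration with a mismatched tag: accepted by the standard,
// but MSVC emits warning C4099 and the declaration reads misleadingly.
class  my_state;             // declared as 'class' ...
struct my_state { int x; };  // ... but defined as 'struct'

// Consistent version: the forward declaration uses the same tag as the definition.
struct my_state2;
struct my_state2 { int y; };
```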