diff --git a/src/llama-memory-hybrid.cpp b/src/llama-memory-hybrid.cpp
index d8e2086c8..e98b4e354 100644
--- a/src/llama-memory-hybrid.cpp
+++ b/src/llama-memory-hybrid.cpp
@@ -25,6 +25,7 @@ llama_memory_hybrid::llama_memory_hybrid(
                         /* common */
              uint32_t   n_seq_max,
                  bool   offload,
+                 bool   unified,
                         /* layer filters */
       layer_filter_cb && filter_attn,
       layer_filter_cb && filter_recr) :
@@ -38,7 +39,7 @@ llama_memory_hybrid::llama_memory_hybrid(
         type_v,
         v_trans,
         offload,
-        1,
+        unified,
         kv_size,
         n_seq_max,
         n_pad,
diff --git a/src/llama-memory-hybrid.h b/src/llama-memory-hybrid.h
index 4ac318175..c2d56cd54 100644
--- a/src/llama-memory-hybrid.h
+++ b/src/llama-memory-hybrid.h
@@ -39,6 +39,7 @@ public:
                         /* common */
              uint32_t   n_seq_max,
                  bool   offload,
+                 bool   unified,
                         /* layer filters */
       layer_filter_cb && filter_attn = nullptr,
       layer_filter_cb && filter_recr = nullptr);
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 6b58fb8a0..60a615c15 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -17598,6 +17598,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
                    /* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max),
                    /* n_seq_max         */ cparams.n_seq_max,
                    /* offload           */ cparams.offload_kqv,
+                   /* unified           */ cparams.kv_unified,
                    /* filter_attn       */ (arch == LLM_ARCH_FALCON_H1) ? [&](int32_t) { return true; } : (llama_memory_hybrid::layer_filter_cb)nullptr,
                    /* filter_recr       */ (arch == LLM_ARCH_FALCON_H1) ? [&](int32_t) { return true; } : (llama_memory_hybrid::layer_filter_cb)nullptr);
        } else {
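
To make the intent of the change easier to see in isolation, here is a minimal, self-contained C++ sketch of the pattern the diff applies. It is not llama.cpp's actual code: `cache_unified`, `memory_hybrid`, and `context_params` below are hypothetical stand-ins for `llama_kv_cache_unified`, `llama_memory_hybrid`, and `cparams`. The point is that the hybrid memory previously passed a hard-coded `1` (always unified) to its attention KV cache, and now forwards a caller-controlled `unified` flag threaded down from the context params.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for llama_kv_cache_unified: a unified cache shares one
    // stream across all sequences, otherwise it keeps one per sequence.
    struct cache_unified {
        cache_unified(bool offload, bool unified, uint32_t kv_size, uint32_t n_seq_max)
            : unified(unified),
              n_stream(unified ? 1 : n_seq_max) { // assumption for illustration
            (void) offload;
            (void) kv_size;
        }
        bool     unified;
        uint32_t n_stream;
    };

    // Stand-in for llama_memory_hybrid: before the change it effectively
    // passed `1` here; after the change it forwards the flag unchanged.
    struct memory_hybrid {
        memory_hybrid(uint32_t n_seq_max, bool offload, bool unified)
            : attn(offload, /* was hard-coded 1 */ unified, /* kv_size */ 4096, n_seq_max) {}
        cache_unified attn;
    };

    // Stand-in for the context params carrying the user's choice,
    // mirroring cparams.kv_unified in the diff.
    struct context_params {
        uint32_t n_seq_max   = 4;
        bool     offload_kqv = true;
        bool     kv_unified  = false;
    };

    int main() {
        context_params cparams;
        memory_hybrid mem(cparams.n_seq_max, cparams.offload_kqv, cparams.kv_unified);
        std::printf("unified=%d n_stream=%u\n",
                    (int) mem.attn.unified, (unsigned) mem.attn.n_stream);
        return 0;
    }

The design choice mirrored here is plumbing rather than new behavior: adding the `bool unified` parameter at each layer of the constructor chain keeps the hybrid memory path consistent with however the non-hybrid path already interprets `kv_unified`, instead of silently forcing unified mode for hybrid models.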