mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-27 12:05:03 +00:00)
kv-cache : fix shift
ggml-ci
@@ -944,11 +944,9 @@ llm_graph_result_ptr llama_kv_cache_unified::build_graph_shift(
     const auto & n_embd_head_k = hparams.n_embd_head_k;
   //const auto & n_embd_head_v = hparams.n_embd_head_v;
 
-    //GGML_ASSERT(kv_self->size == n_ctx);
-
     auto inp = std::make_unique<llm_graph_input_k_shift>(this);
 
-    inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, cparams.n_ctx);
+    inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, cells.size());
     ggml_set_input(inp->k_shift);
 
     for (const auto & layer : layers) {
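The hunk above sizes the k_shift graph input by cells.size() instead of cparams.n_ctx: build_graph_shift feeds one accumulated shift per KV cell into this tensor, and the number of cells in the unified cache is not guaranteed to equal the configured context length. Below is a minimal sketch of that shape contract, not the upstream graph code; it assumes a ggml build to compile against, and the cell count and shift values are made up:

    #include <cstdint>
    #include <vector>

    #include "ggml.h"

    int main() {
        // stand-in for cells.size(); the values below are made up
        const int64_t n_cells = 5;
        const std::vector<int32_t> shifts = {0, 4, 4, 0, -2};

        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // one int32 element per KV cell, as in the fixed line
        struct ggml_tensor * k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_cells);
        ggml_set_input(k_shift);

        // each cell's accumulated shift lands at its own index
        for (int64_t i = 0; i < n_cells; ++i) {
            ((int32_t *) k_shift->data)[i] = shifts[i];
        }

        ggml_free(ctx);
        return 0;
    }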
@@ -317,8 +317,6 @@ public:
         pos[i] += d;
         shift[i] += d;
 
-        seq_pos_add(i);
-
         has_shift = true;
 
         if (pos[i] < 0) {
@@ -326,12 +324,15 @@ public:
 
             seq[i].reset();
             pos[i] = -1;
+            shift[i] = 0;
 
             used.erase(i);
 
             return true;
         }
 
+        seq_pos_add(i);
+
         return false;
     }
 
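Taken together, the two hunks above rework the cell-update path (pos_add() in the upstream kv-cells header; the method name itself is not visible in these hunks). Before the change, seq_pos_add(i) ran unconditionally right after the position update, so a cell whose position went negative was first registered in the per-sequence position bookkeeping and then wiped, and the cell's accumulated shift survived the wipe. After the change, seq_pos_add(i) runs only when the cell stays occupied, and a discarded cell resets shift[i] to 0. A reconstruction of the resulting control flow with simplified stand-in types; only the lines that appear in the hunks are authoritative:

    #include <cassert>
    #include <bitset>
    #include <cstdint>
    #include <set>
    #include <vector>

    struct kv_cells_sketch {
        bool has_shift = false;

        std::vector<int32_t>         pos;   // -1 marks an empty cell
        std::vector<int32_t>         shift; // accumulated shift per cell
        std::vector<std::bitset<64>> seq;   // which sequences use each cell
        std::set<uint32_t>           used;  // indices of occupied cells

        // stand-in for the real per-sequence position bookkeeping
        void seq_pos_add(uint32_t /*i*/) {}

        // returns true if the cell becomes empty
        bool pos_add(uint32_t i, int32_t d) {
            assert(pos[i] != -1);

            pos[i]   += d;
            shift[i] += d;

            has_shift = true;

            if (pos[i] < 0) {
                // the cell is discarded: clear everything, including the
                // accumulated shift (the "+ shift[i] = 0;" hunk), so no
                // stale shift leaks into the cell's next occupant
                seq[i].reset();
                pos[i]   = -1;
                shift[i] = 0;

                used.erase(i);

                return true;
            }

            // re-register the cell only if it stays occupied; before the
            // fix this ran above, before the pos[i] < 0 check
            seq_pos_add(i);

            return false;
        }
    };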