llama-graph : fix text position for mrope (#13159)
* llama-graph : fix text position for mrope
* fix typo
* explicitly set 4th dim in the loop
@@ -55,13 +55,16 @@ void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) {
     if (ubatch->pos && pos) {
         const int64_t n_tokens = ubatch->n_tokens;
 
-        if (ubatch->token && n_pos_per_embd > 1) {
+        if (ubatch->token && n_pos_per_embd == 4) {
             // in case we're using M-RoPE with text tokens, convert the 1D positions to 4D
-            // the other dimensions are all 0, they are unused for text tokens
-            std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd, 0);
+            // the 3 first dims are the same, and 4th dim is all 0
+            std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd);
             // copy the first dimension
             for (int i = 0; i < n_tokens; ++i) {
-                pos_data[i] = ubatch->pos[i];
+                pos_data[               i] = ubatch->pos[i];
+                pos_data[    n_tokens + i] = ubatch->pos[i];
+                pos_data[2 * n_tokens + i] = ubatch->pos[i];
+                pos_data[3 * n_tokens + i] = 0; // 4th dim is 0
             }
             ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos));
         } else {
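For context, here is a minimal standalone sketch of the layout the fixed loop produces for M-RoPE text tokens: four sections of n_tokens positions each, with the first three sections carrying the same 1D text position and the fourth section set to zero. The helper name text_pos_to_mrope_4d and the llama_pos alias are assumptions for illustration only; the real code writes into pos_data and uploads it with ggml_backend_tensor_set as shown above.

#include <cstdint>
#include <vector>

// Assumption: llama_pos is a 32-bit signed integer, as in llama.h.
using llama_pos = int32_t;

// Hypothetical helper (not part of the patch) mirroring the fixed loop:
// expand 1D text positions into the 4-section layout M-RoPE expects.
static std::vector<llama_pos> text_pos_to_mrope_4d(const llama_pos * pos, int64_t n_tokens) {
    const int64_t n_pos_per_embd = 4;
    std::vector<llama_pos> pos_data(n_tokens * n_pos_per_embd);
    for (int64_t i = 0; i < n_tokens; ++i) {
        pos_data[               i] = pos[i]; // dim 0: the text position
        pos_data[    n_tokens + i] = pos[i]; // dim 1: same as dim 0
        pos_data[2 * n_tokens + i] = pos[i]; // dim 2: same as dim 0
        pos_data[3 * n_tokens + i] = 0;      // dim 3: unused for text tokens
    }
    return pos_data;
}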