llama : add 128k yarn context for Qwen (#10698)
* add 128k yarn context for Qwen
* added property for model tensors
* removing useless line
@@ -761,6 +761,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_K,
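Adding MODEL_TENSOR.ROPE_FREQS to the architecture's tensor list only registers the tensor; the 128k context itself comes from YaRN rope-scaling metadata written into the GGUF file at conversion time. Below is a minimal sketch of how that metadata could be exported from a HuggingFace-style config dict using the existing gguf-py writer API. The helper name set_yarn_rope_scaling and the exact config layout are assumptions for illustration, not code taken from this commit:

```python
import gguf


def set_yarn_rope_scaling(gguf_writer: gguf.GGUFWriter, hparams: dict) -> None:
    """Export YaRN rope-scaling metadata from a HF-style config dict.

    Assumes the transformers convention, e.g.:
      "rope_scaling": {"type": "yarn", "factor": 4.0,
                       "original_max_position_embeddings": 32768}
    """
    rope_scaling = hparams.get("rope_scaling") or {}
    if rope_scaling.get("type") == "yarn" and "factor" in rope_scaling:
        # Mark the rope scaling as YaRN and record the scale factor
        gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
        gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
        # Original (pre-extension) training context length, needed by YaRN
        gguf_writer.add_rope_scaling_orig_ctx_len(
            rope_scaling["original_max_position_embeddings"]
        )
```

With this metadata in the GGUF file, llama.cpp can pick up the YaRN parameters at load time as defaults, rather than requiring them to be passed on the command line for every run.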