Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-27 12:05:03 +00:00)
CUDA: fix typo in FlashAttention code (#13926)
@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING

     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");
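For context, the typo being fixed is that `#endif` was followed by bare tokens instead of a comment: ISO C/C++ allows nothing but a comment after `#endif`, so compilers emit an extra-tokens warning (e.g. clang's -Wextra-tokens). A minimal sketch of the corrected pattern, using stub names rather than the actual llama.cpp kernel and an illustrative value for GGML_CUDA_CC_TURING:

// Illustrative stub; in ggml this constant names the Turing compute capability.
#define GGML_CUDA_CC_TURING 750

__global__ void flash_attn_stub() {
#if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
    // Turing-only code path would go here.
#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING
    // Before the fix the directive read `#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING`:
    // the trailing tokens are non-standard and trigger the extra-tokens warning.
    // Prefixing them with `//` keeps the condition visible as a harmless comment.
}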
|