CUDA: fix typo in FlashAttention code (#13926)

Johannes Gäßler
2025-05-30 21:22:03 +02:00
committed by GitHub
parent b47ab7b8e9
commit e562eece7c


@@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16(
         NO_DEVICE_CODE;
         return;
     }
-#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING
+#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING
     static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV");
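
The typo matters because the preprocessor permits no tokens after #endif other than a comment: writing the condition bare there triggers an "extra tokens at end of #endif directive" diagnostic. Below is a minimal, self-contained sketch of the corrected guard pattern, not the actual FlashAttention kernel; the kernel name, body, and host code are hypothetical stand-ins, and the value given for GGML_CUDA_CC_TURING is assumed for illustration.

#include <cstdio>

// Assumed stand-in for the constant defined in ggml's CUDA headers.
#define GGML_CUDA_CC_TURING 750

__global__ void guard_demo(int * out) {
#if __CUDA_ARCH__ == GGML_CUDA_CC_TURING
    // Turing-only early exit; stand-in for the elided kernel logic.
    *out = 1;
    return;
#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING
    // Correct: the condition after #endif is echoed only inside a comment.
    *out = 0;
}

int main() {
    int * out = nullptr;
    cudaMalloc(&out, sizeof(int));
    guard_demo<<<1, 1>>>(out);
    int host_out = 0;
    cudaMemcpy(&host_out, out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("took Turing path: %d\n", host_out);
    cudaFree(out);
    return 0;
}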