Mirror of https://github.com/ggml-org/llama.cpp.git
CUDA: fix FA out-of-bounds writes (#7465)
@@ -238,6 +238,10 @@ static __global__ void flash_attn_tile_ext_f16(
     for (int j_VKQ_0 = 0; j_VKQ_0 < ncols; j_VKQ_0 += nwarps) {
         const int j_VKQ = j_VKQ_0 + threadIdx.y;
 
+        if (ic0 + j_VKQ >= ne01) {
+            return;
+        }
+
         half kqsum_j = __low2half(kqsum[j_VKQ_0/nwarps]) + __high2half(kqsum[j_VKQ_0/nwarps]);
         kqsum_j = warp_reduce_sum(kqsum_j);
 
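The added guard returns early when the tile row ic0 + j_VKQ falls at or past ne01 (the number of query rows), so a tail tile whose valid row count is smaller than the tile height no longer writes past the end of the output buffer. Since j_VKQ depends only on threadIdx.y, all lanes of a warp take the branch together, which keeps the warp_reduce_sum below it warp-uniform. Below is a minimal standalone sketch of the same tail-tile guard pattern; the kernel, names, and shapes (scale_rows, TILE_ROWS, n_rows, n_cols) are hypothetical illustrations, not the llama.cpp code.

// tail_guard.cu -- hypothetical sketch of the bounds-guard pattern, not llama.cpp code.
// Each block handles a tile of TILE_ROWS rows; when n_rows is not a multiple of
// TILE_ROWS, the last tile has fewer valid rows, and without the early return its
// extra threads would write past the end of dst.
#include <cstdio>
#include <cuda_runtime.h>

#define TILE_ROWS 8

__global__ void scale_rows(const float * src, float * dst, const int n_rows, const int n_cols) {
    const int row = blockIdx.x*TILE_ROWS + threadIdx.y; // analogous to ic0 + j_VKQ
    if (row >= n_rows) {
        return; // tail tile: this warp row has no valid output row
    }
    for (int col = threadIdx.x; col < n_cols; col += blockDim.x) {
        dst[row*n_cols + col] = 2.0f*src[row*n_cols + col];
    }
}

int main() {
    const int n_rows = 13; // deliberately not a multiple of TILE_ROWS
    const int n_cols = 32;
    const size_t nbytes = n_rows*n_cols*sizeof(float);

    float * src;
    float * dst;
    cudaMallocManaged(&src, nbytes);
    cudaMallocManaged(&dst, nbytes);
    for (int i = 0; i < n_rows*n_cols; ++i) {
        src[i] = 1.0f;
    }

    const dim3 grid((n_rows + TILE_ROWS - 1)/TILE_ROWS); // 2 blocks, last one partial
    const dim3 block(32, TILE_ROWS);
    scale_rows<<<grid, block>>>(src, dst, n_rows, n_cols);
    cudaDeviceSynchronize();

    printf("dst[0] = %.1f, dst[last] = %.1f\n", dst[0], dst[n_rows*n_cols - 1]);
    cudaFree(src);
    cudaFree(dst);
    return 0;
}

As in the commit above, the key design point is that the guard sits before any per-row stores, so out-of-range threads exit without touching memory rather than being masked on each write.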