JohannesGaessler committed
Commit b38d0f9 · Parent: 2e26e3a

CUDA: fix FA out-of-bounds reads (llama/7479)

ggml-cuda/fattn-tile-f16.cu CHANGED

```diff
@@ -83,7 +83,7 @@ static __global__ void flash_attn_tile_ext_f16(
         for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
             const int i = i0 + threadIdx.x;
 
-            const float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i];
+            const float2 tmp = ic0 + j < ne01 ? Q_f2[j*(nb01/sizeof(float2)) + i] : make_float2(0.0f, 0.0f);
             Q_h2[j][i] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y);
         }
     }
```
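For context (not part of the commit): each block of these kernels loads a tile of Q rows starting at index ic0, so when the number of query rows ne01 is not a multiple of the tile height, the trailing rows with ic0 + j >= ne01 used to be read from past the end of the tensor. The patch substitutes zeros for those reads. Below is a minimal standalone sketch of the same guarded-tile-load pattern; tile_rows, cols2, n_rows, and the host harness are illustrative assumptions, not names from ggml-cuda.

```cuda
// Standalone sketch of the guarded tile load. Hypothetical example:
// tile_rows, cols2, n_rows and this harness are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

#define WARP_SIZE 32

// Each block copies one tile of `tile_rows` rows. `src` holds exactly n_rows
// rows, while `dst` is padded to whole tiles, so rows with row0 + j >= n_rows
// must not be read from `src`; they are zero-filled instead, as in the commit.
template <int tile_rows, int cols2> // cols2: float2 elements per row
static __global__ void load_tile_guarded(const float2 * src, float2 * dst, const int n_rows) {
    const int row0 = blockIdx.x*tile_rows; // first row of this tile (cf. ic0)

    for (int j = 0; j < tile_rows; ++j) {
        for (int i0 = 0; i0 < cols2; i0 += WARP_SIZE) {
            const int i = i0 + threadIdx.x;

            // The guard from the patch: only dereference src when in bounds.
            const float2 tmp = row0 + j < n_rows ?
                src[(row0 + j)*cols2 + i] : make_float2(0.0f, 0.0f);
            dst[(row0 + j)*cols2 + i] = tmp;
        }
    }
}

int main() {
    constexpr int tile_rows = 16;
    constexpr int cols2     = 64; // 128 floats per row
    const int n_rows  = 23;       // deliberately not a multiple of tile_rows
    const int n_tiles = (n_rows + tile_rows - 1)/tile_rows;

    float2 * src;
    float2 * dst;
    cudaMalloc(&src, n_rows*cols2*sizeof(float2));            // unpadded
    cudaMalloc(&dst, n_tiles*tile_rows*cols2*sizeof(float2)); // padded
    cudaMemset(src, 0, n_rows*cols2*sizeof(float2));

    load_tile_guarded<tile_rows, cols2><<<n_tiles, WARP_SIZE>>>(src, dst, n_rows);
    printf("status: %s\n", cudaGetErrorString(cudaDeviceSynchronize()));

    cudaFree(src);
    cudaFree(dst);
    return 0;
}
```

Dropping the ternary and reading unconditionally reproduces the bug on the last, partially filled tile; running such a build under compute-sanitizer reports those reads as invalid global accesses.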
ggml-cuda/fattn-tile-f32.cu CHANGED

```diff
@@ -79,7 +79,7 @@ static __global__ void flash_attn_tile_ext_f32(
 
 #pragma unroll
         for (int i0 = 0; i0 < D; i0 += 2*WARP_SIZE) {
-            float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i0/2 + threadIdx.x];
+            float2 tmp = ic0 + j < ne01 ? Q_f2[j*(nb01/sizeof(float2)) + i0/2 + threadIdx.x] : make_float2(0.0f, 0.0f);
             Q_f[j][i0 + 0*WARP_SIZE + threadIdx.x] = tmp.x * scale;
             Q_f[j][i0 + 1*WARP_SIZE + threadIdx.x] = tmp.y * scale;
         }
```
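The f32 hunk applies the same guard; it differs only in unpacking the loaded float2 into two WARP_SIZE-strided float slots. For readers who find the ternary dense, here is an equivalent if/else spelling of the changed lines, using the identifiers from the hunk above (a fragment meant to sit in that loop body, not standalone code; the commit itself keeps the ternary):

```cuda
// Equivalent, more explicit form of the guarded load above. Compilers
// typically lower the ternary to a select, so both forms avoid divergent
// memory paths within the warp.
float2 tmp = make_float2(0.0f, 0.0f); // zero rows contribute nothing
if (ic0 + j < ne01) {
    tmp = Q_f2[j*(nb01/sizeof(float2)) + i0/2 + threadIdx.x];
}
Q_f[j][i0 + 0*WARP_SIZE + threadIdx.x] = tmp.x * scale;
Q_f[j][i0 + 1*WARP_SIZE + threadIdx.x] = tmp.y * scale;
```

Zero-filling instead of skipping the out-of-range rows keeps every lane of the warp executing the same loads and stores; the padded rows take no part in the results needed for the in-bounds queries.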