JohannesGaessler committed
Commit 4cf786d · 1 Parent(s): fd25ba6

CUDA: CUDART < 11.7 workaround for __hmax, __hmax2 (llama/7019)
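
In short: direct calls to __hmax and __hmax2 are replaced by ggml_cuda_hmax/ggml_cuda_hmax2 wrappers that use the intrinsics on CUDA >= 11.7 and otherwise fall back to comparing the operands as floats, so the half2 warp reduction no longer has to be disabled on older toolkits. A condensed sketch of the pattern, taken from the diff below (the HIP/AMD and NO_DEVICE_CODE branches are omitted here for brevity):

#include <cuda_fp16.h>

#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work

static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) {
#if CUDART_VERSION >= CUDART_HMAX
    return __hmax(a, b);                              // intrinsic path, CUDA >= 11.7
#else
    return __half2float(a) > __half2float(b) ? a : b; // fallback: compare as float
#endif
}

static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) {
#if CUDART_VERSION >= CUDART_HMAX
    return __hmax2(a, b);                             // intrinsic path, CUDA >= 11.7
#else
    half2 ret;                                        // fallback: elementwise max of the two halves
    reinterpret_cast<half&>(ret.x) =  __low2float(a) >  __low2float(b) ?  __low2half(a)  :  __low2half(b);
    reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
    return ret;
#endif
}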

Files changed (2)
  1. ggml-cuda/common.cuh +40 -5
  2. ggml-cuda/fattn.cu +3 -3
ggml-cuda/common.cuh CHANGED
@@ -137,7 +137,8 @@
 #define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)
 
 #define WARP_SIZE 32
-#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMASK 12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons
 
 #define CC_PASCAL 600
 #define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
@@ -293,20 +294,54 @@ static __device__ __forceinline__ float warp_reduce_max(float x) {
     return x;
 }
 
+static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+    return __hmax(a, b);
+#else
+    return __half2float(a) > __half2float(b) ? a : b;
+#endif // CUDART_VERSION >= CUDART_HMAX
+
+#else
+    GGML_UNUSED(a);
+    GGML_UNUSED(b);
+    NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+}
+static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+    return __hmax2(a, b);
+#else
+    half2 ret;
+    reinterpret_cast<half&>(ret.x) =  __low2float(a) >  __low2float(b) ?  __low2half(a)  :  __low2half(b);
+    reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
+    return ret;
+#endif // CUDART_VERSION >= CUDART_HMAX
+
+#else
+    GGML_UNUSED(a);
+    GGML_UNUSED(b);
+    NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+}
+
 static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
-        x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+        x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
     }
     return x;
 #else
     GGML_UNUSED(x);
     NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
 }
 
-#if CUDART_VERSION < 12000
+#if CUDART_VERSION < CUDART_HMASK
 static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) {
     const uint32_t mask_low  = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b)));
     const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b)));
ggml-cuda/fattn.cu CHANGED
@@ -116,7 +116,7 @@ static __global__ void flash_attn_vec_ext_f16(
         sum2 = warp_reduce_sum(sum2);
         half sum = __low2half(sum2) + __high2half(sum2);
         sum += mask ? maskh[k_VKQ_0 + i_KQ] : __float2half(0.0f);
-        kqmax_new = __hmax(kqmax_new, sum);
+        kqmax_new = ggml_cuda_hmax(kqmax_new, sum);
         if (threadIdx.x == 0) {
             KQ[i_KQ] = sum;
         }
@@ -416,9 +416,9 @@ static __global__ void flash_attn_ext_f16(
             const int k = k0 + threadIdx.x;
 
             KQ2_tmp[k0/WARP_SIZE] += mask ? mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
-            KQ_max_new = __hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
+            KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
         }
-        KQ_max_new = __half2half2(warp_reduce_max(__hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
+        KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
         const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new;
         KQ_max_scale_h2[j0/nwarps] = h2exp(diff);
         const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
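
Not part of the commit, but a quick way to sanity-check the fallback against the intrinsic on a toolkit that provides it: the hypothetical standalone test below (file name and build line are illustrative, e.g. nvcc -arch=sm_80 hmax2_check.cu) runs one warp and counts elementwise mismatches between the float-compare fallback and __hmax2.

#include <cstdio>
#include <cuda_fp16.h>

// Same fallback as ggml_cuda_hmax2 above, without the version/HIP gating.
static __device__ half2 hmax2_fallback(const half2 a, const half2 b) {
    half2 ret;
    reinterpret_cast<half&>(ret.x) =  __low2float(a) >  __low2float(b) ?  __low2half(a)  :  __low2half(b);
    reinterpret_cast<half&>(ret.y) = __high2float(a) > __high2float(b) ? __high2half(a) : __high2half(b);
    return ret;
}

static __global__ void check(int * n_mismatch) {
    const half2 a  = make_half2(threadIdx.x,         32.0f - threadIdx.x);
    const half2 b  = make_half2(31.0f - threadIdx.x, threadIdx.x);
    const half2 fb = hmax2_fallback(a, b);
#if CUDART_VERSION >= 11070
    const half2 ref = __hmax2(a, b); // intrinsic as the reference where available
#else
    const half2 ref = fb;            // nothing to compare against on older toolkits
#endif
    if (__low2float(fb) != __low2float(ref) || __high2float(fb) != __high2float(ref)) {
        atomicAdd(n_mismatch, 1);
    }
}

int main() {
    int * d_n = nullptr;
    int   h_n = 0;
    cudaMalloc(&d_n, sizeof(int));
    cudaMemset(d_n, 0, sizeof(int));
    check<<<1, 32>>>(d_n);
    cudaMemcpy(&h_n, d_n, sizeof(int), cudaMemcpyDeviceToHost);
    printf("mismatches: %d\n", h_n);
    cudaFree(d_n);
    return 0;
}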