jeffbolznv commited on
Commit
3331abd
·
1 Parent(s): 4576ce0

vulkan: scalar flash attention implementation (llama/13324)

Browse files

* vulkan: scalar flash attention implementation

* vulkan: always use fp32 for scalar flash attention

* vulkan: use vector loads in scalar flash attention shader

* vulkan: remove PV matrix, helps with register usage

* vulkan: reduce register usage in scalar FA, but perf may be slightly worse

* vulkan: load each Q value once. optimize O reduction. more tuning

* vulkan: support q4_0/q8_0 KV in scalar FA

* CI: increase timeout to accommodate newly-supported tests

* vulkan: for scalar FA, select between 1 and 8 rows

* vulkan: avoid using Float16 capability in scalar FA

ggml/src/ggml-vulkan/ggml-vulkan.cpp CHANGED
@@ -275,6 +275,7 @@ struct vk_device_struct {
275
  bool prefer_host_memory;
276
  bool float_controls_rte_fp16;
277
  bool subgroup_add;
 
278
 
279
  bool integer_dot_product;
280
 
@@ -402,12 +403,20 @@ struct vk_device_struct {
402
  vk_pipeline pipeline_conv2d_dw_cwhn_f32;
403
 
404
  // [2][2][2] is for {f16acc,f32acc}x{large,small_rows}x{unaligned, aligned}
 
 
 
 
 
 
 
405
  vk_pipeline pipeline_flash_attn_f32_f16_D64[GGML_TYPE_COUNT][2][2][2];
406
  vk_pipeline pipeline_flash_attn_f32_f16_D80[GGML_TYPE_COUNT][2][2][2];
407
  vk_pipeline pipeline_flash_attn_f32_f16_D96[GGML_TYPE_COUNT][2][2][2];
408
  vk_pipeline pipeline_flash_attn_f32_f16_D112[GGML_TYPE_COUNT][2][2][2];
409
  vk_pipeline pipeline_flash_attn_f32_f16_D128[GGML_TYPE_COUNT][2][2][2];
410
  vk_pipeline pipeline_flash_attn_f32_f16_D256[GGML_TYPE_COUNT][2][2][2];
 
411
  vk_pipeline pipeline_flash_attn_split_k_reduce;
412
 
413
  std::unordered_map<std::string, vk_pipeline_ref> pipelines;
@@ -1581,13 +1590,29 @@ static void ggml_vk_wait_events(vk_context& ctx, std::vector<vk::Event>&& events
1581
 
1582
  // number of rows/cols for flash attention shader
1583
  static constexpr uint32_t flash_attention_num_small_rows = 32;
1584
- static std::array<uint32_t, 2> fa_rows_cols(uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) {
 
 
 
 
 
 
 
1585
  GGML_UNUSED(clamp);
1586
 
 
 
 
 
 
 
 
 
1587
  // small rows, large cols
1588
  if (small_rows) {
1589
- return {flash_attention_num_small_rows, 64};
1590
  }
 
1591
  // small cols to reduce register count
1592
  if (ggml_is_quantized(type) || D == 256) {
1593
  return {64, 32};
@@ -1882,65 +1907,66 @@ static void ggml_vk_load_shaders(vk_device& device) {
1882
  parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size));
1883
  };
1884
 
1885
- #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
1886
- if (device->coopmat2) {
1887
-
1888
- auto const &fa_wg_denoms = [&](uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::array<uint32_t, 3> {
1889
- return {fa_rows_cols(D, clamp, type, small_rows)[0], 1, 1};
1890
- };
1891
 
1892
- auto const &fa_spec_constants = [&](uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::vector<uint32_t> {
1893
- // For large number of rows, 128 invocations seems to work best.
1894
- // For small number of rows (e.g. N==1), 256 works better. But matrix granularity for 256 is 32, so we
1895
- // can't use 256 for D==80.
1896
- uint32_t wg_size = (small_rows && (D % 32) == 0) ? 256 : 128;
1897
- auto rows_cols = fa_rows_cols(D, clamp, type, small_rows);
1898
- // mask dim1 is padded to 64, we rely on this to avoid clamping mask loads
1899
- GGML_ASSERT((GGML_KQ_MASK_PAD % rows_cols[0]) == 0);
1900
- return {wg_size, rows_cols[0], rows_cols[1], (D), clamp};
1901
- };
 
 
 
 
 
 
 
1902
 
1903
- #define CREATE_FA2(TYPE, NAMELC, D) \
1904
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][0][0], "flash_attn_f32_f16_D" #D "_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,false), fa_spec_constants(D,1,TYPE,false), 1); \
1905
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][0][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,false), fa_spec_constants(D,0,TYPE,false), fa_rows_cols(D,0,TYPE,false)[1]); \
1906
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][0][0], "flash_attn_f32_f16_D" #D "_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,false), fa_spec_constants(D,1,TYPE,false), 1); \
1907
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][0][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,false), fa_spec_constants(D,0,TYPE,false), fa_rows_cols(D,0,TYPE,false)[1]); \
1908
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][1][0], "flash_attn_f32_f16_D" #D "_f16acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,true), fa_spec_constants(D,1,TYPE,true), 1); \
1909
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][1][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,true), fa_spec_constants(D,0,TYPE,true), fa_rows_cols(D,0,TYPE,true)[1]); \
1910
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][1][0], "flash_attn_f32_f16_D" #D "_f32acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,true), fa_spec_constants(D,1,TYPE,true), 1); \
1911
- ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][1][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,true), fa_spec_constants(D,0,TYPE,true), fa_rows_cols(D,0,TYPE,true)[1]); \
1912
-
1913
- #define CREATE_FA(TYPE, NAMELC) \
1914
- CREATE_FA2(TYPE, NAMELC, 64) \
1915
- CREATE_FA2(TYPE, NAMELC, 80) \
1916
- CREATE_FA2(TYPE, NAMELC, 96) \
1917
- CREATE_FA2(TYPE, NAMELC, 112) \
1918
- CREATE_FA2(TYPE, NAMELC, 128) \
1919
- CREATE_FA2(TYPE, NAMELC, 256)
1920
-
1921
- CREATE_FA(GGML_TYPE_F16, f16)
1922
- CREATE_FA(GGML_TYPE_Q4_0, q4_0)
1923
- CREATE_FA(GGML_TYPE_Q4_1, q4_1)
1924
- CREATE_FA(GGML_TYPE_Q5_0, q5_0)
1925
- CREATE_FA(GGML_TYPE_Q5_1, q5_1)
1926
- CREATE_FA(GGML_TYPE_Q8_0, q8_0)
1927
- // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
1928
- //CREATE_FA(GGML_TYPE_Q2_K, q2_k)
1929
- //CREATE_FA(GGML_TYPE_Q3_K, q3_k)
1930
- //CREATE_FA(GGML_TYPE_Q4_K, q4_k)
1931
- //CREATE_FA(GGML_TYPE_Q5_K, q5_k)
1932
- //CREATE_FA(GGML_TYPE_Q6_K, q6_k)
1933
- //CREATE_FA(GGML_TYPE_IQ1_S, iq1_s)
1934
- //CREATE_FA(GGML_TYPE_IQ1_M, iq1_m)
1935
- //CREATE_FA(GGML_TYPE_IQ2_XXS, iq2_xxs)
1936
- //CREATE_FA(GGML_TYPE_IQ2_XS, iq2_xs)
1937
- //CREATE_FA(GGML_TYPE_IQ2_S, iq2_s)
1938
- //CREATE_FA(GGML_TYPE_IQ3_XXS, iq3_xxs)
1939
- //CREATE_FA(GGML_TYPE_IQ3_S, iq3_s)
1940
- //CREATE_FA(GGML_TYPE_IQ4_XS, iq4_xs)
1941
- CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl)
1942
  #undef CREATE_FA
1943
 
 
 
 
1944
  // Create 6 variants, {s,m,l}x{unaligned,aligned}
1945
  #define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
1946
  ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
@@ -2837,6 +2863,9 @@ static vk_device ggml_vk_get_device(size_t idx) {
2837
  device->subgroup_add = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
2838
  (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eArithmetic);
2839
 
 
 
 
2840
  const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr;
2841
 
2842
  device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
@@ -5709,20 +5738,57 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx
5709
  assert(q->type == GGML_TYPE_F32);
5710
  assert(k->type == v->type);
5711
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5712
  vk_pipeline *pipelines;
5713
  // XXX TODO other backends may be changing accumulator precision to default to f32 soon
5714
- bool f32acc = dst->op_params[3] == GGML_PREC_F32;
5715
- bool small_rows = N <= flash_attention_num_small_rows;
5716
- switch (D) {
5717
- case 64: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D64[k->type][f32acc][small_rows][0]; break;
5718
- case 80: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D80[k->type][f32acc][small_rows][0]; break;
5719
- case 96: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D96[k->type][f32acc][small_rows][0]; break;
5720
- case 112: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D112[k->type][f32acc][small_rows][0]; break;
5721
- case 128: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D128[k->type][f32acc][small_rows][0]; break;
5722
- case 256: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D256[k->type][f32acc][small_rows][0]; break;
5723
- default:
5724
- assert(!"unsupported D value");
5725
- return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5726
  }
5727
  assert(pipelines);
5728
 
@@ -5740,27 +5806,14 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx
5740
  vk_pipeline pipeline = pipelines[aligned];
5741
  assert(pipeline);
5742
 
5743
- uint32_t gqa_ratio = 1;
5744
- uint32_t qk_ratio = neq2 / nek2;
5745
- uint32_t workgroups_x = (uint32_t)neq1;
5746
- uint32_t workgroups_y = (uint32_t)neq2;
5747
- uint32_t workgroups_z = (uint32_t)neq3;
5748
-
5749
- if (N == 1 && qk_ratio > 1 && gqa_ratio <= flash_attention_num_small_rows &&
5750
- qk_ratio * nek2 == neq2 && nek2 == nev2 && neq3 == 1 && nek3 == 1 && nev3 == 1) {
5751
- // grouped query attention - make the N dimension equal to gqa_ratio, reduce
5752
- // workgroups proportionally in y dimension. The shader will detect gqa_ratio > 1
5753
- // and change addressing calculations to index Q's dimension 2.
5754
- gqa_ratio = qk_ratio;
5755
- N = gqa_ratio;
5756
- workgroups_y /= N;
5757
- }
5758
-
5759
  uint32_t split_kv = KV;
5760
  uint32_t split_k = 1;
5761
 
 
 
 
5762
  // Try to use split_k when KV is large enough to be worth the overhead
5763
- if (workgroups_x == 1 && ctx->device->shader_core_count > 0 && KV >= 512) {
5764
  // Try to run two workgroups per SM.
5765
  split_k = ctx->device->shader_core_count * 2 / workgroups_y;
5766
  if (split_k > 1) {
@@ -9530,9 +9583,8 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
9530
  case GGML_OP_FLASH_ATTN_EXT:
9531
  {
9532
  ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
9533
- if (!ggml_vk_get_device(ctx->device)->coopmat2) {
9534
- return false;
9535
- }
9536
  switch (op->src[0]->ne[0]) {
9537
  case 64:
9538
  case 80:
@@ -9540,7 +9592,6 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
9540
  case 112:
9541
  case 128:
9542
  case 256:
9543
- case 575: // DeepSeek MLA
9544
  break;
9545
  default:
9546
  return false;
@@ -9566,10 +9617,12 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
9566
  switch (op->src[1]->type) {
9567
  case GGML_TYPE_F16:
9568
  case GGML_TYPE_Q4_0:
 
 
 
9569
  case GGML_TYPE_Q4_1:
9570
  case GGML_TYPE_Q5_0:
9571
  case GGML_TYPE_Q5_1:
9572
- case GGML_TYPE_Q8_0:
9573
  // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
9574
  //case GGML_TYPE_Q2_K:
9575
  //case GGML_TYPE_Q3_K:
@@ -9585,10 +9638,18 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
9585
  //case GGML_TYPE_IQ3_S:
9586
  //case GGML_TYPE_IQ4_XS:
9587
  case GGML_TYPE_IQ4_NL:
 
 
 
 
9588
  break;
9589
  default:
9590
  return false;
9591
  }
 
 
 
 
9592
  return true;
9593
  }
9594
  case GGML_OP_GET_ROWS:
 
275
  bool prefer_host_memory;
276
  bool float_controls_rte_fp16;
277
  bool subgroup_add;
278
+ bool subgroup_shuffle;
279
 
280
  bool integer_dot_product;
281
 
 
403
  vk_pipeline pipeline_conv2d_dw_cwhn_f32;
404
 
405
  // [2][2][2] is for {f16acc,f32acc}x{large,small_rows}x{unaligned, aligned}
406
+ vk_pipeline pipeline_flash_attn_f32_f16_D64_cm2[GGML_TYPE_COUNT][2][2][2];
407
+ vk_pipeline pipeline_flash_attn_f32_f16_D80_cm2[GGML_TYPE_COUNT][2][2][2];
408
+ vk_pipeline pipeline_flash_attn_f32_f16_D96_cm2[GGML_TYPE_COUNT][2][2][2];
409
+ vk_pipeline pipeline_flash_attn_f32_f16_D112_cm2[GGML_TYPE_COUNT][2][2][2];
410
+ vk_pipeline pipeline_flash_attn_f32_f16_D128_cm2[GGML_TYPE_COUNT][2][2][2];
411
+ vk_pipeline pipeline_flash_attn_f32_f16_D256_cm2[GGML_TYPE_COUNT][2][2][2];
412
+
413
  vk_pipeline pipeline_flash_attn_f32_f16_D64[GGML_TYPE_COUNT][2][2][2];
414
  vk_pipeline pipeline_flash_attn_f32_f16_D80[GGML_TYPE_COUNT][2][2][2];
415
  vk_pipeline pipeline_flash_attn_f32_f16_D96[GGML_TYPE_COUNT][2][2][2];
416
  vk_pipeline pipeline_flash_attn_f32_f16_D112[GGML_TYPE_COUNT][2][2][2];
417
  vk_pipeline pipeline_flash_attn_f32_f16_D128[GGML_TYPE_COUNT][2][2][2];
418
  vk_pipeline pipeline_flash_attn_f32_f16_D256[GGML_TYPE_COUNT][2][2][2];
419
+
420
  vk_pipeline pipeline_flash_attn_split_k_reduce;
421
 
422
  std::unordered_map<std::string, vk_pipeline_ref> pipelines;
 
1590
 
1591
  // number of rows/cols for flash attention shader
1592
  static constexpr uint32_t flash_attention_num_small_rows = 32;
1593
+ static constexpr uint32_t scalar_flash_attention_num_small_rows = 1;
1594
+ static constexpr uint32_t scalar_flash_attention_num_large_rows = 8;
1595
+
1596
+ static uint32_t get_fa_num_small_rows(bool scalar) {
1597
+ return scalar ? scalar_flash_attention_num_small_rows : flash_attention_num_small_rows;
1598
+ }
1599
+
1600
+ static std::array<uint32_t, 2> fa_rows_cols(bool scalar, uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) {
1601
  GGML_UNUSED(clamp);
1602
 
1603
+ if (scalar) {
1604
+ if (small_rows) {
1605
+ return {scalar_flash_attention_num_small_rows, 64};
1606
+ } else {
1607
+ return {scalar_flash_attention_num_large_rows, 32};
1608
+ }
1609
+ }
1610
+
1611
  // small rows, large cols
1612
  if (small_rows) {
1613
+ return {get_fa_num_small_rows(scalar), 32};
1614
  }
1615
+
1616
  // small cols to reduce register count
1617
  if (ggml_is_quantized(type) || D == 256) {
1618
  return {64, 32};
 
1907
  parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size));
1908
  };
1909
 
1910
+ auto const &fa_wg_denoms = [&](bool scalar, uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::array<uint32_t, 3> {
1911
+ return {fa_rows_cols(scalar, D, clamp, type, small_rows)[0], 1, 1};
1912
+ };
 
 
 
1913
 
1914
+ auto const &fa_spec_constants = [&](bool scalar, uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::vector<uint32_t> {
1915
+ // For large number of rows, 128 invocations seems to work best.
1916
+ // For small number of rows (e.g. N==1), 256 works better. But matrix granularity for 256 is 32, so we
1917
+ // can't use 256 for D==80.
1918
+ // For scalar, use 128 (arbitrary)
1919
+ uint32_t wg_size = scalar ? 128 : ((small_rows && (D % 32) == 0) ? 256 : 128);
1920
+ auto rows_cols = fa_rows_cols(scalar, D, clamp, type, small_rows);
1921
+
1922
+ // D_split can't be larger than a subgroup because we use subgroupShuffle to reduce it.
1923
+ // D_split can't be larger than the LSB of D divided by 4 due to vectorization in the shader.
1924
+ const uint32_t D_lsb = D ^ (D & (D-1));
1925
+ uint32_t D_split = std::min(std::min(device->subgroup_size, 8u), D_lsb / 4);
1926
+
1927
+ // mask dim1 is padded to 64, we rely on this to avoid clamping mask loads
1928
+ GGML_ASSERT((GGML_KQ_MASK_PAD % rows_cols[0]) == 0);
1929
+ return {wg_size, rows_cols[0], rows_cols[1], (D), clamp, D_split};
1930
+ };
1931
 
1932
+ #define CREATE_FA2(TYPE, NAMELC, SCALAR, SUFFIX, D) \
1933
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][0][0][0], "flash_attn_f32_f16_D" #D "_f16acc" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,1,TYPE,false), fa_spec_constants(SCALAR, D,1,TYPE,false), 1, true); \
1934
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][0][0][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,0,TYPE,false), fa_spec_constants(SCALAR, D,0,TYPE,false), fa_rows_cols(SCALAR,D,0,TYPE,false)[1], true); \
1935
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][1][0][0], "flash_attn_f32_f16_D" #D "_f32acc" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,1,TYPE,false), fa_spec_constants(SCALAR, D,1,TYPE,false), 1, true); \
1936
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][1][0][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,0,TYPE,false), fa_spec_constants(SCALAR, D,0,TYPE,false), fa_rows_cols(SCALAR,D,0,TYPE,false)[1], true); \
1937
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][0][1][0], "flash_attn_f32_f16_D" #D "_f16acc_smallrows" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,1,TYPE,true), fa_spec_constants(SCALAR, D,1,TYPE,true), 1, true); \
1938
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][0][1][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc_smallrows" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,0,TYPE,true), fa_spec_constants(SCALAR, D,0,TYPE,true), fa_rows_cols(SCALAR,D,0,TYPE,true)[1], true); \
1939
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][1][1][0], "flash_attn_f32_f16_D" #D "_f32acc_smallrows" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,1,TYPE,true), fa_spec_constants(SCALAR, D,1,TYPE,true), 1, true); \
1940
+ ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D ## SUFFIX[TYPE][1][1][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc_smallrows" #NAMELC #SUFFIX, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(SCALAR, D,0,TYPE,true), fa_spec_constants(SCALAR, D,0,TYPE,true), fa_rows_cols(SCALAR,D,0,TYPE,true)[1], true); \
1941
+
1942
+ #define CREATE_FA(TYPE, NAMELC, SCALAR, SUFFIX) \
1943
+ CREATE_FA2(TYPE, NAMELC, SCALAR, SUFFIX, 64) \
1944
+ CREATE_FA2(TYPE, NAMELC, SCALAR, SUFFIX, 80) \
1945
+ CREATE_FA2(TYPE, NAMELC, SCALAR, SUFFIX, 96) \
1946
+ CREATE_FA2(TYPE, NAMELC, SCALAR, SUFFIX, 112) \
1947
+ CREATE_FA2(TYPE, NAMELC, SCALAR, SUFFIX, 128) \
1948
+ CREATE_FA2(TYPE, NAMELC, SCALAR, SUFFIX, 256)
1949
+
1950
+ CREATE_FA(GGML_TYPE_F16, f16, true, )
1951
+ CREATE_FA(GGML_TYPE_Q4_0, q4_0, true, )
1952
+ CREATE_FA(GGML_TYPE_Q8_0, q8_0, true, )
1953
+ #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
1954
+ if (device->coopmat2) {
1955
+ CREATE_FA(GGML_TYPE_F16, f16, false, _cm2)
1956
+ CREATE_FA(GGML_TYPE_Q4_0, q4_0, false, _cm2)
1957
+ CREATE_FA(GGML_TYPE_Q4_1, q4_1, false, _cm2)
1958
+ CREATE_FA(GGML_TYPE_Q5_0, q5_0, false, _cm2)
1959
+ CREATE_FA(GGML_TYPE_Q5_1, q5_1, false, _cm2)
1960
+ CREATE_FA(GGML_TYPE_Q8_0, q8_0, false, _cm2)
1961
+ CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl, false, _cm2)
1962
+ }
1963
+ #endif
1964
+ #undef CREATE_FA2
 
 
 
 
 
 
1965
  #undef CREATE_FA
1966
 
1967
+ #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
1968
+ if (device->coopmat2) {
1969
+
1970
  // Create 6 variants, {s,m,l}x{unaligned,aligned}
1971
  #define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
1972
  ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
 
2863
  device->subgroup_add = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
2864
  (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eArithmetic);
2865
 
2866
+ device->subgroup_shuffle = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) &&
2867
+ (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eShuffle);
2868
+
2869
  const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr;
2870
 
2871
  device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
 
5738
  assert(q->type == GGML_TYPE_F32);
5739
  assert(k->type == v->type);
5740
 
5741
+ bool scalar = !ctx->device->coopmat2;
5742
+
5743
+ uint32_t gqa_ratio = 1;
5744
+ uint32_t qk_ratio = neq2 / nek2;
5745
+ uint32_t workgroups_x = (uint32_t)neq1;
5746
+ uint32_t workgroups_y = (uint32_t)neq2;
5747
+ uint32_t workgroups_z = (uint32_t)neq3;
5748
+
5749
+ // For scalar FA, we can use the "large" size to accommodate gqa.
5750
+ // For coopmat FA, we always use the small size (which is still pretty large for gqa).
5751
+ const uint32_t max_gqa = scalar ? scalar_flash_attention_num_large_rows : get_fa_num_small_rows(false);
5752
+
5753
+ if (N == 1 && qk_ratio > 1 && qk_ratio <= max_gqa &&
5754
+ qk_ratio * nek2 == neq2 && nek2 == nev2 && neq3 == 1 && nek3 == 1 && nev3 == 1) {
5755
+ // grouped query attention - make the N dimension equal to gqa_ratio, reduce
5756
+ // workgroups proportionally in y dimension. The shader will detect gqa_ratio > 1
5757
+ // and change addressing calculations to index Q's dimension 2.
5758
+ gqa_ratio = qk_ratio;
5759
+ N = gqa_ratio;
5760
+ workgroups_y /= N;
5761
+ }
5762
+
5763
  vk_pipeline *pipelines;
5764
  // XXX TODO other backends may be changing accumulator precision to default to f32 soon
5765
+ bool f32acc = scalar || dst->op_params[3] == GGML_PREC_F32;
5766
+ bool small_rows = N <= get_fa_num_small_rows(scalar);
5767
+
5768
+ if (scalar) {
5769
+ switch (D) {
5770
+ case 64: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D64[k->type][f32acc][small_rows][0]; break;
5771
+ case 80: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D80[k->type][f32acc][small_rows][0]; break;
5772
+ case 96: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D96[k->type][f32acc][small_rows][0]; break;
5773
+ case 112: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D112[k->type][f32acc][small_rows][0]; break;
5774
+ case 128: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D128[k->type][f32acc][small_rows][0]; break;
5775
+ case 256: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D256[k->type][f32acc][small_rows][0]; break;
5776
+ default:
5777
+ GGML_ASSERT(!"unsupported D value");
5778
+ return;
5779
+ }
5780
+ } else {
5781
+ switch (D) {
5782
+ case 64: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D64_cm2[k->type][f32acc][small_rows][0]; break;
5783
+ case 80: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D80_cm2[k->type][f32acc][small_rows][0]; break;
5784
+ case 96: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D96_cm2[k->type][f32acc][small_rows][0]; break;
5785
+ case 112: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D112_cm2[k->type][f32acc][small_rows][0]; break;
5786
+ case 128: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D128_cm2[k->type][f32acc][small_rows][0]; break;
5787
+ case 256: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D256_cm2[k->type][f32acc][small_rows][0]; break;
5788
+ default:
5789
+ GGML_ASSERT(!"unsupported D value");
5790
+ return;
5791
+ }
5792
  }
5793
  assert(pipelines);
5794
 
 
5806
  vk_pipeline pipeline = pipelines[aligned];
5807
  assert(pipeline);
5808
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5809
  uint32_t split_kv = KV;
5810
  uint32_t split_k = 1;
5811
 
5812
+ // Use a placeholder core count if one isn't available. split_k is a big help for perf.
5813
+ const uint32_t shader_core_count = ctx->device->shader_core_count ? ctx->device->shader_core_count : 16;
5814
+
5815
  // Try to use split_k when KV is large enough to be worth the overhead
5816
+ if (workgroups_x == 1 && shader_core_count > 0 && KV >= 512) {
5817
  // Try to run two workgroups per SM.
5818
  split_k = ctx->device->shader_core_count * 2 / workgroups_y;
5819
  if (split_k > 1) {
 
9583
  case GGML_OP_FLASH_ATTN_EXT:
9584
  {
9585
  ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
9586
+ auto device = ggml_vk_get_device(ctx->device);
9587
+ bool coopmat2 = device->coopmat2;
 
9588
  switch (op->src[0]->ne[0]) {
9589
  case 64:
9590
  case 80:
 
9592
  case 112:
9593
  case 128:
9594
  case 256:
 
9595
  break;
9596
  default:
9597
  return false;
 
9617
  switch (op->src[1]->type) {
9618
  case GGML_TYPE_F16:
9619
  case GGML_TYPE_Q4_0:
9620
+ case GGML_TYPE_Q8_0:
9621
+ // supported in scalar and coopmat2 paths
9622
+ break;
9623
  case GGML_TYPE_Q4_1:
9624
  case GGML_TYPE_Q5_0:
9625
  case GGML_TYPE_Q5_1:
 
9626
  // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
9627
  //case GGML_TYPE_Q2_K:
9628
  //case GGML_TYPE_Q3_K:
 
9638
  //case GGML_TYPE_IQ3_S:
9639
  //case GGML_TYPE_IQ4_XS:
9640
  case GGML_TYPE_IQ4_NL:
9641
+ // currently supported only in coopmat2 path
9642
+ if (!coopmat2) {
9643
+ return false;
9644
+ }
9645
  break;
9646
  default:
9647
  return false;
9648
  }
9649
+ if (!coopmat2 && !device->subgroup_shuffle) {
9650
+ // scalar FA uses subgroupShuffle
9651
+ return false;
9652
+ }
9653
  return true;
9654
  }
9655
  case GGML_OP_GET_ROWS:
ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #version 450
2
+
3
+ #extension GL_EXT_control_flow_attributes : enable
4
+ #extension GL_EXT_shader_16bit_storage : require
5
+
6
+ #extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
7
+ #extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
8
+
9
+ #extension GL_KHR_shader_subgroup_shuffle : enable
10
+
11
+ #include "types.comp"
12
+
13
+ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
14
+
15
+ layout (constant_id = 1) const uint32_t Br = 1;
16
+ layout (constant_id = 2) const uint32_t Bc = 32;
17
+ layout (constant_id = 3) const uint32_t D = 32;
18
+
19
+ layout (constant_id = 5) const uint32_t D_split = 16;
20
+ const uint32_t D_per_thread = D / D_split;
21
+
22
+ const uint32_t cols_per_iter = gl_WorkGroupSize.x / D_split;
23
+ const uint32_t cols_per_thread = Bc / cols_per_iter;
24
+
25
+ layout (push_constant) uniform parameter {
26
+ uint32_t N;
27
+ uint32_t KV;
28
+
29
+ uint32_t ne1;
30
+ uint32_t ne2;
31
+ uint32_t ne3;
32
+
33
+ uint32_t neq2;
34
+ uint32_t neq3;
35
+ uint32_t nek2;
36
+ uint32_t nek3;
37
+ uint32_t nev2;
38
+ uint32_t nev3;
39
+ uint32_t nem1;
40
+
41
+ uint32_t nb01;
42
+ uint32_t nb02;
43
+ uint32_t nb03;
44
+ uint32_t nb11;
45
+ uint32_t nb12;
46
+ uint32_t nb13;
47
+ uint32_t nb21;
48
+ uint32_t nb22;
49
+ uint32_t nb23;
50
+ uint32_t nb31;
51
+
52
+ float scale;
53
+ float max_bias;
54
+ float logit_softcap;
55
+
56
+ uint32_t mask;
57
+ uint32_t n_head_log2;
58
+ float m0;
59
+ float m1;
60
+
61
+ uint32_t gqa_ratio;
62
+ uint32_t split_kv;
63
+ uint32_t k_num;
64
+ } p;
65
+
66
+ layout (binding = 0) readonly buffer Q {float data_q[];};
67
+ layout (binding = 0) readonly buffer QV4 {vec4 data_qv4[];};
68
+ layout (binding = 1) readonly buffer K {float16_t data_k[];};
69
+ layout (binding = 1) readonly buffer KV4 {f16vec4 data_kv4[];};
70
+ layout (binding = 2) readonly buffer V {float16_t data_v[];};
71
+ layout (binding = 2) readonly buffer VV4 {f16vec4 data_vv4[];};
72
+ layout (binding = 3) readonly buffer M {float16_t data_m[];};
73
+ layout (binding = 4) writeonly buffer O {D_TYPE data_o[];};
74
+
75
+ #if defined(A_TYPE_PACKED16)
76
+ #define BINDING_IDX_K 0
77
+ #define BINDING_IDX_V 1
78
+ layout (binding = 1) readonly buffer KV_PACKED16 {A_TYPE_PACKED16 data_packed16[];} kv_packed[2];
79
+ #endif
80
+
81
+ #if defined(DATA_A_Q4_0)
82
+ #define BLOCK_BYTE_SIZE 18
83
+
84
+ vec4 dequantize4(uint ib, uint iqs, uint a_offset, uint binding_idx) {
85
+ uint vui_lo = uint(kv_packed[binding_idx].data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 0]);
86
+ uint vui_hi = uint(kv_packed[binding_idx].data_packed16[a_offset + ib].qs[(iqs & 0xF) / 2 + 1]);
87
+ uint shift = (iqs & 0x10) >> 2;
88
+ vui_lo >>= shift;
89
+ vui_hi >>= shift;
90
+
91
+ return float(kv_packed[binding_idx].data_packed16[a_offset + ib].d) * (vec4(vui_lo & 0xF, (vui_lo >> 8) & 0xF, vui_hi & 0xF, (vui_hi >> 8) & 0xF) - 8.0f);
92
+ }
93
+ #endif
94
+
95
+ #if defined(DATA_A_Q8_0)
96
+ #define BLOCK_BYTE_SIZE 34
97
+ vec4 dequantize4(uint ib, uint iqs, uint a_offset, uint binding_idx) {
98
+ const i8vec2 v0 = unpack8(int32_t(kv_packed[binding_idx].data_packed16[a_offset + ib].qs[iqs / 2])).xy; // vec4 used due to #12147
99
+ const i8vec2 v1 = unpack8(int32_t(kv_packed[binding_idx].data_packed16[a_offset + ib].qs[iqs / 2 + 1])).xy;
100
+
101
+ return float(kv_packed[binding_idx].data_packed16[a_offset + ib].d) * vec4(v0.x, v0.y, v1.x, v1.y);
102
+ }
103
+ #endif
104
+
105
+ #define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
106
+
107
// Write one output element when doing grouped query attention.
// Row r indexes along Q's dimension 2; only the first N rows carry valid data.
D_TYPE perElemOpGqaStore(const in uint32_t r, const in uint32_t c, const in D_TYPE elem, const in uint32_t o_offset, const in uint32_t iq2, const in uint32_t N)
{
    const uint32_t row = iq2 + r;
    data_o[o_offset + row * D + c] = D_TYPE(elem);
    return elem;
}
115
+
116
+ // Store column zero. This is used to save per-row m and L values for split_k.
117
+ ACC_TYPE perElemOpStoreCol0(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t o_offset, const in uint32_t iq2, const in uint32_t N)
118
+ {
119
+ if (r < N && c == 0) {
120
+ uint32_t offset = iq2 + r;
121
+ data_o[o_offset + offset] = D_TYPE(elem);
122
+ }
123
+ return elem;
124
+ }
125
+
126
+ // Load the slope matrix, indexed by Q's dimension 2.
127
+ ACC_TYPE perElemOpComputeSlope(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t iq2)
128
+ {
129
+ const uint32_t h = iq2 + (r % p.gqa_ratio);
130
+
131
+ const ACC_TYPE base = ACC_TYPE(h < p.n_head_log2 ? p.m0 : p.m1);
132
+ const int exph = int(h < p.n_head_log2 ? h + 1 : 2*(h - p.n_head_log2) + 1);
133
+
134
+ return ACC_TYPE(pow(base, ACC_TYPE(exph)));
135
+ }
136
+
137
// Workgroup-shared scratch:
//   tmpsh / tmpshv4 — scalar and vec4 staging for the cross-thread
//                     reductions (row max, row sum, O accumulation)
//   masksh          — one Bc x Br tile of the attention mask
//   Qf              — the Q tile, pre-scaled by p.scale, in vec4 units
shared FLOAT_TYPE tmpsh[gl_WorkGroupSize.x];
shared vec4 tmpshv4[gl_WorkGroupSize.x];

shared float masksh[Bc][Br];
shared vec4 Qf[Br][D / 4];
142
+
143
// Scalar (non-coopmat) flash attention. Each workgroup processes one tile of
// Br query rows against the KV sequence using the online-softmax
// (FlashAttention-style) recurrence, keeping running row max M, row sum L,
// and the unnormalized output accumulator O in registers.
void main() {
#ifdef NEEDS_INIT_IQ_SHMEM
    init_iq_shmem(gl_WorkGroupSize);
#endif

    const uint32_t tid = gl_LocalInvocationIndex;
    const uint32_t N = p.N;    // number of query rows
    const uint32_t KV = p.KV;  // number of key/value rows

    // Threads are split along the head dimension (D_split lanes per column)
    // and along KV columns (cols_per_iter columns processed per iteration).
    const uint32_t d_tid = gl_LocalInvocationIndex % D_split;
    const uint32_t col_tid = gl_LocalInvocationIndex / D_split;

    uint32_t i = gl_WorkGroupID.x;
    uint32_t split_k_index = 0;

    // With split_k, workgroup.x indexes the KV split instead of the Q tile.
    if (p.k_num > 1) {
        i = 0;
        split_k_index = gl_WorkGroupID.x;
    }

    const uint32_t Tr = CEIL_DIV(N, Br);

    // Range of Bc-wide KV column tiles this (split of the) workgroup handles.
    const uint32_t start_j = split_k_index * p.split_kv / Bc;
    const uint32_t end_j = CEIL_DIV(min(KV, (split_k_index + 1) * p.split_kv), Bc);

    // When not using grouped query attention, all rows share the same iq2, equal to gl_WorkGroupID.y.
    // When using grouped query attention, each workgroup does gqa_ratio consecutive values of iq2.
    const uint32_t iq2 = gl_WorkGroupID.y * p.gqa_ratio;
    const uint32_t iq3 = gl_WorkGroupID.z;

    // broadcast factors (Q heads per K/V head, Q batches per K/V batch)
    const uint32_t rk2 = p.neq2/p.nek2;
    const uint32_t rk3 = p.neq3/p.nek3;

    const uint32_t rv2 = p.neq2/p.nev2;
    const uint32_t rv3 = p.neq3/p.nev3;

    // k indices
    const uint32_t ik3 = iq3 / rk3;
    const uint32_t ik2 = iq2 / rk2;

    // v indices
    const uint32_t iv3 = iq3 / rv3;
    const uint32_t iv2 = iq2 / rv2;

    // nb?1 are already divided by the type size and are in units of elements.
    // When using grouped query attention, Q is indexed by iq2, so the stride
    // should be nb02 (which is in bytes).
    uint32_t q_stride = p.gqa_ratio > 1 ? (p.nb02 / 4) : p.nb01;
    uint32_t k_stride = p.nb11;
    uint32_t v_stride = p.nb21;
    // When using grouped query attention, all rows use the same mask (stride 0).
    // "p.gqa_ratio >> 16" is just a roundabout way of writing zero
    // that prevents the compiler from folding the "&" through the select
    // and breaking the alignment detection.
    uint32_t m_stride = (p.gqa_ratio > 1) ? (p.gqa_ratio >> 16) : KV;

    uint32_t q_offset = (iq2*p.nb02+iq3*p.nb03) / 4;

    // Cooperative load of the Q tile into shared memory, pre-scaled by
    // p.scale so the per-column dot products need no extra multiply.
    [[unroll]] for (uint32_t idx = 0; idx < Br * D / 4; idx += gl_WorkGroupSize.x) {
        uint32_t d = (idx + tid) % (D / 4);
        uint32_t r = (idx + tid) / (D / 4);
        if (r < Br && d < D / 4 &&
            i * Br + r < N) {
            Qf[r][d] = vec4(data_qv4[q_offset / 4 + (i * Br + r) * q_stride / 4 + d]) * p.scale;
        }
    }
    barrier();

    // Per-thread output accumulator: each thread owns D_per_thread elements
    // (as vec4s) of each of the Br rows.
    vec4 Of[Br][D_per_thread / 4];
    [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            Of[r][d] = vec4(0.0);
        }
    }

    // Online-softmax state: L = running row sum, M = running row max.
    float Lf[Br], Mf[Br];

    // Use -FLT_MAX/2 rather than -inf to reduce the possibility of NaNs, e.g. when computing Mold-M.
    const float NEG_FLT_MAX_OVER_2 = uintBitsToFloat(0xFEFFFFFF);

    [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
        Lf[r] = 0;
        Mf[r] = NEG_FLT_MAX_OVER_2;
    }

    float slope[Br];
    [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
        slope[r] = 1.0;
    }

    // ALiBi: per-row slope applied to the mask values below.
    if (p.max_bias > 0.0f) {
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            slope[r] = perElemOpComputeSlope(r, col_tid, ACC_TYPE(0), iq2);
        }
    }

    // K/V base offsets: in block units for quantized K/V (BLOCK_SIZE > 1),
    // in float16 elements (nb is bytes, /2) otherwise.
#if BLOCK_SIZE > 1
    uint32_t k_offset = (ik2*p.nb12 + ik3*p.nb13) / BLOCK_BYTE_SIZE;
    uint32_t v_offset = (iv2*p.nb22 + iv3*p.nb23) / BLOCK_BYTE_SIZE;
#else
    uint32_t k_offset = (ik2*p.nb12 + ik3*p.nb13) / 2;
    uint32_t v_offset = (iv2*p.nb22 + iv3*p.nb23) / 2;
#endif

    // Main loop over Bc-wide KV column tiles.
    [[dont_unroll]]
    for (uint32_t j = start_j; j < end_j; ++j) {

        // S = Q * K^T scores for this tile (per-thread slice).
        float Sf[Br][cols_per_thread];
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
                Sf[r][c] = 0.0;
            }
        }


        // Accumulate partial dot products: each lane covers a D_split-strided
        // subset of the head dimension.
        [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
            [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
#if BLOCK_SIZE > 1
                uint coord = (j * Bc + c * cols_per_iter + col_tid) * k_stride * BLOCK_SIZE + 4 * (d * D_split + d_tid);
                uint ib = coord / BLOCK_SIZE;
                uint iqs = (coord % BLOCK_SIZE);
                vec4 K_Tf = dequantize4(ib, iqs, k_offset, BINDING_IDX_K);
#else
                vec4 K_Tf = vec4(data_kv4[k_offset / 4 + (j * Bc + c * cols_per_iter + col_tid) * k_stride / 4 + d * D_split + d_tid]);
#endif
                [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
                    Sf[r][c] += dot(Qf[r][d * D_split + d_tid], K_Tf);
                }
            }
        }

        [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
            // Compute sum across the D_split
            // (butterfly shuffle reduction; every lane ends with the full dot).
            [[unroll]] for (uint s = D_split / 2; s > 0; s >>= 1) {
                [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
                    Sf[r][c] += subgroupShuffleXor(Sf[r][c], s);
                }
            }
        }

        // Optional logit softcap: S = softcap * tanh(S).
        if (p.logit_softcap != 0.0f) {
            [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
                [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
                    Sf[r][c] = p.logit_softcap * tanh(Sf[r][c]);
                }
            }
        }

        // Apply the (slope-scaled) attention mask, staged through shared
        // memory so loads from data_m are coalesced.
        if (p.mask != 0) {

            [[unroll]] for (uint32_t idx = 0; idx < Bc * Br; idx += gl_WorkGroupSize.x) {
                uint32_t c = (idx + tid) % Bc;
                uint32_t r = (idx + tid) / Bc;
                if (idx + tid < Bc * Br) {
                    masksh[c][r] = float(data_m[(i * Br + r) * m_stride + (j * Bc + c)]);
                }
            }
            barrier();

            [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
                [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
                    float mvf = masksh[c * cols_per_iter + col_tid][r];

                    Sf[r][c] += slope[r]*mvf;
                }
            }
            barrier();
        }

        // Online-softmax update for this tile.
        float rowmaxf[Br], Pf[Br][cols_per_thread], rowsumf[Br], eMf[Br], Moldf[Br];
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            rowmaxf[r] = Sf[r][0];
            [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
                rowmaxf[r] = max(rowmaxf[r], Sf[r][c]);
            }
            Moldf[r] = Mf[r];

            // M = max(rowmax, Mold)
            // P = e^(S - M)
            // eM = e^(Mold - M)
            Mf[r] = max(rowmaxf[r], Moldf[r]);
            [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
                Pf[r][c] = exp(Sf[r][c] - Mf[r]);
            }
            eMf[r] = exp(Moldf[r] - Mf[r]);

            // Compute sum across row of P
            rowsumf[r] = 0.0;
            [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
                rowsumf[r] += Pf[r][c];
            }

            Lf[r] = eMf[r]*Lf[r] + rowsumf[r];
        }

        // Rescale the running output by e^(Mold - M) before adding P*V.
        [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
            [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
                Of[r][d] = eMf[r] * Of[r][d];
            }
        }

        // O += P * V (per-thread slice of the head dimension).
        [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) {
            [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
#if BLOCK_SIZE > 1
                uint coord = (j * Bc + c * cols_per_iter + col_tid) * v_stride * BLOCK_SIZE + 4 * (d * D_split + d_tid);
                uint ib = coord / BLOCK_SIZE;
                uint iqs = (coord % BLOCK_SIZE);
                vec4 Vf = dequantize4(ib, iqs, v_offset, BINDING_IDX_V);
#else
                vec4 Vf = vec4(data_vv4[v_offset / 4 + (j * Bc + c * cols_per_iter + col_tid) * v_stride / 4 + d * D_split + d_tid]);
#endif
                [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
                    Of[r][d] += Pf[r][c] * Vf;
                }
            }
        }

        barrier();
    }

    // reduce across threads
    // Each row's M, L and O are currently partial per column-group; merge
    // them through shared memory. Reductions stop at stride D_split so the
    // D_split lanes keep their own per-d_tid slice of O.

    [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
        float rowmaxf, eMf;

        tmpsh[tid] = Mf[r];
        // Compute max across the row
        barrier();
        [[unroll]] for (int s = int(gl_WorkGroupSize.x) / 2; s >= D_split; s >>= 1) {
            if (tid < s) {
                tmpsh[tid] = max(tmpsh[tid], tmpsh[tid + s]);
            }
            barrier();
        }
        rowmaxf = tmpsh[d_tid];
        barrier();

        float Moldf = Mf[r];

        // M = max(rowmax, Mold)
        // eM = e^(Mold - M)
        Mf[r] = max(rowmaxf, Moldf);
        eMf = exp(Moldf - Mf[r]);

        Lf[r] = eMf*Lf[r];

        tmpsh[tid] = Lf[r];

        // Compute sum across the row
        barrier();
        [[unroll]] for (int s = int(gl_WorkGroupSize.x) / 2; s >= D_split; s >>= 1) {
            if (tid < s) {
                tmpsh[tid] = tmpsh[tid] + tmpsh[tid + s];
            }
            barrier();
        }
        Lf[r] = tmpsh[d_tid];
        barrier();

        // Merge the O accumulators the same way, one vec4 at a time.
        [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {

            Of[r][d] = eMf * Of[r][d];
            tmpshv4[tid] = Of[r][d];

            barrier();
            [[unroll]] for (int s = int(gl_WorkGroupSize.x) / 2; s >= D_split; s >>= 1) {
                if (tid < s) {
                    Of[r][d] += tmpshv4[tid + s];
                    tmpshv4[tid] = Of[r][d];
                }
                barrier();
            }
            Of[r][d] = tmpshv4[d_tid];
            barrier();
        }
    }


    // If there is split_k, then the split_k resolve shader does the final
    // division by L. Store the intermediate O value and per-row m and L values.
    if (p.k_num > 1) {
        uint32_t o_offset = D * p.ne1 * split_k_index;

        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            if (r < N) {
                [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
                    [[unroll]] for (uint32_t comp = 0; comp < 4; ++comp) {
                        perElemOpGqaStore(r, 4*(d * D_split + d_tid) + comp, Of[r][d][comp], o_offset, iq2, N);
                    }
                }
            }
        }

        // L and M are stored after all k_num O slices, two rows per split.
        o_offset = D * p.ne1 * p.k_num + p.ne1 * split_k_index * 2;
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            if (r < N) {
                perElemOpStoreCol0(r, 0u, ACC_TYPE(Lf[r]), o_offset, iq2, N);
                perElemOpStoreCol0(r, 0u, ACC_TYPE(Mf[r]), o_offset + p.ne1, iq2, N);
            }
        }

        return;
    }

    // No split_k: normalize O by 1/L and store the final result.
    float Lfrcp[Br];
    [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
        Lfrcp[r] = 1.0 / Lf[r];
    }

    [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            Of[r][d] *= Lfrcp[r];
        }
    }

    uint32_t o_offset = iq3*p.ne2*p.ne1;

    if (p.gqa_ratio > 1) {
        // GQA: rows map to consecutive heads; use the GQA store helper.
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            if (r < N) {
                [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
                    [[unroll]] for (uint32_t comp = 0; comp < 4; ++comp) {
                        perElemOpGqaStore(r, 4*(d * D_split + d_tid) + comp, Of[r][d][comp], o_offset, iq2, N);
                    }
                }
            }
        }
    } else {
        // Non-GQA: rows map to consecutive query positions of the same head.
        [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
            if (i * Br + r < N) {
                [[unroll]] for (uint32_t d = 0; d < D_per_thread / 4; ++d) {
                    [[unroll]] for (uint32_t comp = 0; comp < 4; ++comp) {
                        data_o[o_offset + iq2 * D + (i * Br + r) * p.ne1 * D + 4*(d * D_split + d_tid) + comp] = D_TYPE(Of[r][d][comp]);
                    }
                }
            }
        }
    }
}
ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp CHANGED
@@ -421,7 +421,6 @@ void process_shaders() {
421
  #endif
422
  }
423
 
424
- #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
425
  // flash attention
426
  for (const auto& f16acc : {false, true}) {
427
  std::string acctype = f16acc ? "float16_t" : "float";
@@ -432,6 +431,7 @@ void process_shaders() {
432
  }
433
  if (tname == "bf16") continue;
434
 
 
435
  if (tname == "f16") {
436
  string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp",
437
  merge_maps(base_dict, {{"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}}), true, false, true, f16acc);
@@ -440,9 +440,17 @@ void process_shaders() {
440
  string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp",
441
  merge_maps(base_dict, {{data_a_key, "1"}, {"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}, {"DEQUANTFUNC", "dequantFunc"+to_uppercase(tname) }, {"BLOCK_SIZE", "QUANT_K_"+to_uppercase(tname) }}), true, false, true, f16acc);
442
  }
 
 
 
 
 
 
 
 
 
443
  }
444
  }
445
- #endif
446
 
447
  for (const auto& tname : type_names) {
448
  // mul mat vec
 
421
  #endif
422
  }
423
 
 
424
  // flash attention
425
  for (const auto& f16acc : {false, true}) {
426
  std::string acctype = f16acc ? "float16_t" : "float";
 
431
  }
432
  if (tname == "bf16") continue;
433
 
434
+ #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
435
  if (tname == "f16") {
436
  string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp",
437
  merge_maps(base_dict, {{"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}}), true, false, true, f16acc);
 
440
  string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp",
441
  merge_maps(base_dict, {{data_a_key, "1"}, {"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}, {"DEQUANTFUNC", "dequantFunc"+to_uppercase(tname) }, {"BLOCK_SIZE", "QUANT_K_"+to_uppercase(tname) }}), true, false, true, f16acc);
442
  }
443
+ #endif
444
+ if (tname == "f16") {
445
+ string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn.comp",
446
+ merge_maps(base_dict, {{"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}}), true, false, false, f16acc);
447
+ } else if (tname == "q4_0" || tname == "q8_0") {
448
+ std::string data_a_key = "DATA_A_" + to_uppercase(tname);
449
+ string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn.comp",
450
+ merge_maps(base_dict, {{data_a_key, "1"}, {"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}, {"BLOCK_SIZE", "QUANT_K_"+to_uppercase(tname) }}), true, false, false, f16acc);
451
+ }
452
  }
453
  }
 
454
 
455
  for (const auto& tname : type_names) {
456
  // mul mat vec