jeffbolznv committed on
Commit 3bb9e77 · 1 Parent(s): ee122d3

vulkan: support copy from f32 to q4_0/q4_1/q5_0/q5_1/q8_0/iq4_nl (llama/11166)

* vulkan: support copy from f32 to q4_0/q4_1/q5_0/q5_1/q8_0/iq4_nl

Shaders are based on cpy.cu.

* vulkan: support copy from q4_0/q4_1/q5_0/q5_1/q8_0/iq4_nl to f32

* ggml: copy q->f32 assumes some contiguity in the destination
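
For orientation (not part of the commit): the new pipelines are reached through the regular GGML_OP_CPY path, so a hedged sketch using the public ggml API can show how a graph exercises the quant -> f32 direction. The tensor shapes, the helper name, and the ggml-cpu.h include are illustrative assumptions (the header that declares ggml_graph_compute_with_ctx varies between ggml versions); on the CPU backend this same op lands in the new ggml_compute_forward_dup_q fallback, on Vulkan it selects pipeline_cpy_quant_f32[type].

    // Hypothetical sketch: dequantizing copy of a q8_0 tensor into f32 via ggml_cpy.
    // Assumes ne0 (64) is a multiple of the q8_0 block size (32).
    #include "ggml.h"
    #include "ggml-cpu.h"   // for ggml_graph_compute_with_ctx; location may differ by version

    static void cpy_q8_0_to_f32_example(void) {
        struct ggml_init_params ip = { /*.mem_size =*/ 16*1024*1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false };
        struct ggml_context * ctx = ggml_init(ip);

        struct ggml_tensor * src = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, 64, 4);
        struct ggml_tensor * dst = ggml_new_tensor_2d(ctx, GGML_TYPE_F32,  64, 4);

        // GGML_OP_CPY with a quantized source and an f32 destination
        struct ggml_tensor * cpy = ggml_cpy(ctx, src, dst);

        struct ggml_cgraph * gf = ggml_new_graph(ctx);
        ggml_build_forward_expand(gf, cpy);
        ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/ 1);

        ggml_free(ctx);
    }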

ggml/src/ggml-cpu/ggml-cpu.c CHANGED
@@ -3967,6 +3967,57 @@ static void ggml_compute_forward_dup_bytes(
     }
 }
 
+static void ggml_compute_forward_dup_q(
+        const struct ggml_compute_params * params,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const enum ggml_type type = src0->type;
+    ggml_to_float_t const dequantize_row_q = ggml_get_type_traits(type)->to_float;
+
+    size_t qk = ggml_blck_size(type);
+    const int64_t nr = ggml_nelements(src1) / qk;
+
+    // destination must be contiguous in the first dimension
+    GGML_ASSERT(nb10 == ggml_type_size(dst->type));
+    // must either have first dimension large enough to hold a row, or fully contiguous
+    GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int dr = (nr + nth - 1)/nth;
+
+    // row range for this thread
+    const int ir0 = dr*ith;
+    const int ir1 = MIN(ir0 + dr, nr);
+
+    for (int64_t ir = ir0; ir < ir1; ++ir) {
+
+        uint32_t i = ir * qk;
+
+        const int64_t i03 = i/(ne00 * ne01 * ne02);
+        const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01);
+        const int64_t i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00;
+        const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00;
+        const int64_t x_offset = (i00/qk)*nb00 + i01*nb01 + i02*nb02 + i03 * nb03;
+
+        const int64_t i13 = i/(ne10 * ne11 * ne12);
+        const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11);
+        const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10;
+        const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10;
+        const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13;
+
+        dequantize_row_q(
+                (const void *) ((char *) src0->data + x_offset),
+                     (float *) ((char *)  dst->data + dst_offset), qk);
+    }
+}
+
 static void ggml_compute_forward_dup(
         const struct ggml_compute_params * params,
         struct ggml_tensor * dst) {
@@ -3993,6 +4044,10 @@ static void ggml_compute_forward_dup(
             } break;
         default:
             {
+                if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) {
+                    ggml_compute_forward_dup_q(params, dst);
+                    break;
+                }
                 GGML_ABORT("fatal error");
            }
    }
ggml/src/ggml-vulkan/ggml-vulkan.cpp CHANGED
@@ -228,6 +228,8 @@ struct vk_device_struct {
     vk_pipeline pipeline_repeat_f32;
     vk_pipeline pipeline_cpy_f32_f32, pipeline_cpy_f32_f16, pipeline_cpy_f16_f16;
     vk_pipeline pipeline_contig_cpy_f32_f32, pipeline_contig_cpy_f32_f16, pipeline_contig_cpy_f16_f16;
+    vk_pipeline pipeline_cpy_f32_quant[GGML_TYPE_COUNT];
+    vk_pipeline pipeline_cpy_quant_f32[GGML_TYPE_COUNT];
     vk_pipeline pipeline_norm_f32;
     vk_pipeline pipeline_group_norm_f32;
     vk_pipeline pipeline_rms_norm_f32;
@@ -1965,6 +1967,20 @@ static void ggml_vk_load_shaders(vk_device& device) {
     ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f32_f16, "contig_cpy_f32_f16", contig_cpy_f32_f16_len, contig_cpy_f32_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_contig_cpy_f16_f16, "contig_cpy_f16_f16", contig_cpy_f16_f16_len, contig_cpy_f16_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1);
 
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_0], "cpy_f32_q4_0", cpy_f32_q4_0_len, cpy_f32_q4_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q4_1], "cpy_f32_q4_1", cpy_f32_q4_1_len, cpy_f32_q4_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_0], "cpy_f32_q5_0", cpy_f32_q5_0_len, cpy_f32_q5_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q5_1], "cpy_f32_q5_1", cpy_f32_q5_1_len, cpy_f32_q5_1_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_Q8_0], "cpy_f32_q8_0", cpy_f32_q8_0_len, cpy_f32_q8_0_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q8_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_f32_quant[GGML_TYPE_IQ4_NL], "cpy_f32_iq4_nl", cpy_f32_iq4_nl_len, cpy_f32_iq4_nl_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_IQ4_NL), 1, 1}, {}, 1);
+
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_0], "cpy_q4_0_f32", cpy_q4_0_f32_len, cpy_q4_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q4_1], "cpy_q4_1_f32", cpy_q4_1_f32_len, cpy_q4_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q4_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_0], "cpy_q5_0_f32", cpy_q5_0_f32_len, cpy_q5_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q5_1], "cpy_q5_1_f32", cpy_q5_1_f32_len, cpy_q5_1_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q5_1), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_Q8_0], "cpy_q8_0_f32", cpy_q8_0_f32_len, cpy_q8_0_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_Q8_0), 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_cpy_quant_f32[GGML_TYPE_IQ4_NL], "cpy_iq4_nl_f32", cpy_iq4_nl_f32_len, cpy_iq4_nl_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {(uint32_t)ggml_blck_size(GGML_TYPE_IQ4_NL), 1, 1}, {}, 1);
+
     ggml_vk_create_pipeline(device, device->pipeline_add_f32, "add_f32", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_add_f32_norepeat, "add_f32_norepeat", add_f32_len, add_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {1}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_add_f16_f32_f16, "add_f16_f32_f16", add_f16_f32_f16_len, add_f16_f32_f16_data, "main", 3, sizeof(vk_op_binary_push_constants), {512, 1, 1}, {0}, 1);
@@ -3689,6 +3705,33 @@ static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_cpy_f16_f16;
         }
     }
+    if (src->type == GGML_TYPE_F32) {
+        switch (to) {
+        case GGML_TYPE_Q4_0:
+        case GGML_TYPE_Q4_1:
+        case GGML_TYPE_Q5_0:
+        case GGML_TYPE_Q5_1:
+        case GGML_TYPE_Q8_0:
+        case GGML_TYPE_IQ4_NL:
+            return ctx->device->pipeline_cpy_f32_quant[to];
+        default:
+            break;
+        }
+    }
+
+    if (to == GGML_TYPE_F32) {
+        switch (src->type) {
+        case GGML_TYPE_Q4_0:
+        case GGML_TYPE_Q4_1:
+        case GGML_TYPE_Q5_0:
+        case GGML_TYPE_Q5_1:
+        case GGML_TYPE_Q8_0:
+        case GGML_TYPE_IQ4_NL:
+            return ctx->device->pipeline_cpy_quant_f32[src->type];
+        default:
+            break;
+        }
+    }
 
     std::cerr << "Missing CPY op for types: " << ggml_type_name(src->type) << " " << ggml_type_name(to) << std::endl;
     GGML_ABORT("fatal error");
@@ -5160,7 +5203,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     }
     std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
     std::cerr << "), " << ggml_op_name(op) << ", " << (dryrun ? "dryrun" : "") << ")");
-    GGML_ASSERT(op == GGML_OP_GET_ROWS || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
+    GGML_ASSERT(op == GGML_OP_GET_ROWS || op == GGML_OP_CPY || (!ggml_is_quantized(src0->type) && (src1 == nullptr || !ggml_is_quantized(src1->type)))); // NOLINT
     GGML_ASSERT(ggml_vk_op_supports_incontiguous(op) || ggml_vk_dim01_contiguous(src0)); // NOLINT
     GGML_ASSERT(dst->buffer != nullptr);
     const uint64_t ne00 = src0->ne[0];
@@ -7905,12 +7948,36 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         {
             ggml_type src0_type = op->src[0]->type;
             ggml_type src1_type = op->src[1] != nullptr ? op->src[1]->type : src0_type;
-            if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
-                return true;
+
+            if (src0_type == GGML_TYPE_F32) {
+                switch (src1_type) {
+                case GGML_TYPE_F32:
+                case GGML_TYPE_F16:
+                case GGML_TYPE_Q4_0:
+                case GGML_TYPE_Q4_1:
+                case GGML_TYPE_Q5_0:
+                case GGML_TYPE_Q5_1:
+                case GGML_TYPE_Q8_0:
+                case GGML_TYPE_IQ4_NL:
+                    return true;
+                default:
+                    break;
+                }
             }
-            if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
-                return true;
+            if (src1_type == GGML_TYPE_F32) {
+                switch (src0_type) {
+                case GGML_TYPE_Q4_0:
+                case GGML_TYPE_Q4_1:
+                case GGML_TYPE_Q5_0:
+                case GGML_TYPE_Q5_1:
+                case GGML_TYPE_Q8_0:
+                case GGML_TYPE_IQ4_NL:
+                    return true;
+                default:
+                    break;
+                }
             }
+
             if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
                 return true;
             }
ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp ADDED
@@ -0,0 +1,51 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+#include "dequant_funcs.comp"
+
+#if defined(DATA_A_IQ4_NL)
+// 16 invocations needed for init_iq4nl_shmem
+layout(local_size_x = 16, local_size_y = 1, local_size_z = 1) in;
+#else
+layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+    init_iq4nl_shmem();
+    if (gl_LocalInvocationIndex.x != 0) {
+        return;
+    }
+#endif
+
+    const uint idx = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x * QUANT_K;
+
+    if (idx >= p.ne) {
+        return;
+    }
+
+    uint dst_idx = get_doffset() + dst_idx(idx);
+    uint src_idx = src0_idx_quant(idx, QUANT_K);
+
+    const uint a_offset = 0;
+    const uint ib = src_idx;
+    const vec2 dm = get_dm(ib, a_offset);
+
+    [[unroll]] for (int j = 0; j < QUANT_K; j += 4) {
+        vec4 v = dequantize4(ib, j / QUANT_R, a_offset);
+        v = v * dm.x + vec4(dm.y);
+
+#if QUANT_R == 2
+        data_d[dst_idx + j/2 + 0] = v[0];
+        data_d[dst_idx + j/2 + QUANT_K/2 + 0] = v[1];
+        data_d[dst_idx + j/2 + 1] = v[2];
+        data_d[dst_idx + j/2 + QUANT_K/2 + 1] = v[3];
+#else
+        data_d[dst_idx + j + 0] = v[0];
+        data_d[dst_idx + j + 1] = v[1];
+        data_d[dst_idx + j + 2] = v[2];
+        data_d[dst_idx + j + 3] = v[3];
+#endif
+    }
+}
ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp ADDED
@@ -0,0 +1,237 @@
+#version 450
+
+#include "types.comp"
+#include "generic_unary_head.comp"
+
+#if defined(DATA_A_IQ4_NL)
+// 16 invocations needed for init_iq4nl_shmem
+layout(local_size_x = 16, local_size_y = 1, local_size_z = 1) in;
+#else
+layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+#endif
+
+layout (binding = 0) readonly buffer S {float data_s[];};
+layout (binding = 1) writeonly buffer Q {A_TYPE data_q[];};
+
+#if defined(DATA_A_Q4_0)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0;
+    float vmax = 0.0;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_0; ++j) {
+        const float v = data_s[src_idx + j];
+        if (amax < abs(v)) {
+            amax = abs(v);
+            vmax = v;
+        }
+    }
+
+    const float d = vmax / -8;
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_0/2; ++j) {
+        const float x0 = data_s[src_idx + 0 + j]*id;
+        const float x1 = data_s[src_idx + QUANT_K_Q4_0/2 + j]*id;
+
+        const uint xi0 = min(15, int(x0 + 8.5));
+        const uint xi1 = min(15, int(x1 + 8.5));
+
+        data_q[dst_idx].qs[j] = uint8_t(xi0 | (xi1 << 4));
+    }
+}
+#endif
+
+#if defined(DATA_A_Q4_1)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float vmin = 1.0/0.0;
+    float vmax = -vmin;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_1; ++j) {
+        const float v = data_s[src_idx + j];
+
+        if (v < vmin) vmin = v;
+        if (v > vmax) vmax = v;
+    }
+
+    const float d = (vmax - vmin) / ((1 << 4) - 1);
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+    data_q[dst_idx].m = float16_t(vmin);
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q4_1/2; ++j) {
+        const float x0 = (data_s[src_idx + 0 + j] - vmin)*id;
+        const float x1 = (data_s[src_idx + QUANT_K_Q4_1/2 + j] - vmin)*id;
+
+        const uint xi0 = min(15, int(x0 + 0.5));
+        const uint xi1 = min(15, int(x1 + 0.5));
+
+        data_q[dst_idx].qs[j] = uint8_t(xi0 | (xi1 << 4));
+    }
+}
+#endif
+
+#if defined(DATA_A_Q5_0)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0;
+    float vmax = 0.0;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q5_0; ++j) {
+        const float v = data_s[src_idx + j];
+        if (amax < abs(v)) {
+            amax = abs(v);
+            vmax = v;
+        }
+    }
+
+    const float d = vmax / -16;
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+
+    uint32_t qh = 0;
+    [[unroll]] for (int j = 0; j < QUANT_K_Q5_0/2; ++j) {
+        const float x0 = data_s[src_idx + 0 + j]*id;
+        const float x1 = data_s[src_idx + QUANT_K_Q5_0/2 + j]*id;
+
+        const uint xi0 = min(31, int(x0 + 16.5));
+        const uint xi1 = min(31, int(x1 + 16.5));
+
+        data_q[dst_idx].qs[j] = uint8_t((xi0 & 0xf) | ((xi1 & 0xf) << 4));
+        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+        qh |= ((xi1 & 0x10u) >> 4) << (j + QUANT_K_Q5_0/2);
+    }
+    data_q[dst_idx].qh[0] = uint16_t(qh & 0xFFFF);
+    data_q[dst_idx].qh[1] = uint16_t(qh >> 16);
+}
+#endif
+
+#if defined(DATA_A_Q5_1)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float min = data_s[src_idx + 0];
+    float max = min;
+
+    [[unroll]] for (int j = 1; j < QUANT_K_Q5_1; ++j) {
+        const float v = data_s[src_idx + j];
+        min = v < min ? v : min;
+        max = v > max ? v : max;
+    }
+
+    const float d = (max - min) / 31;
+    const float id = (d != 0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+    data_q[dst_idx].m = float16_t(min);
+
+    uint32_t qh = 0;
+    [[unroll]] for (int j = 0; j < QUANT_K_Q5_1/2; ++j) {
+        const float x0 = (data_s[src_idx + 0 + j] - min)*id;
+        const float x1 = (data_s[src_idx + QUANT_K_Q5_1/2 + j] - min)*id;
+
+        const uint xi0 = uint(x0 + 0.5);
+        const uint xi1 = uint(x1 + 0.5);
+
+        data_q[dst_idx].qs[j] = uint8_t((xi0 & 0xf) | ((xi1 & 0xf) << 4));
+        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+        qh |= ((xi1 & 0x10u) >> 4) << (j + QUANT_K_Q5_1/2);
+    }
+    data_q[dst_idx].qh = qh;
+}
+#endif
+
+#if defined(DATA_A_Q8_0)
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0; // absolute max
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q8_0; j++) {
+        const float v = data_s[src_idx + j];
+        amax = max(amax, abs(v));
+    }
+
+    const float d = amax / ((1 << 7) - 1);
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    data_q[dst_idx].d = float16_t(d);
+
+    [[unroll]] for (int j = 0; j < QUANT_K_Q8_0; ++j) {
+        const float x0 = data_s[src_idx + j]*id;
+
+        data_q[dst_idx].qs[j] = int8_t(round(x0));
+    }
+}
+#endif
+
+#if defined(DATA_A_IQ4_NL)
+uint best_index(float x) {
+    if (x <= kvalues_iq4nl[0]) return 0;
+    if (x >= kvalues_iq4nl[15]) return 15;
+    int ml = 0, mu = 15;
+    while (mu-ml > 1) {
+        int mav = (ml+mu)/2;
+        if (x < kvalues_iq4nl[mav]) mu = mav; else ml = mav;
+    }
+    return x - kvalues_iq4nl[mu-1] < kvalues_iq4nl[mu] - x ? mu-1 : mu;
+}
+
+void quantize(uint dst_idx, uint src_idx)
+{
+    float amax = 0.0;
+    float vmax = 0.0;
+
+    [[unroll]] for (int j = 0; j < QUANT_K_IQ4_NL; ++j) {
+        const float v = data_s[src_idx + j];
+        if (amax < abs(v)) {
+            amax = abs(v);
+            vmax = v;
+        }
+    }
+
+    float d = vmax / kvalues_iq4nl[0];
+    const float id = (d != 0.0) ? 1.0/d : 0.0;
+
+    float sumqx = 0, sumq2 = 0;
+    [[unroll]] for (int j = 0; j < QUANT_K_IQ4_NL/2; ++j) {
+        const float x0 = data_s[src_idx + 0 + j]*id;
+        const float x1 = data_s[src_idx + QUANT_K_IQ4_NL/2 + j]*id;
+        const uint xi0 = best_index(x0);
+        const uint xi1 = best_index(x1);
+        data_q[dst_idx].qs[j] = uint8_t(xi0 | (xi1 << 4));
+        const float v0 = kvalues_iq4nl[xi0];
+        const float v1 = kvalues_iq4nl[xi1];
+        const float w0 = data_s[src_idx + 0 + j]*data_s[src_idx + 0 + j];
+        const float w1 = data_s[src_idx + QUANT_K_IQ4_NL/2 + j]*data_s[src_idx + QUANT_K_IQ4_NL/2 + j];
+        sumqx += w0*v0*data_s[src_idx + j] + w1*v1*data_s[src_idx + QUANT_K_IQ4_NL/2 + j];
+        sumq2 += w0*v0*v0 + w1*v1*v1;
+    }
+
+    data_q[dst_idx].d = float16_t(sumq2 > 0 ? sumqx/sumq2 : d);
+
+}
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+    init_iq4nl_shmem();
+    if (gl_LocalInvocationIndex.x != 0) {
+        return;
+    }
+#endif
+
+    const uint idx = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x * QUANT_K;
+
+    if (idx >= p.ne) {
+        return;
+    }
+
+    uint dst_idx = dst_idx_quant(idx, QUANT_K);
+    uint src_idx = get_aoffset() + src0_idx(idx);
+
+    quantize(dst_idx, src_idx);
+}
ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp CHANGED
@@ -54,3 +54,23 @@ uint dst_idx(uint idx) {
     const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
     return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + i10*p.nb10;
 }
+
+uint src0_idx_quant(uint idx, uint qk) {
+    const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L);
+    const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00;
+    const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L);
+    const uint i02_offset = i02*p.ne01*p.ne00;
+    const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L);
+    const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00;
+    return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + (i00/qk)*p.nb00;
+}
+
+uint dst_idx_quant(uint idx, uint qk) {
+    const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L);
+    const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10;
+    const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L);
+    const uint i12_offset = i12*p.ne11*p.ne10;
+    const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L);
+    const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10;
+    return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + (i10/qk)*p.nb10;
+}
ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp CHANGED
@@ -417,6 +417,11 @@ void process_shaders() {
     string_to_spv("contig_cpy_f32_f16", "contig_copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
     string_to_spv("contig_cpy_f16_f16", "contig_copy.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
 
+    for (std::string t : {"q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "iq4_nl"}) {
+        string_to_spv("cpy_f32_" + t, "copy_to_quant.comp", {{"DATA_A_" + to_uppercase(t), "1"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+        string_to_spv("cpy_" + t + "_f32", "copy_from_quant.comp", {{"DATA_A_" + to_uppercase(t), "1"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+    }
+
     string_to_spv("add_f32", "add.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
     string_to_spv("add_f16_f32_f16", "add.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}});