ggerganov committed on
Commit
6f45862
·
unverified ·
1 Parent(s): 8253b98

whisper : fix quantize bug (#842)

Browse files

* whisper : debug

* whisper : fix bug during quantization

Files changed (2) hide show
  1. examples/common-ggml.cpp +6 -6
  2. whisper.cpp +3 -1
examples/common-ggml.cpp CHANGED
@@ -90,7 +90,7 @@ bool ggml_common_quantize_0(
90
  }
91
 
92
  int32_t nelements = 1;
93
- int32_t ne[2] = { 1, 1 };
94
  for (int i = 0; i < n_dims; ++i) {
95
  finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
96
  nelements *= ne[i];
@@ -99,7 +99,7 @@ bool ggml_common_quantize_0(
99
  std::string name(length, 0);
100
  finp.read (&name[0], length);
101
 
102
- printf("%64s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ggml_type_name((ggml_type) ttype));
103
 
104
  bool quantize = false;
105
 
@@ -204,11 +204,11 @@ bool ggml_common_quantize_0(
204
  total_size_new += cur_size;
205
 
206
  printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
207
- for (int i = 0; i < hist_cur.size(); ++i) {
208
  hist_all[i] += hist_cur[i];
209
  }
210
 
211
- for (int i = 0; i < hist_cur.size(); ++i) {
212
  printf("%5.3f ", hist_cur[i] / (float)nelements);
213
  }
214
  printf("\n");
@@ -226,12 +226,12 @@ bool ggml_common_quantize_0(
226
 
227
  {
228
  int64_t sum_all = 0;
229
- for (int i = 0; i < hist_all.size(); ++i) {
230
  sum_all += hist_all[i];
231
  }
232
 
233
  printf("%s: hist: ", __func__);
234
- for (int i = 0; i < hist_all.size(); ++i) {
235
  printf("%5.3f ", hist_all[i] / (float)sum_all);
236
  }
237
  printf("\n");
 
90
  }
91
 
92
  int32_t nelements = 1;
93
+ int32_t ne[4] = { 1, 1, 1, 1 };
94
  for (int i = 0; i < n_dims; ++i) {
95
  finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
96
  nelements *= ne[i];
 
99
  std::string name(length, 0);
100
  finp.read (&name[0], length);
101
 
102
+ printf("%64s - [%5d, %5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ne[2], ggml_type_name((ggml_type) ttype));
103
 
104
  bool quantize = false;
105
 
 
204
  total_size_new += cur_size;
205
 
206
  printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
207
+ for (int i = 0; i < (int) hist_cur.size(); ++i) {
208
  hist_all[i] += hist_cur[i];
209
  }
210
 
211
+ for (int i = 0; i < (int) hist_cur.size(); ++i) {
212
  printf("%5.3f ", hist_cur[i] / (float)nelements);
213
  }
214
  printf("\n");
 
226
 
227
  {
228
  int64_t sum_all = 0;
229
+ for (int i = 0; i < (int) hist_all.size(); ++i) {
230
  sum_all += hist_all[i];
231
  }
232
 
233
  printf("%s: hist: ", __func__);
234
+ for (int i = 0; i < (int) hist_all.size(); ++i) {
235
  printf("%5.3f ", hist_all[i] / (float)sum_all);
236
  }
237
  printf("\n");
whisper.cpp CHANGED
@@ -1333,7 +1333,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
1333
  }
1334
 
1335
  int32_t nelements = 1;
1336
- int32_t ne[3] = { 1, 1, 1 };
1337
  for (int i = 0; i < n_dims; ++i) {
1338
  read_safe(loader, ne[i]);
1339
  nelements *= ne[i];
@@ -1352,6 +1352,8 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
1352
  auto tensor = model.tensors[name.data()];
1353
  if (ggml_nelements(tensor) != nelements) {
1354
  fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
 
 
1355
  return false;
1356
  }
1357
 
 
1333
  }
1334
 
1335
  int32_t nelements = 1;
1336
+ int32_t ne[4] = { 1, 1, 1, 1 };
1337
  for (int i = 0; i < n_dims; ++i) {
1338
  read_safe(loader, ne[i]);
1339
  nelements *= ne[i];
 
1352
  auto tensor = model.tensors[name.data()];
1353
  if (ggml_nelements(tensor) != nelements) {
1354
  fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
1355
+ fprintf(stderr, "%s: shape: [%d, %d, %d], expected: [%d, %d, %d]\n",
1356
+ __func__, ne[0], ne[1], ne[2], (int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2]);
1357
  return false;
1358
  }
1359