ggerganov committed · Commit bb6071e (unverified) · Parent(s): ec8187f

ci : fix and re-enable tests (2nd try)
Files changed (2):
  1. CMakeLists.txt +4 -5
  2. whisper.cpp +16 -6
CMakeLists.txt CHANGED
@@ -170,9 +170,8 @@ if (WHISPER_STANDALONE)
         target_link_libraries(${TARGET} PRIVATE whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
     endif ()
 
-    # TODO: temporary disabled
-    #if (WHISPER_BUILD_TESTS)
-    #    enable_testing()
-    #    add_subdirectory(tests)
-    #endif ()
+    if (WHISPER_BUILD_TESTS)
+        enable_testing()
+        add_subdirectory(tests)
+    endif ()
 endif ()
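
The re-enabled block only hooks the tests directory into CTest; the contents of tests/ are not part of this commit. As a rough, hypothetical sketch (the file name, default model path, and exact API usage below are assumptions, not taken from this diff), a minimal test program driven by CTest could simply try to load a model and report the result through its exit code:

// tests/test-main.cpp (hypothetical) - minimal CTest-driven smoke test:
// load a model file (possibly a header-only "empty" test model) and
// report success/failure through the process exit code.
#include "whisper.h"

#include <cstdio>

int main(int argc, char ** argv) {
    // assumed path to a small model used only for testing
    const char * model_path = argc > 1 ? argv[1] : "for-tests-ggml-base.en.bin";

    struct whisper_context * ctx = whisper_init(model_path);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to load model '%s'\n", model_path);
        return 1;
    }

    whisper_free(ctx);
    return 0;
}

With enable_testing() and add_subdirectory(tests) back in place, a binary like this would be registered via add_test() in tests/CMakeLists.txt and run by ctest in CI.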
whisper.cpp CHANGED
@@ -379,8 +379,11 @@ struct whisper_model {
     struct ggml_tensor * memory_cross_k;
     struct ggml_tensor * memory_cross_v;
 
-    //
+    // context
     struct ggml_context * ctx;
+
+    // tensors
+    int n_loaded;
     std::map<std::string, struct ggml_tensor *> tensors;
 };
 
@@ -951,9 +954,10 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
 
     // load weights
     {
-        int n_loaded = 0;
         size_t total_size = 0;
 
+        model.n_loaded = 0;
+
         while (true) {
             int32_t n_dims;
             int32_t length;
@@ -1006,15 +1010,15 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
 
             //printf("%24s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
             total_size += ggml_nbytes(tensor);
-            n_loaded++;
+            model.n_loaded++;
         }
 
         fprintf(stderr, "%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0);
 
-        if (n_loaded == 0) {
+        if (model.n_loaded == 0) {
             fprintf(stderr, "%s: WARN no tensors loaded from model file - assuming empty model for testing\n", __func__);
-        } else if (n_loaded != (int) model.tensors.size()) {
-            fprintf(stderr, "%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), n_loaded);
+        } else if (model.n_loaded != (int) model.tensors.size()) {
+            fprintf(stderr, "%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), model.n_loaded);
             return false;
         }
     }
@@ -2477,6 +2481,12 @@ int whisper_full(
                 }
                 break;
             }
+
+            // TESTS: if no tensors are loaded, it means we are running tests
+            if (ctx->model.n_loaded == 0) {
+                seek_delta = 100*WHISPER_CHUNK_SIZE;
+                break;
+            }
         }
 
         if (done) {
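
Taken together, the whisper.cpp changes move the tensor counter into the model struct so that whisper_full can tell when it is running against a header-only test model (zero tensors loaded) and skip real decoding. A simplified, standalone sketch of that pattern follows; it is not the literal code above, and the names and constants are condensed for illustration:

// Condensed illustration of the empty-model ("test mode") handling added above.
#include <cstdio>
#include <map>
#include <string>

struct model_t {
    int n_loaded = 0;                      // tensors actually read from the file
    std::map<std::string, void *> tensors; // tensors the graph expects
};

// after loading: an empty model is tolerated (testing), a partial one is an error
bool check_loaded(const model_t & model) {
    if (model.n_loaded == 0) {
        fprintf(stderr, "WARN: no tensors loaded - assuming empty model for testing\n");
        return true;
    }
    if (model.n_loaded != (int) model.tensors.size()) {
        fprintf(stderr, "ERROR: expected %zu tensors, got %d\n",
                model.tensors.size(), model.n_loaded);
        return false;
    }
    return true;
}

// inside the main decoding loop: with no weights there is nothing to decode,
// so advance the seek position by a full chunk and stop this iteration
void maybe_skip_chunk(const model_t & model, int & seek_delta, bool & stop) {
    const int chunk_size_s      = 30;  // stands in for WHISPER_CHUNK_SIZE (seconds)
    const int frames_per_second = 100; // mel frames are 10 ms each
    if (model.n_loaded == 0) {
        seek_delta = frames_per_second*chunk_size_s; // i.e. 100*WHISPER_CHUNK_SIZE
        stop = true;
    }
}

Fast-forwarding by a full chunk per iteration lets the re-enabled CI tests exercise the whisper_full control flow quickly without any decoder weights present.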