Spaces:
Running
Running
st-gr
Stefan Grundmann
committed on
openvino : fix convert-whisper-to-openvino.py (#1890)
Browse files
Fix issue: Conversion from Whisper to OpenVINO failed #1870
convert-whisper-to-openvino.py stopped working with OpenVINO version 2023.0.0-10926-b4452d56304-releases/2023/0.
Error was: TypeError: load(): incompatible function arguments. The following argument types are supported:
1. (self: openvino._pyopenvino.FrontEnd, path: object) -> ov::frontend::InputModel
Tested successfully with a large-v3 conversion.
Co-authored-by: Stefan Grundmann <[email protected]>
models/convert-whisper-to-openvino.py
CHANGED
|
@@ -3,6 +3,7 @@ import torch
|
|
| 3 |
from whisper import load_model
|
| 4 |
import os
|
| 5 |
from openvino.tools import mo
|
|
|
|
| 6 |
from openvino.runtime import serialize
|
| 7 |
import shutil
|
| 8 |
|
|
@@ -11,7 +12,7 @@ def convert_encoder(hparams, encoder, mname):
|
|
| 11 |
|
| 12 |
mel = torch.zeros((1, hparams.n_mels, 3000))
|
| 13 |
|
| 14 |
-
onnx_folder=os.path.join(os.path.dirname(__file__),"onnx_encoder")
|
| 15 |
|
| 16 |
#create a directory to store the onnx model, and other collateral that is saved during onnx export procedure
|
| 17 |
if not os.path.isdir(onnx_folder):
|
|
@@ -19,6 +20,7 @@ def convert_encoder(hparams, encoder, mname):
|
|
| 19 |
|
| 20 |
onnx_path = os.path.join(onnx_folder, "whisper_encoder.onnx")
|
| 21 |
|
|
|
|
| 22 |
torch.onnx.export(
|
| 23 |
encoder,
|
| 24 |
mel,
|
|
@@ -27,11 +29,16 @@ def convert_encoder(hparams, encoder, mname):
|
|
| 27 |
output_names=["output_features"]
|
| 28 |
)
|
| 29 |
|
| 30 |
-
#
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
| 33 |
|
| 34 |
-
#
|
|
|
|
|
|
|
|
|
|
| 35 |
if os.path.isdir(onnx_folder):
|
| 36 |
shutil.rmtree(onnx_folder)
|
| 37 |
|
|
|
|
| 3 |
from whisper import load_model
|
| 4 |
import os
|
| 5 |
from openvino.tools import mo
|
| 6 |
+
from openvino.frontend import FrontEndManager
|
| 7 |
from openvino.runtime import serialize
|
| 8 |
import shutil
|
| 9 |
|
|
|
|
| 12 |
|
| 13 |
mel = torch.zeros((1, hparams.n_mels, 3000))
|
| 14 |
|
| 15 |
+
onnx_folder = os.path.join(os.path.dirname(__file__), "onnx_encoder")
|
| 16 |
|
| 17 |
#create a directory to store the onnx model, and other collateral that is saved during onnx export procedure
|
| 18 |
if not os.path.isdir(onnx_folder):
|
|
|
|
| 20 |
|
| 21 |
onnx_path = os.path.join(onnx_folder, "whisper_encoder.onnx")
|
| 22 |
|
| 23 |
+
# Export the PyTorch model to ONNX
|
| 24 |
torch.onnx.export(
|
| 25 |
encoder,
|
| 26 |
mel,
|
|
|
|
| 29 |
output_names=["output_features"]
|
| 30 |
)
|
| 31 |
|
| 32 |
+
# Convert ONNX to OpenVINO IR format using the frontend
|
| 33 |
+
fem = FrontEndManager()
|
| 34 |
+
onnx_fe = fem.load_by_framework("onnx")
|
| 35 |
+
onnx_model = onnx_fe.load(onnx_path)
|
| 36 |
+
ov_model = onnx_fe.convert(onnx_model)
|
| 37 |
|
| 38 |
+
# Serialize the OpenVINO model to XML and BIN files
|
| 39 |
+
serialize(ov_model, xml_path=os.path.join(os.path.dirname(__file__), "ggml-" + mname + "-encoder-openvino.xml"))
|
| 40 |
+
|
| 41 |
+
# Cleanup
|
| 42 |
if os.path.isdir(onnx_folder):
|
| 43 |
shutil.rmtree(onnx_folder)
|
| 44 |
|