Catch out-of-memory (OOM) errors when loading the Whisper model and automatically fall back to running it on the CPU.

This commit is contained in:
tigi6346 2023-03-12 06:48:28 +02:00
parent ede9804b76
commit 61500107ab

View File

@ -2065,7 +2065,13 @@ def load_whisper_model(language=None, model_name=None, progress=None):
if args.whisper_backend == "openai/whisper":
    import whisper
    # Try to load the model on the default device (GPU when available);
    # if that fails — typically a CUDA out-of-memory error — retry on CPU.
    # NOTE(review): loading can succeed yet still OOM later while running
    # inference on actual audio data — TODO confirm whether runtime OOM
    # also needs a CPU fallback.
    try:
        whisper_model = whisper.load_model(model_name)
    except Exception:
        # `except Exception` instead of a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate; non-OOM failures
        # (e.g. a bad model name) will surface again on the CPU retry
        # below instead of being silently swallowed.
        print("Out of VRAM memory.")
        print("Falling back to loading Whisper on CPU.")
        whisper_model = whisper.load_model(model_name, device="cpu")
elif args.whisper_backend == "lightmare/whispercpp":
from whispercpp import Whisper
if not language: