From 61500107ab9b28d277e3a5beff61d98282bfc392 Mon Sep 17 00:00:00 2001
From: tigi6346 <ybkqoaznfyobcqcxuc@bbitj.com>
Date: Sun, 12 Mar 2023 06:48:28 +0200
Subject: [PATCH] Catch OOM and automatically fall back to running Whisper on the CPU

---
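Note for reviewers (this section is ignored by `git am`): below is a minimal
standalone sketch of the fallback behavior this patch implements. The
`load_whisper_with_cpu_fallback` name, the "out of memory" message check, and
the `torch.cuda.empty_cache()` call are illustrative additions for discussion,
not code the patch itself adds.

	import torch
	import whisper

	def load_whisper_with_cpu_fallback(model_name):
		# Try the default device (GPU when available) first.
		try:
			return whisper.load_model(model_name)
		except RuntimeError as e:
			# CUDA OOM surfaces as a RuntimeError; newer torch versions also
			# expose torch.cuda.OutOfMemoryError, a RuntimeError subclass.
			if "out of memory" not in str(e).lower():
				raise
			torch.cuda.empty_cache()  # release any partial GPU allocation
			print("Out of VRAM, falling back to loading Whisper on CPU.")
			return whisper.load_model(model_name, device="cpu")
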
 src/utils.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/utils.py b/src/utils.py
index 743b873..6da2af4 100755
--- a/src/utils.py
+++ b/src/utils.py
@@ -2065,7 +2065,13 @@ def load_whisper_model(language=None, model_name=None, progress=None):
 
 	if args.whisper_backend == "openai/whisper":
 		import whisper
-		whisper_model = whisper.load_model(model_name)
+		try:
+			# Open question: can the model fit in VRAM at load time but still go OOM later, while running on data?
+			whisper_model = whisper.load_model(model_name)
+		except RuntimeError:  # CUDA OOM raises RuntimeError (torch.cuda.OutOfMemoryError subclasses it)
+			print("Out of VRAM.")
+			print("Falling back to loading Whisper on CPU.")
+			whisper_model = whisper.load_model(model_name, device="cpu")
 	elif args.whisper_backend == "lightmare/whispercpp":
 		from whispercpp import Whisper
 		if not language: