Merge pull request 'Catch OOM and run whisper on cpu automatically.' (#117) from zim33/ai-voice-cloning:vram into master
Reviewed-on: #117
commit 8ed09f9b87
@@ -2065,7 +2065,13 @@ def load_whisper_model(language=None, model_name=None, progress=None):
 	if args.whisper_backend == "openai/whisper":
 		import whisper
-		whisper_model = whisper.load_model(model_name)
+		try:
+			#is it possible for model to fit on vram but go oom later on while executing on data?
+			whisper_model = whisper.load_model(model_name)
+		except:
+			print("Out of VRAM memory.")
+			print(f"Falling back to loading Whisper on CPU.")
+			whisper_model = whisper.load_model(model_name, device="cpu")
 	elif args.whisper_backend == "lightmare/whispercpp":
 		from whispercpp import Whisper
 		if not language:
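A note on the patch: the bare `except:` catches every exception, not just CUDA out-of-memory errors, so unrelated failures (a typoed model name, even a KeyboardInterrupt) would also trigger the CPU retry. Below is a minimal sketch of a narrower guard, assuming the PyTorch-backed openai/whisper backend; `load_whisper_with_cpu_fallback` is a hypothetical helper (the project loads inline in `load_whisper_model()`), and `torch.cuda.OutOfMemoryError` requires PyTorch >= 1.13 (older versions raise a plain `RuntimeError` instead).

# A sketch, not the project's code: narrow the fallback to CUDA OOM only.
import torch
import whisper  # openai-whisper


def load_whisper_with_cpu_fallback(model_name):
    # Hypothetical helper, not part of this repository.
    try:
        # Only the load itself is guarded here, as in the original patch;
        # the patch's inline comment asks whether transcription can still
        # OOM later, and this does not cover that case.
        return whisper.load_model(model_name)
    except torch.cuda.OutOfMemoryError:
        # Subclass of RuntimeError, available since PyTorch 1.13.
        print("Out of VRAM. Falling back to loading Whisper on CPU.")
        torch.cuda.empty_cache()  # release the partial allocation before retrying
        return whisper.load_model(model_name, device="cpu")

As the patch's inline comment notes, a model can fit in VRAM at load time and still run out of memory during transcription on long inputs; guarding that would need a similar fallback around the `transcribe()` call.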