master
mrq 2023-03-14 17:42:42 +07:00
parent 9d2c7fb942
commit fe03ae5839
2 changed files with 18 additions and 1 deletion

@@ -49,6 +49,7 @@ WHISPER_BACKENDS = ["openai/whisper", "lightmare/whispercpp"]
 VOCODERS = ['univnet', 'bigvgan_base_24khz_100band', 'bigvgan_24khz_100band']
 TTSES = ['tortoise']
 INFERENCING = False
+GENERATE_SETTINGS_ARGS = None
 LEARNING_RATE_SCHEMES = {"Multistep": "MultiStepLR", "Cos. Annealing": "CosineAnnealingLR_Restart"}
@@ -320,6 +321,7 @@ def generate(**kwargs):
 		return info
+	INFERENCING = True
 	for line, cut_text in enumerate(texts):
 		if parameters['emotion'] == "Custom":
 			if parameters['prompt'] and parameters['prompt'].strip() != "":
@@ -371,6 +373,7 @@ def generate(**kwargs):
 		del gen
 		do_gc()
+	INFERENCING = False
 	for k in audio_cache:
 		audio = audio_cache[k]['audio']
@@ -486,7 +489,11 @@ def generate(**kwargs):
 	)
 def cancel_generate():
+	if not INFERENCING:
+		return
 	import tortoise.api
 	tortoise.api.STOP_SIGNAL = True
 def hash_file(path, algo="md5", buffer_size=0):
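The new INFERENCING flag brackets the synthesis loop so cancel_generate() only raises tortoise's stop signal while a pass is actually running; presumably a stray cancel click while idle should no longer flag the next run for cancellation. A minimal sketch of that interplay — the flag names follow this diff, while the loop body and cancel hook are placeholders, not the project's actual generate():

# Sketch only: INFERENCING/STOP_SIGNAL mirror the names in the diff; the
# synthesis itself is replaced by a placeholder string.
INFERENCING = False
STOP_SIGNAL = False

def generate_sketch(texts):
	global INFERENCING, STOP_SIGNAL
	INFERENCING = True              # set before the synthesis loop
	results = []
	for text in texts:
		if STOP_SIGNAL:             # a cancel request arrived mid-run
			STOP_SIGNAL = False
			break
		results.append(f"<audio for {text!r}>")  # placeholder for the real TTS call
	INFERENCING = False             # cleared once the loop exits
	return results

def cancel_generate_sketch():
	global STOP_SIGNAL
	if not INFERENCING:             # nothing in flight; ignore the click
		return
	STOP_SIGNAL = True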

@@ -155,6 +155,16 @@ def import_generate_settings_proxy( file=None ):
 	return tuple(res)
+def reset_generate_settings_proxy( file=None ):
+	global GENERATE_SETTINGS_ARGS
+	settings = reset_generate_settings( file )
+	res = []
+	for k in GENERATE_SETTINGS_ARGS:
+		res.append(settings[k] if k in settings else None)
+	return tuple(res)
 def compute_latents_proxy(voice, voice_latents_chunks, progress=gr.Progress(track_tqdm=True)):
 	compute_latents( voice=voice, voice_latents_chunks=voice_latents_chunks, progress=progress )
 	return voice
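The new proxy mirrors import_generate_settings_proxy above: it flattens the settings dict into a tuple ordered by GENERATE_SETTINGS_ARGS, one value per Gradio output component, with None for any missing key. A standalone sketch of that pattern, with invented key names in place of the real GENERATE_SETTINGS_ARGS:

# Illustrative only: the project fills GENERATE_SETTINGS_ARGS with its real
# generation-argument names; these three are made up.
GENERATE_SETTINGS_ARGS = ["text", "voice", "candidates"]

def settings_to_outputs(settings):
	# One value per output component, in GENERATE_SETTINGS_ARGS order;
	# a missing key becomes None so that component is simply left blank/reset.
	return tuple(settings.get(k) for k in GENERATE_SETTINGS_ARGS)

print(settings_to_outputs({"voice": "random", "candidates": 1}))
# -> (None, 'random', 1)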
@@ -688,7 +698,7 @@ def setup_gradio():
 		)
 	reset_generation_settings_button.click(
-		fn=reset_generation_settings,
+		fn=reset_generation_settings_proxy,
 		inputs=None,
 		outputs=generate_settings
 	)
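Gradio maps a callback's returned tuple positionally onto its outputs list, which is presumably why the click handler is rebound here to the tuple-returning proxy rather than the underlying reset function (which, as the proxy above shows, yields a settings dict indexed by key). A small, self-contained sketch of that wiring, with invented components and defaults in place of the project's generate_settings:

# Self-contained illustration of the rebinding above; the components and
# defaults here are assumptions, not the project's actual UI.
import gradio as gr

DEFAULTS = {"text": "", "candidates": 1}

def reset_settings_proxy():
	# Return one value per output component, in the same order as `outputs`.
	return DEFAULTS["text"], DEFAULTS["candidates"]

with gr.Blocks() as demo:
	text = gr.Textbox(label="Input Text")
	candidates = gr.Slider(minimum=1, maximum=6, value=1, step=1, label="Candidates")
	reset_button = gr.Button("Reset Settings")
	reset_button.click(fn=reset_settings_proxy, inputs=None, outputs=[text, candidates])

# demo.launch()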