From ec92613847cf3f7d4b6a6a7833a40b37ac4b607f Mon Sep 17 00:00:00 2001
From: mrq
Date: Mon, 11 Nov 2024 20:39:48 -0600
Subject: [PATCH] actually pass input prompt length size to inference

---
 vall_e/__main__.py | 1 +
 vall_e/webui.py    | 1 +
 2 files changed, 2 insertions(+)

diff --git a/vall_e/__main__.py b/vall_e/__main__.py
index d51e0fd..47efea0 100755
--- a/vall_e/__main__.py
+++ b/vall_e/__main__.py
@@ -96,6 +96,7 @@ def main():
 		layer_skip_varentropy_threshold=args.layer_skip_varentropy_threshold,
 		refine_on_stop=args.refine_on_stop,
 		denoise_start=args.denoise_start,
+		input_prompt_length=args.input_prompt_length,
 		input_prompt_prefix=args.input_prompt_prefix,
 		prefix_silence=args.prefix_silence,
 		cfg_strength=args.cfg_strength,
diff --git a/vall_e/webui.py b/vall_e/webui.py
index c8bbbaf..e3f4d0c 100644
--- a/vall_e/webui.py
+++ b/vall_e/webui.py
@@ -277,6 +277,7 @@ def do_inference_tts( progress=gr.Progress(track_tqdm=True), *args, **kwargs ):
 		denoise_start=args.denoise_start,
 		prefix_silence=args.prefix_silence,
 		input_prompt_prefix=args.input_prompt_prefix,
+		input_prompt_length=args.input_prompt_length,
 	)
 
 	with timer("Inferenced in", callback=lambda msg: gr.Info( msg )) as t: