From ddd0c4ccf874f534a2e4047a70dff6e663a657cb Mon Sep 17 00:00:00 2001
From: mrq
Date: Sun, 12 Feb 2023 01:15:22 +0000
Subject: [PATCH] cleanup loop, save files while generating a batch in the
 event it crashes midway through

---
 webui.py | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/webui.py b/webui.py
index 37b365a..cc9c1f9 100755
--- a/webui.py
+++ b/webui.py
@@ -143,12 +143,10 @@ def generate(
 
 	volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None
 
-	idx = 0
+	idx = 1
 	for i, file in enumerate(os.listdir(outdir)):
 		if file[-5:] == ".json":
 			idx = idx + 1
-	if idx:
-		idx = idx + 1
 
 	# reserve, if for whatever reason you manage to concurrently generate
 	with open(f'{outdir}/input_{idx}.json', 'w', encoding="utf-8") as f:
@@ -180,24 +178,23 @@ def generate(
 			run_time = time.time()-start_time
 			print(f"Generating line took {run_time} seconds")
 
-			if isinstance(gen, list):
-				for j, g in enumerate(gen):
-					name = get_name(line=line, candidate=j)
-					audio_cache[name] = {
-						'audio': g,
-						'text': cut_text,
-						'time': run_time
-					}
-			else:
-				name = get_name(line=line)
+			if not isinstance(gen, list):
+				gen = [gen]
+
+			for j, g in enumerate(gen):
+				audio = g.squeeze(0).cpu()
+				name = get_name(line=line, candidate=j)
 				audio_cache[name] = {
-					'audio': gen,
+					'audio': audio,
 					'text': cut_text,
-					'time': run_time,
+					'time': run_time
 				}
+				# save here in case some error happens mid-batch
+				torchaudio.save(f'{outdir}/{voice}_{name}.wav', audio, args.output_sample_rate)
 
 	for k in audio_cache:
-		audio = audio_cache[k]['audio'].squeeze(0).cpu()
+		audio = audio_cache[k]['audio']
+
 		if resampler is not None:
 			audio = resampler(audio)
 		if volume_adjust is not None:
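
Below is a minimal, self-contained sketch (not webui.py itself) of the save-as-you-go pattern this patch switches to: the generator output is normalized into a list so one loop handles both single- and multi-candidate results, and each candidate is written to disk as soon as it is produced, so a crash partway through a batch only loses the line currently being generated. The synthesize() helper, the naming scheme, and the sample rate are placeholders standing in for webui.py's own code.

import os
import torch
import torchaudio

def synthesize(text, candidates=1):
	# stand-in for the real TTS call: returns one tensor, or a list of them
	fake = [torch.zeros(1, 1, 24000) for _ in range(candidates)]
	return fake if candidates > 1 else fake[0]

def generate_batch(lines, outdir, voice, sample_rate=24000, candidates=1):
	os.makedirs(outdir, exist_ok=True)
	audio_cache = {}
	for i, text in enumerate(lines):
		gen = synthesize(text, candidates)
		if not isinstance(gen, list):
			# normalize so a single loop handles both cases
			gen = [gen]
		for j, g in enumerate(gen):
			audio = g.squeeze(0).cpu()  # [1, 1, T] -> [1, T] for torchaudio.save
			name = f'{i:05d}_{j:02d}'   # placeholder naming scheme
			audio_cache[name] = {'audio': audio, 'text': text}
			# save here in case some error happens mid-batch
			torchaudio.save(f'{outdir}/{voice}_{name}.wav', audio, sample_rate)
	return audio_cache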