diff --git a/api.py b/api.py
index 2d17408..ecc6f68 100644
--- a/api.py
+++ b/api.py
@@ -202,7 +202,7 @@ class TextToSpeech:
             'ultra_fast': {'num_autoregressive_samples': 32, 'diffusion_iterations': 16, 'cond_free': False},
             'fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 32},
             'standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 128},
-            'high_quality': {'num_autoregressive_samples': 512, 'diffusion_iterations': 2048},
+            'high_quality': {'num_autoregressive_samples': 512, 'diffusion_iterations': 1024},
         }
         kwargs.update(presets[preset])
         return self.tts(text, voice_samples, **kwargs)
diff --git a/do_tts.py b/do_tts.py
index 611d698..f7c7477 100644
--- a/do_tts.py
+++ b/do_tts.py
@@ -11,6 +11,10 @@ if __name__ == '__main__':
     parser.add_argument('--text', type=str, help='Text to speak.', default="I am a language model that has learned to speak.")
     parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) '
                                                   'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='patrick_stewart')
+    parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='standard')
+    parser.add_argument('--voice_diversity_intelligibility_slider', type=float,
+                        help='How to balance vocal diversity with the quality/intelligibility of the spoken text. 0 means highly diverse voice (not recommended), 1 means maximize intelligibility',
+                        default=.5)
     parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/')
     args = parser.parse_args()
     os.makedirs(args.output_path, exist_ok=True)
@@ -25,6 +29,6 @@ if __name__ == '__main__':
         for cond_path in cond_paths:
             c = load_audio(cond_path, 22050)
             conds.append(c)
-        gen = tts.tts_with_preset(args.text, conds, preset='standard')
+        gen = tts.tts_with_preset(args.text, conds, preset=args.preset, clvp_cvvp_slider=args.voice_diversity_intelligibility_slider)
 
         torchaudio.save(os.path.join(args.output_path, f'{voice}.wav'), gen.squeeze(0).cpu(), 24000)
diff --git a/read.py b/read.py
index fbff527..d3128ac 100644
--- a/read.py
+++ b/read.py
@@ -28,11 +28,14 @@ def split_and_recombine_text(texts, desired_length=200, max_len=300):
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--textfile', type=str, help='A file containing the text to read.', default="data/riding_hood2.txt")
+    parser.add_argument('--textfile', type=str, help='A file containing the text to read.', default="data/riding_hood.txt")
     parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) '
                                                   'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='patrick_stewart')
     parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/longform/')
-    parser.add_argument('--generation_preset', type=str, help='Preset to use for generation', default='standard')
+    parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='standard')
+    parser.add_argument('--voice_diversity_intelligibility_slider', type=float,
+                        help='How to balance vocal diversity with the quality/intelligibility of the spoken text. 0 means highly diverse voice (not recommended), 1 means maximize intelligibility',
+                        default=.5)
     args = parser.parse_args()
 
     outpath = args.output_path
@@ -60,16 +63,11 @@ if __name__ == '__main__':
         if not cond_paths:
             print('Error: no valid voices specified. Try again.')
 
-        priors = []
+        conds = []
+        for cond_path in cond_paths:
+            c = load_audio(cond_path, 22050)
+            conds.append(c)
         for j, text in enumerate(texts):
-            conds = priors.copy()
-            for cond_path in cond_paths:
-                c = load_audio(cond_path, 22050)
-                conds.append(c)
-            gen = tts.tts_with_preset(text, conds, preset=args.generation_preset)
+            gen = tts.tts_with_preset(text, conds, preset=args.preset, clvp_cvvp_slider=args.voice_diversity_intelligibility_slider)
             torchaudio.save(os.path.join(voice_outpath, f'{j}.wav'), gen.squeeze(0).cpu(), 24000)
-            priors.append(torchaudio.functional.resample(gen, 24000, 22050).squeeze(0))
-            while len(priors) > 2:
-                priors.pop(0)
-
 
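After this change, do_tts.py and read.py drive tts_with_preset identically: conditioning clips are loaded once at 22.05 kHz, both a --preset flag and a diversity/intelligibility slider are exposed, and read.py no longer feeds prior generations back in as conditioning. A minimal sketch of the shared call pattern follows; the import locations and file paths are assumptions for illustration, while tts_with_preset, the preset names, and clvp_cvvp_slider come from the diff itself.

# Sketch of the call pattern both scripts now share. Import and file paths
# are assumed for illustration and may differ in the actual repo layout.
import torchaudio
from api import TextToSpeech        # assumed location of TextToSpeech
from utils.audio import load_audio  # assumed location of load_audio

tts = TextToSpeech()
# Conditioning clips are loaded once at 22.05 kHz, as in the refactored read.py.
conds = [load_audio(p, 22050) for p in ('voices/patrick_stewart/1.wav',
                                        'voices/patrick_stewart/2.wav')]
# clvp_cvvp_slider: 0 favors vocal diversity, 1 favors intelligibility.
gen = tts.tts_with_preset('I am a language model that has learned to speak.',
                          conds, preset='standard', clvp_cvvp_slider=0.5)
# Generated audio is saved at 24 kHz, matching both scripts.
torchaudio.save('results/example.wav', gen.squeeze(0).cpu(), 24000)

From the command line, the equivalent with the new flags would be, for example, python do_tts.py --preset fast --voice_diversity_intelligibility_slider 0.3.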