From c52cc78632aeea6fc9fcb38c889fd0b237cc8897 Mon Sep 17 00:00:00 2001
From: James Betker
Date: Fri, 15 Apr 2022 08:26:11 -0600
Subject: [PATCH] update

---
 .gitignore     |  1 +
 api.py         | 43 +++++++++++++++---------------
 do_tts.py      | 12 ++++-----
 read.py        | 72 ++++++++++++++++++++++++++++----------------------
 utils/audio.py | 13 +++++++++
 5 files changed, 81 insertions(+), 60 deletions(-)

diff --git a/.gitignore b/.gitignore
index fbc6b1c..41582a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@ parts/
 sdist/
 var/
 wheels/
+results/*
 pip-wheel-metadata/
 share/python-wheels/
 *.egg-info/

diff --git a/api.py b/api.py
index b101518..c64918a 100644
--- a/api.py
+++ b/api.py
@@ -150,7 +150,7 @@ def do_spectrogram_diffusion(diffusion_model, diffuser, mel_codes, conditioning_
 
 
 class TextToSpeech:
-    def __init__(self, autoregressive_batch_size=32):
+    def __init__(self, autoregressive_batch_size=16):
         self.autoregressive_batch_size = autoregressive_batch_size
         self.tokenizer = VoiceBpeTokenizer()
         download_models()
@@ -160,14 +160,7 @@ class TextToSpeech:
                                           heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
                                           train_solo_embeddings=False,
                                           average_conditioning_embeddings=True).cpu().eval()
-        self.autoregressive.load_state_dict(torch.load('.models/autoregressive_audiobooks.pth'))
-
-        self.autoregressive_for_latents = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
-                                                       model_dim=1024,
-                                                       heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
-                                                       train_solo_embeddings=False,
-                                                       average_conditioning_embeddings=True).cpu().eval()
-        self.autoregressive_for_latents.load_state_dict(torch.load('.models/autoregressive_audiobooks.pth'))
+        self.autoregressive.load_state_dict(torch.load('.models/autoregressive.pth'))
 
         self.clip = VoiceCLIP(dim_text=512, dim_speech=512, dim_latent=512, num_text_tokens=256, text_enc_depth=12,
                               text_seq_len=350, text_heads=8,
@@ -178,32 +171,38 @@ class TextToSpeech:
         self.diffusion = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
                                       in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
                                       layer_drop=0, unconditioned_percentage=0).cpu().eval()
-        self.diffusion.load_state_dict(torch.load('.models/diffusion_decoder_audiobooks.pth'))
+        self.diffusion.load_state_dict(torch.load('.models/diffusion_decoder.pth'))
 
         self.vocoder = UnivNetGenerator().cpu()
         self.vocoder.load_state_dict(torch.load('.models/vocoder.pth')['model_g'])
         self.vocoder.eval(inference=True)
 
-    def tts_with_preset(self, text, voice_samples, preset='intelligible', **kwargs):
+    def tts_with_preset(self, text, voice_samples, preset='fast', **kwargs):
         """
         Calls TTS with one of a set of preset generation parameters. Options:
-            'intelligible': Maximizes the probability of understandable words at the cost of diverse voices, intonation and prosody.
-            'realistic': Increases the diversity of spoken voices and improves realism of vocal characteristics at the cost of intelligibility.
-            'mid': Somewhere between 'intelligible' and 'realistic'.
+            'ultra_fast': Produces speech at a speed which belies the name of this repo. (Not really, but it's definitely fastest.)
+            'fast': Decent quality speech at a decent inference rate. A good choice for mass inference.
+            'standard': Very good quality. This is generally about as good as you are going to get.
+            'high_quality': Use if you want the absolute best. This is not really worth the compute, though.
         """
+        # Use generally found best tuning knobs for generation.
+        kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .8,
+                       'cond_free_k': 2.0, 'diffusion_temperature': 1.0})
+        # Presets are defined here.
         presets = {
-            'intelligible': {'temperature': .5, 'length_penalty': 2.0, 'repetition_penalty': 2.0, 'top_p': .5, 'diffusion_iterations': 100, 'cond_free': True, 'cond_free_k': .7, 'diffusion_temperature': .7},
-            'mid': {'temperature': .7, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .7, 'diffusion_iterations': 100, 'cond_free': True, 'cond_free_k': 1.5, 'diffusion_temperature': .8},
-            'realistic': {'temperature': 1.0, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .9, 'diffusion_iterations': 100, 'cond_free': True, 'cond_free_k': 2, 'diffusion_temperature': 1},
+            'ultra_fast': {'num_autoregressive_samples': 32, 'diffusion_iterations': 16, 'cond_free': False},
+            'fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 32},
+            'standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 128},
+            'high_quality': {'num_autoregressive_samples': 512, 'diffusion_iterations': 2048},
         }
         kwargs.update(presets[preset])
         return self.tts(text, voice_samples, **kwargs)
 
     def tts(self, text, voice_samples, k=1,
             # autoregressive generation parameters follow
-            num_autoregressive_samples=512, temperature=.5, length_penalty=1, repetition_penalty=2.0, top_p=.5,
+            num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8,
             # diffusion generation parameters follow
-            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=.7,):
+            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
         text = F.pad(text, (0, 1))  # This may not be necessary.
@@ -250,11 +249,11 @@ class TextToSpeech:
         # The diffusion model actually wants the last hidden layer from the autoregressive model as conditioning
         # inputs. Re-produce those for the top results. This could be made more efficient by storing all of these
         # results, but will increase memory usage.
-        self.autoregressive_for_latents = self.autoregressive_for_latents.cuda()
-        best_latents = self.autoregressive_for_latents(conds, text, torch.tensor([text.shape[-1]], device=conds.device), best_results,
+        self.autoregressive = self.autoregressive.cuda()
+        best_latents = self.autoregressive(conds, text, torch.tensor([text.shape[-1]], device=conds.device), best_results,
                                            torch.tensor([best_results.shape[-1]*self.autoregressive.mel_length_compression], device=conds.device),
                                            return_latent=True, clip_inputs=False)
-        self.autoregressive_for_latents = self.autoregressive_for_latents.cpu()
+        self.autoregressive = self.autoregressive.cpu()
 
         print("Performing vocoding..")
         wav_candidates = []
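The reworked presets above pin the sampling knobs (temperature, top_p, cond_free_k, and so on) to fixed "generally best" values and let the presets trade num_autoregressive_samples and diffusion_iterations against latency. A minimal usage sketch, assuming the .models/ checkpoints have been downloaded and that 22.05 kHz clips exist under voices/patrick_stewart/ per the repo's conventions:

    import torchaudio
    from api import TextToSpeech
    from utils.audio import load_audio

    tts = TextToSpeech()  # autoregressive_batch_size now defaults to 16
    # Conditioning clips are loaded at 22050 Hz, as read.py does.
    conds = [load_audio(f'voices/patrick_stewart/{i}.wav', 22050) for i in (1, 2)]
    # 'fast' is the new default; 'ultra_fast'/'standard'/'high_quality'
    # trade generation speed against output quality.
    gen = tts.tts_with_preset("Hello there.", conds, preset='standard')
    torchaudio.save('hello.wav', gen.squeeze(0).cpu(), 24000)  # output is 24 kHz
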
diff --git a/do_tts.py b/do_tts.py
index 2c00981..3448942 100644
--- a/do_tts.py
+++ b/do_tts.py
@@ -27,12 +27,12 @@ if __name__ == '__main__':
     }
 
     parser = argparse.ArgumentParser()
-    parser.add_argument('-text', type=str, help='Text to speak.', default="I am a language model that has learned to speak.")
-    parser.add_argument('-voice', type=str, help='Use a preset conditioning voice (defined above). Overrides cond_path.', default='obama,dotrice,harris,lescault,otto,atkins,grace,kennard,mol')
-    parser.add_argument('-num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=128)
-    parser.add_argument('-batch_size', type=int, help='How many samples to process at once in the autoregressive model.', default=16)
-    parser.add_argument('-num_diffusion_samples', type=int, help='Number of outputs that progress to the diffusion stage.', default=16)
-    parser.add_argument('-output_path', type=str, help='Where to store outputs.', default='results/')
+    parser.add_argument('--text', type=str, help='Text to speak.', default="I am a language model that has learned to speak.")
+    parser.add_argument('--voice', type=str, help='Use a preset conditioning voice (defined above). Overrides cond_path.', default='obama,dotrice,harris,lescault,otto,atkins,grace,kennard,mol')
+    parser.add_argument('--num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=128)
+    parser.add_argument('--batch_size', type=int, help='How many samples to process at once in the autoregressive model.', default=16)
+    parser.add_argument('--num_diffusion_samples', type=int, help='Number of outputs that progress to the diffusion stage.', default=16)
+    parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/')
     args = parser.parse_args()
 
     os.makedirs(args.output_path, exist_ok=True)
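With the switch to double-dash flags, a typical invocation of do_tts.py now looks like this (the values shown are just the script's own defaults):

    python do_tts.py --text "I am a language model that has learned to speak." \
                     --voice obama --num_samples 128 --batch_size 16 \
                     --num_diffusion_samples 16 --output_path results/
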
diff --git a/read.py b/read.py
index dfcfc1d..22623ac 100644
--- a/read.py
+++ b/read.py
@@ -6,7 +6,7 @@ import torch.nn.functional as F
 import torchaudio
 
 from api import TextToSpeech, load_conditioning
-from utils.audio import load_audio
+from utils.audio import load_audio, get_voices
 from utils.tokenizer import VoiceBpeTokenizer
 
 def split_and_recombine_text(texts, desired_length=200, max_len=300):
@@ -27,41 +27,49 @@ def split_and_recombine_text(texts, desired_length=200, max_len=300):
     return texts
 
 if __name__ == '__main__':
-    # These are voices drawn randomly from the training set. You are free to substitute your own voices in, but testing
-    # has shown that the model does not generalize to new voices very well.
-    preselected_cond_voices = {
-        'emma_stone': ['voices/emma_stone/1.wav','voices/emma_stone/2.wav','voices/emma_stone/3.wav'],
-        'tom_hanks': ['voices/tom_hanks/1.wav','voices/tom_hanks/2.wav','voices/tom_hanks/3.wav'],
-        'patrick_stewart': ['voices/patrick_stewart/1.wav','voices/patrick_stewart/2.wav','voices/patrick_stewart/3.wav','voices/patrick_stewart/4.wav'],
-    }
-
     parser = argparse.ArgumentParser()
-    parser.add_argument('-textfile', type=str, help='A file containing the text to read.', default="data/riding_hood.txt")
-    parser.add_argument('-voice', type=str, help='Use a preset conditioning voice (defined above). Overrides cond_path.', default='patrick_stewart')
-    parser.add_argument('-num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=128)
-    parser.add_argument('-batch_size', type=int, help='How many samples to process at once in the autoregressive model.', default=16)
-    parser.add_argument('-output_path', type=str, help='Where to store outputs.', default='results/longform/')
-    parser.add_argument('-generation_preset', type=str, help='Preset to use for generation', default='realistic')
+    parser.add_argument('--textfile', type=str, help='A file containing the text to read.', default="data/riding_hood.txt")
+    parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) '
+                                                  'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='patrick_stewart')
+    parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/longform/')
+    parser.add_argument('--generation_preset', type=str, help='Preset to use for generation', default='standard')
     args = parser.parse_args()
 
-    os.makedirs(args.output_path, exist_ok=True)
-    with open(args.textfile, 'r', encoding='utf-8') as f:
-        text = ''.join([l for l in f.readlines()])
-    texts = split_and_recombine_text(text)
+    outpath = args.output_path
+    voices = get_voices()
+    selected_voices = args.voice.split(',')
+    for selected_voice in selected_voices:
+        voice_outpath = os.path.join(outpath, selected_voice)
+        os.makedirs(voice_outpath, exist_ok=True)
 
-    tts = TextToSpeech(autoregressive_batch_size=args.batch_size)
+        with open(args.textfile, 'r', encoding='utf-8') as f:
+            text = ''.join([l for l in f.readlines()])
+        texts = split_and_recombine_text(text)
+        tts = TextToSpeech()
 
-    priors = []
-    for j, text in enumerate(texts):
-        cond_paths = preselected_cond_voices[args.voice]
-        conds = priors.copy()
-        for cond_path in cond_paths:
-            c = load_audio(cond_path, 22050)
-            conds.append(c)
-        gen = tts.tts_with_preset(text, conds, preset=args.generation_preset, num_autoregressive_samples=args.num_samples)
-        torchaudio.save(os.path.join(args.output_path, f'{j}.wav'), gen.squeeze(0).cpu(), 24000)
+        if '&' in selected_voice:
+            voice_sel = selected_voice.split('&')
+        else:
+            voice_sel = [selected_voice]
+        cond_paths = []
+        for vsel in voice_sel:
+            if vsel not in voices.keys():
+                print(f'Error: voice {vsel} not available. Skipping.')
+                continue
+            cond_paths.extend(voices[vsel])
+        if not cond_paths:
+            print('Error: no valid voices specified. Try again.')
 
-        priors.append(torchaudio.functional.resample(gen, 24000, 22050).squeeze(0))
-        while len(priors) > 2:
-            priors.pop(0)
+        priors = []
+        for j, text in enumerate(texts):
+            conds = priors.copy()
+            for cond_path in cond_paths:
+                c = load_audio(cond_path, 22050)
+                conds.append(c)
+            gen = tts.tts_with_preset(text, conds, preset=args.generation_preset)
+            torchaudio.save(os.path.join(voice_outpath, f'{j}.wav'), gen.squeeze(0).cpu(), 24000)
+
+            priors.append(torchaudio.functional.resample(gen, 24000, 22050).squeeze(0))
+            while len(priors) > 2:
+                priors.pop(0)
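The new --voice syntax deserves a concrete example. A comma runs the whole text file once per listed voice, each writing to its own results/longform/<voice>/ subdirectory, while '&' pools the conditioning clips of several voices into a single blended reading. Assuming voices/ contains patrick_stewart and tom_hanks subdirectories with .wav clips:

    python read.py --textfile data/riding_hood.txt \
                   --voice "patrick_stewart,patrick_stewart&tom_hanks" \
                   --generation_preset standard
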
diff --git a/utils/audio.py b/utils/audio.py
index ff2f4ea..aad3a0f 100644
--- a/utils/audio.py
+++ b/utils/audio.py
@@ -1,3 +1,6 @@
+import os
+from glob import glob
+
 import torch
 import torchaudio
 import numpy as np
@@ -78,6 +81,16 @@ def dynamic_range_decompression(x, C=1):
     return torch.exp(x) / C
 
 
+def get_voices():
+    subs = os.listdir('voices')
+    voices = {}
+    for sub in subs:
+        subj = os.path.join('voices', sub)
+        if os.path.isdir(subj):
+            voices[sub] = glob(f'{subj}/*.wav')
+    return voices
+
+
 class TacotronSTFT(torch.nn.Module):
     def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
                  n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
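The new get_voices() helper simply maps each subdirectory of voices/ to the list of .wav files it contains, so a quick sanity check of the available voices might look like this (voice names are illustrative):

    from utils.audio import get_voices

    voices = get_voices()
    print(sorted(voices.keys()))      # e.g. ['patrick_stewart', 'tom_hanks', ...]
    print(voices['patrick_stewart'])  # conditioning clips for one voice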