forked from mrq/tortoise-tts
clear out new_autoregressive api
This commit is contained in:
parent 8e94abd341
commit a578697287
@ -1,245 +0,0 @@
import argparse
import os
import random
from urllib import request

import torch
import torch.nn.functional as F
import torchaudio
import progressbar
import ocotillo
from tqdm import tqdm

from models.arch_util import TorchMelSpectrogram
from models.autoregressive import UnifiedVoice
from models.diffusion_decoder import DiffusionTts
from models.new_autoregressive import AutoregressiveCodegen
from models.text_voice_clip import VoiceCLIP
from models.vocoder import UnivNetGenerator
from utils.audio import load_audio, wav_to_univnet_mel, denormalize_tacotron_mel
from utils.diffusion import SpacedDiffusion, space_timesteps, get_named_beta_schedule
from utils.tokenizer import VoiceBpeTokenizer, lev_distance


pbar = None

def download_models():
    # NOTE: 'vocoder.pth' is loaded by TextToSpeech below but is not listed
    # here, so it must be placed in .models/ by hand.
    MODELS = {
        'clip.pth': 'https://huggingface.co/jbetker/tortoise-tts-clip/resolve/main/pytorch-model.bin',
        'diffusion.pth': 'https://huggingface.co/jbetker/tortoise-tts-diffusion-v1/resolve/main/pytorch-model.bin',
        'autoregressive.pth': 'https://huggingface.co/jbetker/tortoise-tts-autoregressive/resolve/main/pytorch-model.bin'
    }
    os.makedirs('.models', exist_ok=True)

    def show_progress(block_num, block_size, total_size):
        global pbar
        if pbar is None:
            pbar = progressbar.ProgressBar(maxval=total_size)
            pbar.start()

        downloaded = block_num * block_size
        if downloaded < total_size:
            pbar.update(downloaded)
        else:
            pbar.finish()
            pbar = None

    for model_name, url in MODELS.items():
        if os.path.exists(f'.models/{model_name}'):
            continue
        print(f'Downloading {model_name} from {url}...')
        request.urlretrieve(url, f'.models/{model_name}', show_progress)
        print('Done.')


def pad_or_truncate(t, length):
    """Pads or truncates the last dimension of t to exactly `length` samples."""
    if t.shape[-1] == length:
        return t
    elif t.shape[-1] < length:
        return F.pad(t, (0, length - t.shape[-1]))
    else:
        return t[..., :length]
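
# Illustrative sketch (not part of the original file): exercises both branches
# of pad_or_truncate on a dummy tensor; call it by hand to verify the shapes.
def _demo_pad_or_truncate():
    t = torch.randn(1, 80, 50)
    assert pad_or_truncate(t, 64).shape == (1, 80, 64)  # right-padded with zeros
    assert pad_or_truncate(t, 32).shape == (1, 80, 32)  # truncated to length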


def load_discrete_vocoder_diffuser(trained_diffusion_steps=4000, desired_diffusion_steps=200, cond_free=True, cond_free_k=1):
    """
    Helper function to load a GaussianDiffusion instance configured for use as a vocoder.
    """
    return SpacedDiffusion(use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]),
                           model_mean_type='epsilon', model_var_type='learned_range', loss_type='mse',
                           betas=get_named_beta_schedule('linear', trained_diffusion_steps),
                           conditioning_free=cond_free, conditioning_free_k=cond_free_k)


def load_conditioning(clip, cond_length=132300):
    # 132300 samples is six seconds at the 22050 Hz input rate (6 * 22050 = 132300).
    gap = clip.shape[-1] - cond_length
    if gap < 0:
        clip = F.pad(clip, pad=(0, abs(gap)))
    elif gap > 0:
        rand_start = random.randint(0, gap)
        clip = clip[:, rand_start:rand_start + cond_length]
    mel_clip = TorchMelSpectrogram()(clip.unsqueeze(0)).squeeze(0)
    return mel_clip.unsqueeze(0).cuda()
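
# Illustrative sketch (not part of the original file): a fake 10-second clip is
# randomly cropped to the 6-second conditioning window, then converted to a mel.
# Requires a GPU, since load_conditioning moves its result to CUDA.
def _demo_load_conditioning():
    clip = torch.randn(1, 220500)    # 10 s of noise at 22050 Hz
    return load_conditioning(clip)   # -> (1, n_mels, T) mel tensor on the GPU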


def fix_autoregressive_output(codes, stop_token):
    """
    This function performs some padding on coded audio that fixes a mismatch issue between what the diffusion model was
    trained on and what the autoregressive code generator creates (which has no padding or end).
    This is highly specific to the DVAE being used, so this particular coding will not necessarily work if used with
    a different DVAE. This can be inferred by feeding an audio clip padded with lots of zeros on the end through the DVAE
    and copying out the last few codes.

    Failing to do this padding will produce speech with a harsh end that sounds like "BLAH" or similar.
    """
    # Strip off the autoregressive stop token and add padding.
    stop_token_indices = (codes == stop_token).nonzero()
    if len(stop_token_indices) == 0:
        print("No stop tokens found, enjoy that output of yours!")
        return codes
    else:
        codes[stop_token_indices] = 83
    stm = stop_token_indices.min().item()
    codes[stm:] = 83
    if stm - 3 < codes.shape[0]:
        codes[-3] = 45
        codes[-2] = 45
        codes[-1] = 248

    return codes
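
# Illustrative sketch (not part of the original file; the stop token 8192 is
# hypothetical): everything from the first stop token onward is rewritten with
# the padding code 83, and the tail becomes the fixed 45/45/248 ending.
def _demo_fix_autoregressive_output():
    codes = torch.tensor([12, 7, 300, 8192, 8192])
    fixed = fix_autoregressive_output(codes, stop_token=8192)
    assert fixed.tolist() == [12, 7, 45, 45, 248]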


def do_spectrogram_diffusion(diffusion_model, diffuser, mel_codes, conditioning_samples, temperature=1):
    """
    Uses the specified diffusion model to convert discrete codes into a spectrogram.
    """
    with torch.no_grad():
        cond_mels = []
        for sample in conditioning_samples:
            sample = pad_or_truncate(sample, 102400)
            cond_mel = wav_to_univnet_mel(sample.to(mel_codes.device), do_normalization=False)
            cond_mels.append(cond_mel)
        cond_mels = torch.stack(cond_mels, dim=1)

        # This diffusion model converts from 22kHz spectrogram codes to a 24kHz spectrogram signal.
        output_seq_len = mel_codes.shape[-1] * 4 * 24000 // 22050
        output_shape = (mel_codes.shape[0], 100, output_seq_len)
        precomputed_embeddings = diffusion_model.timestep_independent(mel_codes, cond_mels, output_seq_len, False)

        noise = torch.randn(output_shape, device=mel_codes.device) * temperature
        mel = diffuser.p_sample_loop(diffusion_model, output_shape, noise=noise,
                                     model_kwargs={'precomputed_aligned_embeddings': precomputed_embeddings})
        return denormalize_tacotron_mel(mel)[:, :, :output_seq_len]
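
# Worked example of the output-length arithmetic above (an annotation, not
# original code): 200 mel codes, each spanning 4 frames at the 22050 Hz input
# rate, resampled to the 24000 Hz output frame rate, give
# 200 * 4 * 24000 // 22050 = 870 output mel frames.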


class TextToSpeech:
    def __init__(self, autoregressive_batch_size=32):
        self.autoregressive_batch_size = autoregressive_batch_size
        self.tokenizer = VoiceBpeTokenizer()
        download_models()

        # NOTE: hardcoded path to a local training checkpoint for the new codegen model.
        self.autoregressive = AutoregressiveCodegen(1024, 16).cpu().eval()
        self.autoregressive.load_state_dict(torch.load('X:\\dlas\\experiments\\train_autoregressive_codegen\\models\\20750_codegen_ema.pth'))

        self.clip = VoiceCLIP(dim_text=512, dim_speech=512, dim_latent=512, num_text_tokens=256, text_enc_depth=12,
                              text_seq_len=350, text_heads=8,
                              num_speech_tokens=8192, speech_enc_depth=12, speech_heads=8, speech_seq_len=430,
                              use_xformers=True).cpu().eval()
        self.clip.load_state_dict(torch.load('.models/clip.pth'))

        self.diffusion = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
                                      in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
                                      layer_drop=0, unconditioned_percentage=0).cpu().eval()
        self.diffusion.load_state_dict(torch.load('.models/diffusion.pth'))

        self.vocoder = UnivNetGenerator().cpu()
        self.vocoder.load_state_dict(torch.load('.models/vocoder.pth')['model_g'])
        self.vocoder.eval(inference=True)

    def tts(self, text, voice_samples, k=1,
            # autoregressive generation parameters follow
            num_autoregressive_samples=512, temperature=.5, length_penalty=2, repetition_penalty=2.0, top_p=.5,
            typical_sampling=False, typical_mass=.9,
            # diffusion generation parameters follow
            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=.7):
        text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
        text = F.pad(text, (0, 1))  # This may not be necessary.

        conds = []
        if not isinstance(voice_samples, list):
            voice_samples = [voice_samples]
        for vs in voice_samples:
            conds.append(load_conditioning(vs))
        conds = torch.stack(conds, dim=1)

        diffuser = load_discrete_vocoder_diffuser(desired_diffusion_steps=diffusion_iterations, cond_free=cond_free, cond_free_k=cond_free_k)

        with torch.no_grad():
            samples = []
            num_batches = num_autoregressive_samples // self.autoregressive_batch_size
            stop_mel_token = self.autoregressive.STOP_TOKEN
            self.autoregressive = self.autoregressive.cuda()
            for _ in tqdm(range(num_batches)):
                codes = self.autoregressive.generate(conds, text,
                                                     do_sample=True,
                                                     top_p=top_p,
                                                     temperature=temperature,
                                                     num_return_sequences=self.autoregressive_batch_size,
                                                     length_penalty=length_penalty,
                                                     repetition_penalty=repetition_penalty,
                                                     typical_sampling=typical_sampling,
                                                     typical_mass=typical_mass)
                # Pad each batch out to a fixed 250 tokens with stop tokens.
                padding_needed = 250 - codes.shape[1]
                codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                samples.append(codes)
            #self.autoregressive = self.autoregressive.cpu()

            clip_results = []
            self.clip = self.clip.cuda()
            for batch in samples:
                for i in range(batch.shape[0]):
                    batch[i] = fix_autoregressive_output(batch[i], stop_mel_token)
                # Zero out tokens the CLIP model has no embeddings for (>= num_speech_tokens).
                bad_toks = batch >= 8192
                batch = batch * bad_toks.logical_not()
                clip_results.append(self.clip(text.repeat(batch.shape[0], 1), batch, return_loss=False))
            clip_results = torch.cat(clip_results, dim=0)
            samples = torch.cat(samples, dim=0)
            best_results = samples[torch.topk(clip_results, k=k).indices]
            self.clip = self.clip.cpu()
            del samples

            print("Performing vocoding..")
            wav_candidates = []
            self.diffusion = self.diffusion.cuda()
            self.vocoder = self.vocoder.cuda()
            for b in range(best_results.shape[0]):
                code = best_results[b].unsqueeze(0)
                mel = do_spectrogram_diffusion(self.diffusion, diffuser, code, voice_samples, temperature=diffusion_temperature)
                wav = self.vocoder.inference(mel)
                wav_candidates.append(wav.cpu())
            self.diffusion = self.diffusion.cpu()
            self.vocoder = self.vocoder.cpu()

        if len(wav_candidates) > 1:
            return wav_candidates
        return wav_candidates[0]

    def refine_for_intellibility(self, wav_candidates, corresponding_codes, output_path):
        """
        Further refines the remaining candidates using an ASR model to pick out the ones that are the most understandable.
        TODO: finish this function
        :param wav_candidates:
        :return:
        """
        transcriber = ocotillo.Transcriber(on_cuda=True)
        transcriptions = transcriber.transcribe_batch(torch.cat(wav_candidates, dim=0).squeeze(1), 24000)
        best = 99999999
        for i, transcription in enumerate(transcriptions):
            dist = lev_distance(transcription, args.text.lower())  # `args` is unresolved here; see TODO above.
            if dist < best:
                best = dist
                best_codes = corresponding_codes[i].unsqueeze(0)
                best_wav = wav_candidates[i]
        del transcriber
        torchaudio.save(os.path.join(output_path, f'{voice}_poor.wav'), best_wav.squeeze(0).cpu(), 24000)  # `voice` is unresolved here.

        # Perform diffusion again with the high-quality diffuser.
        # NOTE: `diffusion`, `final_diffuser`, `cond_diffusion`, and `vocoder` are also unresolved leftovers of the TODO.
        mel = do_spectrogram_diffusion(diffusion, final_diffuser, best_codes, cond_diffusion, mean=False)
        wav = vocoder.inference(mel)
        torchaudio.save(os.path.join(args.output_path, f'{voice}.wav'), wav.squeeze(0).cpu(), 24000)
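
# Illustrative end-to-end usage (a sketch, not part of the original file; the
# 'reference.wav' path and the sample counts are hypothetical).
if __name__ == '__main__':
    tts = TextToSpeech(autoregressive_batch_size=16)
    reference = load_audio('reference.wav', 22050)  # load_audio is imported above
    wav = tts.tts("Hello world.", [reference], k=1, num_autoregressive_samples=32)
    torchaudio.save('output.wav', wav.squeeze(0), 24000)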