James Betker 2022-04-10 14:41:13 -06:00
parent f37375bb72
commit b1ba8416ff
6 changed files with 33 additions and 16 deletions

api.py

@@ -133,7 +133,7 @@ class TextToSpeech:
         self.tokenizer = VoiceBpeTokenizer()
         download_models()
-        self.autoregressive = UnifiedVoice(max_mel_tokens=300, max_text_tokens=200, max_conditioning_inputs=2, layers=30,
+        self.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
                                            model_dim=1024,
                                            heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
                                            train_solo_embeddings=False,
@@ -151,14 +151,18 @@ class TextToSpeech:
                                       layer_drop=0, unconditioned_percentage=0).cpu().eval()
         self.diffusion.load_state_dict(torch.load('.models/diffusion.pth'))
+        self.diffusion_next = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
+                                           in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
+                                           layer_drop=0, unconditioned_percentage=0).cpu().eval()
+        self.diffusion_next.load_state_dict(torch.load('.models/diffusion_next.pth'))
 
         self.vocoder = UnivNetGenerator().cpu()
         self.vocoder.load_state_dict(torch.load('.models/vocoder.pth')['model_g'])
         self.vocoder.eval(inference=True)
 
     def tts(self, text, voice_samples, k=1,
             # autoregressive generation parameters follow
-            num_autoregressive_samples=512, temperature=.5, length_penalty=2, repetition_penalty=2.0, top_p=.5,
-            typical_sampling=False, typical_mass=.9,
+            num_autoregressive_samples=512, temperature=.5, length_penalty=1, repetition_penalty=2.0, top_p=.5,
             # diffusion generation parameters follow
             diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=.7,):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
@@ -185,10 +189,8 @@ class TextToSpeech:
                                                            temperature=temperature,
                                                            num_return_sequences=self.autoregressive_batch_size,
                                                            length_penalty=length_penalty,
-                                                           repetition_penalty=repetition_penalty,
-                                                           typical_sampling=typical_sampling,
-                                                           typical_mass=typical_mass)
-                padding_needed = 250 - codes.shape[1]
+                                                           repetition_penalty=repetition_penalty)
+                padding_needed = self.autoregressive.max_mel_tokens - codes.shape[1]
                 codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                 samples.append(codes)
             self.autoregressive = self.autoregressive.cpu()
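Two things change in concert in api.py: a second diffusion decoder (diffusion_next) is loaded alongside the original, and tts() evidently now returns a pair of outputs (the eval script below unpacks `sample, sample2`). The padding fix replaces a stale hard-coded 250 with the model's own max_mel_tokens, which matters now that UnifiedVoice is built with max_mel_tokens=604. A minimal sketch of the corrected padding; the code length and stop-token id here are illustrative, not taken from this diff:

    import torch
    import torch.nn.functional as F

    max_mel_tokens = 604      # matches the new UnifiedVoice config above
    stop_mel_token = 8193     # illustrative id; the real value comes from the model
    codes = torch.randint(0, 8192, (1, 417))  # hypothetical generated mel codes

    # Pad every sample out to the model maximum so candidates stack into one batch.
    padding_needed = max_mel_tokens - codes.shape[1]
    codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
    assert codes.shape == (1, max_mel_tokens)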

(filename not captured — TextToSpeech variant built on AutoregressiveCodegen)

@@ -135,7 +135,7 @@ class TextToSpeech:
         download_models()
         self.autoregressive = AutoregressiveCodegen(1024, 16).cpu().eval()
-        self.autoregressive.load_state_dict(torch.load('X:\\dlas\\experiments\\train_autoregressive_codegen\\models\\17000_codegen_ema.pth'))
+        self.autoregressive.load_state_dict(torch.load('X:\\dlas\\experiments\\train_autoregressive_codegen\\models\\20750_codegen_ema.pth'))
         self.clip = VoiceCLIP(dim_text=512, dim_speech=512, dim_latent=512, num_text_tokens=256, text_enc_depth=12,
                               text_seq_len=350, text_heads=8,

(filename not captured — batch evaluation script)

@@ -7,7 +7,7 @@ from utils.audio import load_audio
 if __name__ == '__main__':
     fname = 'Y:\\libritts\\test-clean\\transcribed-brief-w2v.tsv'
-    outpath = 'D:\\tmp\\tortoise-tts-eval\\attempt_best'
+    outpath = 'D:\\tmp\\tortoise-tts-eval\\compare_vocoders'
     outpath_real = 'D:\\tmp\\tortoise-tts-eval\\real'
 
     os.makedirs(outpath, exist_ok=True)
@@ -24,12 +24,18 @@ if __name__ == '__main__':
         path = os.path.join(os.path.dirname(fname), line[1])
         cond_audio = load_audio(path, 22050)
         torchaudio.save(os.path.join(outpath_real, os.path.basename(line[1])), cond_audio, 22050)
-        sample = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
+        sample, sample2 = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
                          repetition_penalty=2.0, length_penalty=2, temperature=.5, top_p=.5,
-                         diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=400)
+                         diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=200)
         down = torchaudio.functional.resample(sample, 24000, 22050)
-        fout_path = os.path.join(outpath, os.path.basename(line[1]))
+        fout_path = os.path.join(outpath, 'old', os.path.basename(line[1]))
         torchaudio.save(fout_path, down.squeeze(0), 22050)
+        down = torchaudio.functional.resample(sample2, 24000, 22050)
+        fout_path = os.path.join(outpath, 'new', os.path.basename(line[1]))
+        torchaudio.save(fout_path, down.squeeze(0), 22050)
         recorder.write(f'{transcript}\t{fout_path}\n')
         recorder.flush()
     recorder.close()
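Note that only outpath itself is created with os.makedirs; torchaudio.save will not create the new 'old' and 'new' subdirectories, so the script presumably needs them made up front. Also, recorder now logs only the 'new' path, since fout_path is reassigned before the write. A small sketch of the directory guard, reusing the paths above:

    import os

    outpath = 'D:\\tmp\\tortoise-tts-eval\\compare_vocoders'
    # torchaudio.save does not create intermediate directories; build the
    # comparison folders before the loop writes into them.
    for sub in ('old', 'new'):
        os.makedirs(os.path.join(outpath, sub), exist_ok=True)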

(filename not captured — changes to AutoregressiveCodegen)

@@ -168,6 +168,8 @@ class AutoregressiveCodegen(nn.Module):
         self.START_TOKEN=8192
         self.STOP_TOKEN=8193
+        self.START_TEXT_TOKEN = 255
+        self.STOP_TEXT_TOKEN = 0
         self.max_text_token_id = num_text_tokens
         self.max_mel_token_id = num_mel_tokens
         self.mel_embedding = ConditioningEncoder(80, model_dim, do_checkpointing=False)
@@ -231,6 +233,9 @@ class AutoregressiveCodegen(nn.Module):
         for i in range(conditioning_signal.shape[1]):
             cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
         cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
+        # Since all positional embeddings are relative, it is (probably) important to "fix" the text with some permanent embeddings.
+        text_codes = F.pad(text_codes, (1,0), value=self.START_TEXT_TOKEN)
+        text_codes = F.pad(text_codes, (0,1), value=self.STOP_TEXT_TOKEN)
         _, enc_text = self.encoder(text_codes, return_hiddens=True)
         # Interleave cond_emb into the first few contexts.
         full_context = enc_text
@@ -255,6 +260,8 @@ class AutoregressiveCodegen(nn.Module):
         for i in range(conditioning_signal.shape[1]):
             cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
         cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
+        text_codes = F.pad(text_codes, (1,0), value=self.START_TEXT_TOKEN)
+        text_codes = F.pad(text_codes, (0,1), value=self.STOP_TEXT_TOKEN)
         _, enc_text = self.encoder(text_codes, return_hiddens=True)
         # Interleave cond_emb into the first few contexts.
         full_context = enc_text
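The new padding brackets every text sequence with fixed START/STOP ids in both the training and inference paths; as the added comment says, with purely relative positional embeddings these give the encoder absolute anchors at the sequence boundaries. A toy illustration of what the two F.pad calls do (token ids from the diff, sequence contents hypothetical):

    import torch
    import torch.nn.functional as F

    START_TEXT_TOKEN = 255
    STOP_TEXT_TOKEN = 0

    text_codes = torch.tensor([[17, 42, 99]])                        # (batch, seq)
    text_codes = F.pad(text_codes, (1, 0), value=START_TEXT_TOKEN)   # prepend start
    text_codes = F.pad(text_codes, (0, 1), value=STOP_TEXT_TOKEN)    # append stop
    print(text_codes)  # tensor([[255,  17,  42,  99,   0]])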

(filename not captured — changes to VoiceCLIP)

@@ -55,7 +55,6 @@ class VoiceCLIP(nn.Module):
                               needs_permute=False,
                               exit_permute=False,
                               max_seq_len=-1,
-                              use_pos_emb=False,
                               attn_layers=Encoder(
                                   dim=dim_text,
                                   depth=text_enc_depth,
@@ -71,7 +70,6 @@ class VoiceCLIP(nn.Module):
                                 needs_permute=False,
                                 exit_permute=False,
                                 max_seq_len=-1,
-                                use_pos_emb=False,
                                 attn_layers=Encoder(
                                     dim=dim_speech,
                                     depth=speech_enc_depth,
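Removing use_pos_emb=False leaves both CLIP encoders on the wrapper's default. Assuming the vendored TransformerWrapper behaves like upstream x-transformers, that default re-enables a learned absolute position embedding on top of the token embedding. A minimal sketch of that mechanism, using a stand-in class rather than the project's actual wrapper:

    import torch
    import torch.nn as nn

    class TokensWithPos(nn.Module):
        # Stand-in for what a use_pos_emb-style flag typically toggles.
        def __init__(self, num_tokens, dim, max_seq_len, use_pos_emb=True):
            super().__init__()
            self.tok = nn.Embedding(num_tokens, dim)
            self.pos = nn.Embedding(max_seq_len, dim) if use_pos_emb else None

        def forward(self, x):
            h = self.tok(x)
            if self.pos is not None:  # default: add learned absolute positions
                h = h + self.pos(torch.arange(x.shape[1], device=x.device))
            return h

    emb = TokensWithPos(num_tokens=256, dim=512, max_seq_len=350)
    print(emb(torch.randint(0, 256, (2, 10))).shape)  # torch.Size([2, 10, 512])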

(filename not captured — changes to the x-transformers wrappers)

@@ -1186,7 +1186,9 @@ class TransformerWrapper(nn.Module):
         if use_cache:
             res.append(intermediates.past_key_values)
-        return res
+        if len(res) > 1:
+            return tuple(res)
+        return res[0]
 
 
 class ContinuousTransformerWrapper(nn.Module):
@@ -1247,7 +1249,9 @@ class ContinuousTransformerWrapper(nn.Module):
         if use_cache:
             res.append(intermediates.past_key_values)
+        if len(res) > 1:
             return tuple(res)
+        return res[0]
 
 
 class XTransformer(nn.Module):
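Both wrappers trade "always return a container" for a common ergonomics pattern: several outputs come back as a tuple, a single output comes back bare. That keeps unpacking call sites such as `_, enc_text = self.encoder(text_codes, return_hiddens=True)` working while sparing plain calls a one-element container. A standalone sketch of the pattern (names hypothetical, not the wrapper's real code):

    def forward(x, return_hiddens=False, use_cache=False):
        res = [x]                    # the primary output is always present
        if return_hiddens:
            res.append('hiddens')    # stand-in for intermediate activations
        if use_cache:
            res.append('past_kv')    # stand-in for cached key/values
        if len(res) > 1:
            return tuple(res)        # multiple outputs unpack naturally
        return res[0]                # single output returned bare

    out = forward('logits')                                 # -> 'logits'
    out, hiddens = forward('logits', return_hiddens=True)   # -> tuple unpacks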