forked from mrq/tortoise-tts

Updates

parent e9f3abcae7
commit 57ffdeff78

api.py (16 changed lines)
@ -133,7 +133,7 @@ class TextToSpeech:
         self.tokenizer = VoiceBpeTokenizer()
         download_models()

-        self.autoregressive = UnifiedVoice(max_mel_tokens=300, max_text_tokens=200, max_conditioning_inputs=2, layers=30,
+        self.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
                                       model_dim=1024,
                                       heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
                                       train_solo_embeddings=False,
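This hunk doubles the autoregressive sequence budgets (300 to 604 mel tokens, 200 to 402 text tokens), so anything keyed to the old limits goes stale; the hardcoded pad length of 250 removed in a later hunk is one example. A minimal guard one could add when driving UnifiedVoice under the new budgets (the constants come from this diff; the helper itself is hypothetical, not part of the repo):

import torch

MAX_MEL_TOKENS = 604   # was 300 before this commit
MAX_TEXT_TOKENS = 402  # was 200 before this commit

def check_budget(text_tokens: torch.Tensor, mel_tokens: torch.Tensor) -> None:
    # Fail fast instead of indexing past the model's position embeddings.
    if text_tokens.shape[-1] > MAX_TEXT_TOKENS:
        raise ValueError(f'text too long: {text_tokens.shape[-1]} > {MAX_TEXT_TOKENS}')
    if mel_tokens.shape[-1] > MAX_MEL_TOKENS:
        raise ValueError(f'mel codes too long: {mel_tokens.shape[-1]} > {MAX_MEL_TOKENS}')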
@ -151,14 +151,18 @@ class TextToSpeech:
                                           layer_drop=0, unconditioned_percentage=0).cpu().eval()
         self.diffusion.load_state_dict(torch.load('.models/diffusion.pth'))

+        self.diffusion_next = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
+                                           in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
+                                           layer_drop=0, unconditioned_percentage=0).cpu().eval()
+        self.diffusion_next.load_state_dict(torch.load('.models/diffusion_next.pth'))
+
         self.vocoder = UnivNetGenerator().cpu()
         self.vocoder.load_state_dict(torch.load('.models/vocoder.pth')['model_g'])
         self.vocoder.eval(inference=True)

     def tts(self, text, voice_samples, k=1,
             # autoregressive generation parameters follow
-            num_autoregressive_samples=512, temperature=.5, length_penalty=2, repetition_penalty=2.0, top_p=.5,
-            typical_sampling=False, typical_mass=.9,
+            num_autoregressive_samples=512, temperature=.5, length_penalty=1, repetition_penalty=2.0, top_p=.5,
             # diffusion generation parameters follow
             diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=.7,):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
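The notable addition here is a second diffusion decoder, diffusion_next, built with the same channel shapes as the existing one and loaded from its own checkpoint; together with the eval-script changes below, this sets up an old-versus-new A/B comparison. A sketch of that pattern (the sample_fn argument is a hypothetical stand-in for the repo's actual diffusion sampling loop, which this diff does not show):

import torch

def compare_decoders(diffusion_old, diffusion_new, latents, sample_fn):
    # Both decoders accept the same latents (in_latent_channels=1024), so the
    # identical call works against each; outputs can then be compared by ear.
    with torch.no_grad():
        return sample_fn(diffusion_old, latents), sample_fn(diffusion_new, latents)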
@ -185,10 +189,8 @@ class TextToSpeech:
                                                              temperature=temperature,
                                                              num_return_sequences=self.autoregressive_batch_size,
                                                              length_penalty=length_penalty,
-                                                             repetition_penalty=repetition_penalty,
-                                                             typical_sampling=typical_sampling,
-                                                             typical_mass=typical_mass)
-                padding_needed = 250 - codes.shape[1]
+                                                             repetition_penalty=repetition_penalty)
+                padding_needed = self.autoregressive.max_mel_tokens - codes.shape[1]
                 codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                 samples.append(codes)
         self.autoregressive = self.autoregressive.cpu()
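Padding to a hardcoded 250 tokens was a leftover from the old 300-token budget; deriving the pad length from self.autoregressive.max_mel_tokens keeps it in sync with the new 604-token limit automatically. A standalone illustration of what the padding does (the stop token id and code length here are made up for the example):

import torch
import torch.nn.functional as F

max_mel_tokens = 604      # from the UnifiedVoice constructor in this commit
stop_mel_token = 8192     # illustrative id; the real value comes from the model

codes = torch.randint(0, stop_mel_token, (1, 437))   # a hypothetical AR output
padding_needed = max_mel_tokens - codes.shape[1]
codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
assert codes.shape[1] == max_mel_tokens              # uniform length for stacking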
@ -7,7 +7,7 @@ from utils.audio import load_audio

 if __name__ == '__main__':
     fname = 'Y:\\libritts\\test-clean\\transcribed-brief-w2v.tsv'
-    outpath = 'D:\\tmp\\tortoise-tts-eval\\attempt_best'
+    outpath = 'D:\\tmp\\tortoise-tts-eval\\compare_vocoders'
     outpath_real = 'D:\\tmp\\tortoise-tts-eval\\real'

     os.makedirs(outpath, exist_ok=True)
@ -24,12 +24,18 @@ if __name__ == '__main__':
         path = os.path.join(os.path.dirname(fname), line[1])
         cond_audio = load_audio(path, 22050)
         torchaudio.save(os.path.join(outpath_real, os.path.basename(line[1])), cond_audio, 22050)
-        sample = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
+        sample, sample2 = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
                          repetition_penalty=2.0, length_penalty=2, temperature=.5, top_p=.5,
-                         diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=400)
+                         diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=200)

         down = torchaudio.functional.resample(sample, 24000, 22050)
-        fout_path = os.path.join(outpath, os.path.basename(line[1]))
+        fout_path = os.path.join(outpath, 'old', os.path.basename(line[1]))
         torchaudio.save(fout_path, down.squeeze(0), 22050)

+        down = torchaudio.functional.resample(sample2, 24000, 22050)
+        fout_path = os.path.join(outpath, 'new', os.path.basename(line[1]))
+        torchaudio.save(fout_path, down.squeeze(0), 22050)
+
         recorder.write(f'{transcript}\t{fout_path}\n')
         recorder.flush()
     recorder.close()
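One caveat with the new layout: the script only creates outpath itself, and torchaudio.save does not create missing parent directories, so the 'old' and 'new' subfolders need to exist before the loop writes into them. A small guard along these lines (not part of the diff) would cover it:

import os

outpath = 'D:\\tmp\\tortoise-tts-eval\\compare_vocoders'
for sub in ('old', 'new'):
    # exist_ok=True makes the guard idempotent across reruns.
    os.makedirs(os.path.join(outpath, sub), exist_ok=True)

Note also that recorder.write logs fout_path after it has been reassigned, so the TSV records only the 'new' paths.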
@ -55,7 +55,6 @@ class VoiceCLIP(nn.Module):
             needs_permute=False,
             exit_permute=False,
             max_seq_len=-1,
-            use_pos_emb=False,
             attn_layers=Encoder(
                 dim=dim_text,
                 depth=text_enc_depth,
@ -71,7 +70,6 @@ class VoiceCLIP(nn.Module):
             needs_permute=False,
             exit_permute=False,
             max_seq_len=-1,
-            use_pos_emb=False,
             attn_layers=Encoder(
                 dim=dim_speech,
                 depth=speech_enc_depth,
@ -1186,7 +1186,9 @@ class TransformerWrapper(nn.Module):
         if use_cache:
             res.append(intermediates.past_key_values)

-        return res
+        if len(res) > 1:
+            return tuple(res)
+        return res[0]


 class ContinuousTransformerWrapper(nn.Module):
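The return-value change (mirrored in ContinuousTransformerWrapper below) unwraps single-element result lists: callers that request nothing beyond the main output get it directly, while callers asking for extras such as the key-value cache get a tuple. A self-contained sketch of the convention (names are illustrative, not the library's API):

import torch

def finalize(res: list):
    # Mimics the new TransformerWrapper tail: tuple when there are extras,
    # bare value when the main output is all that was requested.
    if len(res) > 1:
        return tuple(res)
    return res[0]

logits = torch.zeros(1, 10, 256)
cache = [torch.zeros(1, 8, 10, 64)]
out = finalize([logits])               # just the logits tensor
out, kv = finalize([logits, cache])    # (logits, cache) tuple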
@ -1247,7 +1249,9 @@ class ContinuousTransformerWrapper(nn.Module):
         if use_cache:
             res.append(intermediates.past_key_values)

-        return tuple(res)
+        if len(res) > 1:
+            return tuple(res)
+        return res[0]


 class XTransformer(nn.Module):