forked from mrq/tortoise-tts
Remove CVVP
After training a similar model for a different purpose, I realized that this model is faulty: the contrastive loss it uses only pays attention to high-frequency details which do not contribute meaningfully to output quality. I validated this by comparing a no-CVVP output with a baseline using tts-scores and found no differences.
parent 849db260ba
commit 8fdf516e62
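What actually changes is the candidate-selection rule: before this commit, clips sampled from the autoregressive model were reranked by a weighted blend of CLVP (text/speech agreement) and CVVP (voice/speech agreement) scores; after it, CLVP alone decides. A minimal sketch of that rule, distilled from the diff below; rerank_candidates is a hypothetical helper written for illustration, not a function in this codebase:

import torch

def rerank_candidates(clvp_scores, cvvp_scores=None, slider=0.5, k=1):
    # Old behaviour: blend both contrastive scores via clvp_cvvp_slider.
    # New behaviour (cvvp_scores=None): rank candidates by CLVP alone.
    if cvvp_scores is not None:
        scores = clvp_scores * slider + cvvp_scores * (1 - slider)
    else:
        scores = clvp_scores
    return torch.topk(scores, k=k).indices

clvp = torch.tensor([0.2, 0.9, 0.4])
print(rerank_candidates(clvp, k=1))  # tensor([1]): only CLVP decides now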
tortoise/api.py
@@ -10,7 +10,6 @@ import progressbar
 import torchaudio

 from tortoise.models.classifier import AudioMiniEncoderWithClassifierHead
-from tortoise.models.cvvp import CVVP
 from tortoise.models.diffusion_decoder import DiffusionTts
 from tortoise.models.autoregressive import UnifiedVoice
 from tqdm import tqdm
@@ -35,7 +34,6 @@ def download_models(specific_models=None):
         'autoregressive.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/autoregressive.pth',
         'classifier.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/classifier.pth',
         'clvp2.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/clvp2.pth',
-        'cvvp.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/cvvp.pth',
         'diffusion_decoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/diffusion_decoder.pth',
         'vocoder.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/vocoder.pth',
         'rlg_auto.pth': 'https://huggingface.co/jbetker/tortoise-tts-v2/resolve/main/.models/rlg_auto.pth',
@@ -223,10 +221,6 @@ class TextToSpeech:
                               use_xformers=True).cpu().eval()
         self.clvp.load_state_dict(torch.load(f'{models_dir}/clvp2.pth'))

-        self.cvvp = CVVP(model_dim=512, transformer_heads=8, dropout=0, mel_codes=8192, conditioning_enc_depth=8, cond_mask_percentage=0,
-                         speech_enc_depth=8, speech_mask_percentage=0, latent_multiplier=1).cpu().eval()
-        self.cvvp.load_state_dict(torch.load(f'{models_dir}/cvvp.pth'))
-
         self.vocoder = UnivNetGenerator().cpu()
         self.vocoder.load_state_dict(torch.load(f'{models_dir}/vocoder.pth')['model_g'])
         self.vocoder.eval(inference=True)
@@ -309,8 +303,6 @@ class TextToSpeech:
             return_deterministic_state=False,
             # autoregressive generation parameters follow
             num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8, max_mel_tokens=500,
-            # CLVP & CVVP parameters
-            clvp_cvvp_slider=.5,
             # diffusion generation parameters follow
             diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,
             **hf_generate_kwargs):
@@ -321,10 +313,10 @@ class TextToSpeech:
         :param conditioning_latents: A tuple of (autoregressive_conditioning_latent, diffusion_conditioning_latent), which
                                      can be provided in lieu of voice_samples. This is ignored unless voice_samples=None.
                                      Conditioning latents can be retrieved via get_conditioning_latents().
-        :param k: The number of returned clips. The most likely (as determined by Tortoises' CLVP and CVVP models) clips are returned.
+        :param k: The number of returned clips. The most likely (as determined by Tortoises' CLVP model) clips are returned.
         :param verbose: Whether or not to print log messages indicating the progress of creating a clip. Default=true.
         ~~AUTOREGRESSIVE KNOBS~~
-        :param num_autoregressive_samples: Number of samples taken from the autoregressive model, all of which are filtered using CLVP+CVVP.
+        :param num_autoregressive_samples: Number of samples taken from the autoregressive model, all of which are filtered using CLVP.
                                            As Tortoise is a probabilistic model, more samples means a higher probability of creating something "great".
         :param temperature: The softmax temperature of the autoregressive model.
         :param length_penalty: A length penalty applied to the autoregressive decoder. Higher settings causes the model to produce more terse outputs.
@@ -336,11 +328,6 @@ class TextToSpeech:
                                 I was interested in the premise, but the results were not as good as I was hoping. This is off by default, but
                                 could use some tuning.
         :param typical_mass: The typical_mass parameter from the typical_sampling algorithm.
-        ~~CLVP-CVVP KNOBS~~
-        :param clvp_cvvp_slider: Controls the influence of the CLVP and CVVP models in selecting the best output from the autoregressive model.
-                                 [0,1]. Values closer to 1 will cause Tortoise to emit clips that follow the text more. Values closer to
-                                 0 will cause Tortoise to emit clips that more closely follow the reference clip (e.g. the voice sounds more
-                                 similar).
         ~~DIFFUSION KNOBS~~
         :param diffusion_iterations: Number of diffusion steps to perform. [0,4000]. More steps means the network has more chances to iteratively refine
                                      the output, which should theoretically mean a higher quality output. Generally a value above 250 is not noticeably better,
@@ -402,28 +389,19 @@ class TextToSpeech:
                 samples.append(codes)
             self.autoregressive = self.autoregressive.cpu()

-            clip_results = []
+            clvp_results = []
             self.clvp = self.clvp.cuda()
-            self.cvvp = self.cvvp.cuda()
             if verbose:
-                print("Computing best candidates using CLVP and CVVP")
+                print("Computing best candidates using CLVP")
             for batch in tqdm(samples, disable=not verbose):
                 for i in range(batch.shape[0]):
                     batch[i] = fix_autoregressive_output(batch[i], stop_mel_token)
                 clvp = self.clvp(text_tokens.repeat(batch.shape[0], 1), batch, return_loss=False)
-                if auto_conds is not None:
-                    cvvp_accumulator = 0
-                    for cl in range(auto_conds.shape[1]):
-                        cvvp_accumulator = cvvp_accumulator + self.cvvp(auto_conds[:, cl].repeat(batch.shape[0], 1, 1), batch, return_loss=False)
-                    cvvp = cvvp_accumulator / auto_conds.shape[1]
-                    clip_results.append(clvp * clvp_cvvp_slider + cvvp * (1-clvp_cvvp_slider))
-                else:
-                    clip_results.append(clvp)
-            clip_results = torch.cat(clip_results, dim=0)
+                clvp_results.append(clvp)
+            clvp_results = torch.cat(clvp_results, dim=0)
             samples = torch.cat(samples, dim=0)
-            best_results = samples[torch.topk(clip_results, k=k).indices]
+            best_results = samples[torch.topk(clvp_results, k=k).indices]
             self.clvp = self.clvp.cpu()
-            self.cvvp = self.cvvp.cpu()
             del samples

             # The diffusion model actually wants the last hidden layer from the autoregressive model as conditioning
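The validation described in the commit message can be reproduced in spirit: render the same prompts on a checkout with CVVP and on one without, then compare the two output sets with tts-scores. The harness below is a sketch under stated assumptions: it only covers generation, uses the random voice, and leaves scoring to whichever tts-scores metric you pick (see that project's README for its actual entry points, which are not reproduced here). With a fixed seed the autoregressive candidates match across the two runs, so differences should come mainly from the reranking.

import os
import torchaudio
from tortoise.api import TextToSpeech

def generate_set(sentences, out_dir, preset='standard', seed=42):
    # Render each prompt with a random voice and a fixed seed; run this once
    # on the pre-change checkout and once on the post-change checkout.
    tts = TextToSpeech()
    os.makedirs(out_dir, exist_ok=True)
    for i, text in enumerate(sentences):
        gen = tts.tts_with_preset(text, voice_samples=None, conditioning_latents=None,
                                  preset=preset, use_deterministic_seed=seed)
        torchaudio.save(os.path.join(out_dir, f'{i}.wav'), gen.squeeze(0).cpu(), 24000)

generate_set(['The quick brown fox jumps over the lazy dog.'], 'results/no_cvvp')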
tortoise/do_tts.py
@@ -13,9 +13,6 @@ if __name__ == '__main__':
     parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) '
                                                   'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='random')
     parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='fast')
-    parser.add_argument('--voice_diversity_intelligibility_slider', type=float,
-                        help='How to balance vocal diversity with the quality/intelligibility of the spoken text. 0 means highly diverse voice (not recommended), 1 means maximize intellibility',
-                        default=.5)
     parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/')
     parser.add_argument('--model_dir', type=str, help='Where to find pretrained model checkpoints. Tortoise automatically downloads these to .models, so this'
                                                       'should only be specified if you have custom checkpoints.', default='.models')
@@ -31,8 +28,7 @@ if __name__ == '__main__':
     for k, voice in enumerate(selected_voices):
         voice_samples, conditioning_latents = load_voice(voice)
         gen, dbg_state = tts.tts_with_preset(args.text, k=args.candidates, voice_samples=voice_samples, conditioning_latents=conditioning_latents,
-                                             preset=args.preset, clvp_cvvp_slider=args.voice_diversity_intelligibility_slider,
-                                             use_deterministic_seed=args.seed, return_deterministic_state=True)
+                                             preset=args.preset, use_deterministic_seed=args.seed, return_deterministic_state=True)
         if isinstance(gen, list):
             for j, g in enumerate(gen):
                 torchaudio.save(os.path.join(args.output_path, f'{voice}_{k}_{j}.wav'), g.squeeze(0).cpu(), 24000)
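For callers the migration is just dropping an argument: tts_with_preset no longer accepts clvp_cvvp_slider, and do_tts.py no longer exposes --voice_diversity_intelligibility_slider. A minimal programmatic equivalent of the updated script, as a sketch ('random' voice and 'fast' preset mirror the defaults above; the output filename is illustrative):

import os
import torchaudio
from tortoise.api import TextToSpeech
from tortoise.utils.audio import load_voice

tts = TextToSpeech()
voice_samples, conditioning_latents = load_voice('random')
gen = tts.tts_with_preset('Hello, world.', k=1, voice_samples=voice_samples,
                          conditioning_latents=conditioning_latents,
                          preset='fast')  # note: no clvp_cvvp_slider anymore
os.makedirs('results/', exist_ok=True)
torchaudio.save('results/random_0.wav', gen.squeeze(0).cpu(), 24000)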
tortoise/read.py
@@ -18,9 +18,6 @@ if __name__ == '__main__':
     parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/longform/')
     parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='standard')
     parser.add_argument('--regenerate', type=str, help='Comma-separated list of clip numbers to re-generate, or nothing.', default=None)
-    parser.add_argument('--voice_diversity_intelligibility_slider', type=float,
-                        help='How to balance vocal diversity with the quality/intelligibility of the spoken text. 0 means highly diverse voice (not recommended), 1 means maximize intellibility',
-                        default=.5)
     parser.add_argument('--model_dir', type=str, help='Where to find pretrained model checkpoints. Tortoise automatically downloads these to .models, so this'
                                                       'should only be specified if you have custom checkpoints.', default='.models')
     parser.add_argument('--seed', type=int, help='Random seed which can be used to reproduce results.', default=None)
@@ -62,8 +59,7 @@ if __name__ == '__main__':
             all_parts.append(load_audio(os.path.join(voice_outpath, f'{j}.wav'), 24000))
             continue
         gen = tts.tts_with_preset(text, voice_samples=voice_samples, conditioning_latents=conditioning_latents,
-                                  preset=args.preset, clvp_cvvp_slider=args.voice_diversity_intelligibility_slider,
-                                  use_deterministic_seed=seed)
+                                  preset=args.preset, use_deterministic_seed=seed)
         gen = gen.squeeze(0).cpu()
         torchaudio.save(os.path.join(voice_outpath, f'{j}.wav'), gen, 24000)
         all_parts.append(gen)
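read.py gets the same treatment as do_tts.py: the slider flag and the forwarded kwarg are gone. If you maintain scripts that still assemble the old keyword, a small shim stops them from passing a now-removed argument; safe_tts_with_preset is a hypothetical helper, not part of the repository:

def safe_tts_with_preset(tts, text, **kwargs):
    # Drop the kwarg this commit removed; a no-op for new-style call sites.
    kwargs.pop('clvp_cvvp_slider', None)
    return tts.tts_with_preset(text, **kwargs)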