diff --git a/api.py b/api.py
index c64918a..fa1a010 100644
--- a/api.py
+++ b/api.py
@@ -186,7 +186,9 @@ class TextToSpeech:
             'high_quality': Use if you want the absolute best. This is not really worth the compute, though.
         """
         # Use generally found best tuning knobs for generation.
-        kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0, 'top_p': .8,
+        kwargs.update({'temperature': .8, 'length_penalty': 1.0, 'repetition_penalty': 2.0,
+                       #'typical_sampling': True,
+                       'top_p': .8,
                        'cond_free_k': 2.0, 'diffusion_temperature': 1.0})
         # Presets are defined here.
         presets = {
@@ -202,7 +204,8 @@ class TextToSpeech:
             # autoregressive generation parameters follow
             num_autoregressive_samples=512, temperature=.8, length_penalty=1, repetition_penalty=2.0, top_p=.8,
             # diffusion generation parameters follow
-            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,):
+            diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=1.0,
+            **hf_generate_kwargs):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
         text = F.pad(text, (0, 1))  # This may not be necessary.
 
@@ -228,7 +231,8 @@ class TextToSpeech:
                                                              temperature=temperature,
                                                              num_return_sequences=self.autoregressive_batch_size,
                                                              length_penalty=length_penalty,
-                                                             repetition_penalty=repetition_penalty)
+                                                             repetition_penalty=repetition_penalty,
+                                                             **hf_generate_kwargs)
                 padding_needed = self.autoregressive.max_mel_tokens - codes.shape[1]
                 codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                 samples.append(codes)
diff --git a/eval_multiple.py b/eval_multiple.py
index 1113433..9defa52 100644
--- a/eval_multiple.py
+++ b/eval_multiple.py
@@ -16,7 +16,7 @@ if __name__ == '__main__':
         lines = [l.strip().split('\t') for l in f.readlines()]
 
     tts = TextToSpeech()
-    for k in range(4):
+    for k in range(3):
         outpath = f'{outpath_base}_{k}'
         os.makedirs(outpath, exist_ok=True)
         recorder = open(os.path.join(outpath, 'transcript.tsv'), 'w', encoding='utf-8')
@@ -27,9 +27,7 @@ if __name__ == '__main__':
             path = os.path.join(os.path.dirname(fname), line[1])
             cond_audio = load_audio(path, 22050)
             torchaudio.save(os.path.join(outpath_real, os.path.basename(line[1])), cond_audio, 22050)
-            sample = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=128, k=1,
-                             repetition_penalty=2.0, length_penalty=2, temperature=.5, top_p=.5,
-                             diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=70)
+            sample = tts.tts_with_preset(transcript, [cond_audio, cond_audio], preset='standard')
 
             down = torchaudio.functional.resample(sample, 24000, 22050)
             fout_path = os.path.join(outpath, os.path.basename(line[1]))
diff --git a/models/cvvp.py b/models/cvvp.py
new file mode 100644
index 0000000..e69de29
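
Note on the api.py change: the new **hf_generate_kwargs parameter on tts() is forwarded untouched into inference_speech(), which hands it to HuggingFace's generate(), so callers can pass arbitrary sampling options (the commented-out 'typical_sampling' knob hints at typical decoding) without further signature changes. eval_multiple.py correspondingly drops its hand-tuned kwargs in favor of the 'standard' preset, and models/cvvp.py is added as an empty placeholder (e69de29 is git's empty-blob hash). Below is a minimal usage sketch, not part of the patch: it assumes tts_with_preset() forwards its extra keyword arguments through to tts(), that load_audio lives at the import path eval_multiple.py uses, that the installed transformers version supports typical_p, and the voice-clip path is hypothetical.

    from api import TextToSpeech
    from utils.audio import load_audio  # import path assumed from the repo layout

    tts = TextToSpeech()
    cond_audio = load_audio('voices/example.wav', 22050)  # hypothetical conditioning clip

    # Any keyword that tts() does not name itself falls into **hf_generate_kwargs
    # and is forwarded to the autoregressive model's HF generate() call.
    sample = tts.tts_with_preset('Hello world.', [cond_audio, cond_audio],
                                 preset='standard',
                                 typical_p=.95)  # assumed: a transformers build with typical decoding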