build a better speech synthesis toolset
parent 32cfcf3684
commit b2d8fbcfc0
codes/scripts/audio/speech_synthesis_utils.py (new file, +80 lines)
@@ -0,0 +1,80 @@
import os
import random

import torch

from data.audio.unsupervised_audio_dataset import load_audio
from data.util import find_files_of_type, is_audio_file
from models.diffusion.gaussian_diffusion import get_named_beta_schedule
from models.diffusion.respace import SpacedDiffusion, space_timesteps
from trainer.injectors.base_injectors import TorchMelSpectrogramInjector
from utils.audio import plot_spectrogram


def wav_to_mel(wav):
    """
    Converts an audio clip into the MEL spectrogram tensor that the vocoder, DVAE and GptTts models use whenever
    a MEL is called for.
    """
    return TorchMelSpectrogramInjector({'in': 'wav', 'out': 'mel', 'normalize': True}, {})({'wav': wav})['mel']


def convert_mel_to_codes(dvae_model, mel):
    """
    Converts a MEL spectrogram into discrete codes using the provided DVAE model.
    """
    dvae_model.eval()
    with torch.no_grad():
        return dvae_model.get_codebook_indices(mel)


def load_gpt_conditioning_inputs_from_directory(path, num_candidates=3, sample_rate=22050, max_samples=44100):
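    """
    Picks num_candidates random audio clips from the directory containing `path`, crops each to at most max_samples
    samples, and returns their MELs stacked along a new first dimension, for use as GPT conditioning inputs.
    """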
    candidates = find_files_of_type('img', os.path.dirname(path), qualifier=is_audio_file)[0]
    assert len(candidates) < 50000  # Sanity check to ensure we aren't loading "related files" that aren't actually related.
    if len(candidates) == 0:
        print(f"No conditioning candidates found for {path} (not even the clip itself??)")
        raise NotImplementedError()
    # Sample with replacement. This can get repeats, but more conveniently handles situations where there are not enough candidates.
    related_mels = []
    for k in range(num_candidates):
        rel_clip = load_audio(random.choice(candidates), sample_rate)
        gap = rel_clip.shape[-1] - max_samples
        if gap > 0:
            rand_start = random.randint(0, gap)
            rel_clip = rel_clip[:, rand_start:rand_start+max_samples]
        as_mel = wav_to_mel(rel_clip)
        related_mels.append(as_mel)
    return torch.stack(related_mels, dim=0)


def load_discrete_vocoder_diffuser(trained_diffusion_steps=4000, desired_diffusion_steps=200):
    """
    Helper function to load a GaussianDiffusion instance configured for use as a vocoder. The schedule is respaced
    from trained_diffusion_steps down to desired_diffusion_steps so sampling only visits the reduced set of timesteps.
    """
    return SpacedDiffusion(use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]), model_mean_type='epsilon',
                           model_var_type='learned_range', loss_type='mse', betas=get_named_beta_schedule('linear', trained_diffusion_steps))


def do_spectrogram_diffusion(diffusion_model, dvae_model, diffuser, mel_codes, conditioning_input, spectrogram_compression_factor=128, plt_spec=False, am=None):
    """
    Uses the specified diffusion model and DVAE model to convert the provided discrete MEL codes & conditioning input
    into an audio clip. When plt_spec is set, the decoded MEL is plotted and, if a reference MEL is passed in via
    `am`, the MSE against it is printed for debugging.
    """
    diffusion_model.eval()
    dvae_model.eval()
    with torch.no_grad():
        mel = dvae_model.decode(mel_codes)[0]

        if plt_spec:
            plot_spectrogram(mel[0].cpu())
            if am is not None:
                m = mel[:, :, :am.shape[-1]]
                print(torch.nn.MSELoss()(am, m))

        # Pad MEL to multiples of 4096//spectrogram_compression_factor.
        msl = mel.shape[-1]
        dsl = 4096 // spectrogram_compression_factor
        gap = (dsl - (msl % dsl)) % dsl  # Zero when the MEL is already aligned.
        if gap > 0:
            mel = torch.nn.functional.pad(mel, (0, gap))

        output_shape = (mel.shape[0], 1, mel.shape[-1] * spectrogram_compression_factor)
        return diffuser.p_sample_loop(diffusion_model, output_shape, model_kwargs={'spectrogram': mel, 'conditioning_input': conditioning_input})
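For a sense of scale, here is the padding arithmetic from do_spectrogram_diffusion worked through with a hypothetical MEL length (illustrative numbers only):

# Illustrative numbers only: a hypothetical 100-frame decoded MEL.
spectrogram_compression_factor = 128
msl = 100                                       # decoded MEL frames (hypothetical)
dsl = 4096 // spectrogram_compression_factor    # = 32; the MEL length must be a multiple of this
gap = (dsl - (msl % dsl)) % dsl                 # = 28 frames of zero-padding
padded_len = msl + gap                          # = 128 frames
output_samples = padded_len * spectrogram_compression_factor   # = 16384 audio samples produced by p_sample_loop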
codes/scripts/audio/use_discrete_vocoder.py (new file, +47 lines)
@@ -0,0 +1,47 @@
import argparse

import torchaudio

from data.audio.unsupervised_audio_dataset import load_audio
from scripts.audio.speech_synthesis_utils import do_spectrogram_diffusion, \
    load_discrete_vocoder_diffuser, wav_to_mel, convert_mel_to_codes
from utils.audio import plot_spectrogram
from utils.util import load_model_from_config


def roundtrip_vocoding(dvae, vocoder, diffuser, clip, cond=None, plot_spec=False):
    clip = clip.unsqueeze(0)
    if cond is None:
        cond = clip
    else:
        cond = cond.unsqueeze(0)
    mel = wav_to_mel(clip)
    if plot_spec:
        plot_spectrogram(mel[0].cpu())
    codes = convert_mel_to_codes(dvae, mel)
    return do_spectrogram_diffusion(vocoder, dvae, diffuser, codes, cond, spectrogram_compression_factor=128, plt_spec=plot_spec, am=mel)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to options YAML file used to train the diffusion model', default='X:\\dlas\\experiments\\train_diffusion_vocoder_with_cond_new_dvae.yml')
    parser.add_argument('-diffusion_model_name', type=str, help='Name of the diffusion model in opt.', default='generator')
    parser.add_argument('-diffusion_model_path', type=str, help='Path to the diffusion model checkpoint to load.', default='X:\\dlas\\experiments\\train_diffusion_vocoder_with_cond_new_dvae\\models\\6200_generator_ema.pth')
    parser.add_argument('-dvae_model_name', type=str, help='Name of the DVAE model in opt.', default='dvae')
    parser.add_argument('-input_file', type=str, help='Path to the input audio file.', default='Z:\\clips\\books1\\3_dchha04 Romancing The Tribes\\00036.wav')
    parser.add_argument('-cond', type=str, help='Path to the conditioning input audio file.', default=None)
    args = parser.parse_args()

    print("Loading DVAE..")
    dvae = load_model_from_config(args.opt, args.dvae_model_name)
    print("Loading Diffusion Model..")
    diffusion = load_model_from_config(args.opt, args.diffusion_model_name, also_load_savepoint=False, load_path=args.diffusion_model_path)

    print("Loading data..")
    diffuser = load_discrete_vocoder_diffuser()
    inp = load_audio(args.input_file, 22050).cuda()
    cond = None if args.cond is None else load_audio(args.cond, 22050).cuda()

    print("Performing inference..")
    roundtripped = roundtrip_vocoding(dvae, diffusion, diffuser, inp, cond).cpu()
    # The vocoder generates half as many samples as the 22050 Hz input, so save the result at 11025 Hz.
    torchaudio.save('roundtrip_vocoded_output.wav', roundtripped.squeeze(0), 11025)
@@ -286,7 +286,7 @@ class Trainer:
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
-    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_gpt_asr_mass_hf2.yml')
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/validate_lrdvae_proper.yml')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
@@ -18,6 +18,9 @@ from torch.utils.checkpoint import checkpoint
from torch._six import inf

import yaml

from trainer import networks

try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
@@ -460,4 +463,24 @@ def clip_grad_norm(parameters: list, parameter_names: list, max_norm: float, nor
    if clip_coef < 1:
        for p in parameters:
            p.grad.detach().mul_(clip_coef.to(p.grad.device))
    return total_norm


Loader, Dumper = OrderedYaml()
def load_model_from_config(cfg_file, model_name=None, dev='cuda', also_load_savepoint=True, load_path=None):
    with open(cfg_file, mode='r') as f:
        opt = yaml.load(f, Loader=Loader)
    if model_name is None:
        # When no name is given, default to the first network defined in the config.
        model_name = next(iter(opt['networks'].keys()))
        model_cfg = opt['networks'][model_name]
    else:
        model_cfg = opt['networks'][model_name]
    if 'which_model_G' in model_cfg.keys() and 'which_model' not in model_cfg.keys():
        model_cfg['which_model'] = model_cfg['which_model_G']
    model = networks.create_model(opt, model_cfg).to(dev)
    if also_load_savepoint and f'pretrain_model_{model_name}' in opt['path'].keys():
        assert load_path is None
        load_path = opt['path'][f'pretrain_model_{model_name}']
    if load_path is not None:
        model.load_state_dict(torch.load(load_path))
    return model
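For reference, a minimal sketch of the options dict load_model_from_config reads once the YAML is parsed. The keys mirror the lookups in the function; the network name, model type and checkpoint path are placeholders, and real option files in this repo carry many more fields:

# Hypothetical minimal structure; only the keys read by load_model_from_config are shown.
opt = {
    'networks': {
        'dvae': {'which_model': 'placeholder_dvae_type'},  # 'which_model_G' is also accepted
    },
    'path': {
        'pretrain_model_dvae': 'placeholder/path/to/dvae.pth',  # consulted when also_load_savepoint=True
    },
}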