diff --git a/codes/models/audio/music/transformer_diffusion8.py b/codes/models/audio/music/transformer_diffusion8.py
index 0037ec5c..8521437b 100644
--- a/codes/models/audio/music/transformer_diffusion8.py
+++ b/codes/models/audio/music/transformer_diffusion8.py
@@ -196,18 +196,18 @@ class TransformerDiffusion(nn.Module):
 
 
 class TransformerDiffusionWithQuantizer(nn.Module):
-    def __init__(self, quantizer_dims=[1024], train_quantizer_reconstruction_until=-1, freeze_quantizer_until=10000, **kwargs):
+    def __init__(self, freeze_quantizer_until=20000, quantizer_dims=[1024], no_reconstruction=True, **kwargs):
         super().__init__()
 
+        self.internal_step = 0
         self.freeze_quantizer_until = freeze_quantizer_until
-        self.train_quantizer_reconstruction_until = train_quantizer_reconstruction_until
         self.diff = TransformerDiffusion(**kwargs)
         self.quantizer = MusicQuantizer2(inp_channels=kwargs['in_channels'], inner_dim=quantizer_dims,
-                                         codevector_dim=quantizer_dims[0], codebook_size=256,
-                                         codebook_groups=2, max_gumbel_temperature=4, min_gumbel_temperature=.5)
+                                         codevector_dim=quantizer_dims[0], checkpoint=False,
+                                         codebook_size=256, codebook_groups=2,
+                                         max_gumbel_temperature=4, min_gumbel_temperature=.5)
         self.quantizer.quantizer.temperature = self.quantizer.min_gumbel_temperature
-        if train_quantizer_reconstruction_until == -1:
-            # We won't be using the upsampler, so delete it.
+        if no_reconstruction:
             del self.quantizer.up
 
     def update_for_step(self, step, *args):
@@ -219,38 +219,30 @@ class TransformerDiffusionWithQuantizer(nn.Module):
         )
 
     def forward(self, x, timesteps, truth_mel, conditioning_input=None, disable_diversity=False, conditioning_free=False):
-        diff_disabled = self.internal_step < self.train_quantizer_reconstruction_until
-        if diff_disabled:
-            mse, diversity_loss = self.quantizer(truth_mel)
-
-            # Use the diff parameters so DDP doesn't give us grief.
-            unused = 0
-            for p in self.diff.parameters():
-                unused = unused + p.mean() * 0
-            mse = mse + unused
-            return x.repeat(1,2,1), diversity_loss, mse
-
-        quant_grad_enabled = self.internal_step >= self.freeze_quantizer_until
-        with torch.set_grad_enabled(quant_grad_enabled):
-            proj, diversity_loss = self.quantizer(truth_mel, return_decoder_latent=True)
-            proj = proj.permute(0,2,1)
+        mse, diversity_loss, proj = self.quantizer(truth_mel, return_decoder_latent=True)
+        proj = proj.permute(0,2,1)
 
+        quant_grad_enabled = self.internal_step > self.freeze_quantizer_until
         if not quant_grad_enabled:
+            proj = proj.detach()
             # Make sure this does not cause issues in DDP by explicitly using the parameters for nothing.
             unused = 0
             for p in self.quantizer.parameters():
                 unused = unused + p.mean() * 0
             proj = proj + unused
-            diversity_loss = diversity_loss * 0
 
-        diff = self.diff(x, timesteps, codes=proj, conditioning_input=conditioning_input, conditioning_free=conditioning_free)
+        diff = self.diff(x, timesteps, codes=proj, conditioning_input=conditioning_input,
+                         conditioning_free=conditioning_free)
+
         if disable_diversity:
             return diff
-        return diff, diversity_loss, None
+        if mse is None:
+            return diff, diversity_loss
+        return diff, diversity_loss, mse
 
     def get_debug_values(self, step, __):
         if self.quantizer.total_codes > 0:
-            return {'histogram_codes': self.quantizer.codes[:self.quantizer.total_codes],
+            return {'histogram_quant_codes': self.quantizer.codes[:self.quantizer.total_codes],
                     'gumbel_temperature': self.quantizer.quantizer.temperature}
         else:
             return {}
@@ -330,17 +322,24 @@ def register_transformer_diffusion8_with_ar_prior(opt_net, opt):
 
 def test_quant_model():
     clip = torch.randn(2, 100, 401)
     ts = torch.LongTensor([600, 600])
-    model = TransformerDiffusionWithQuantizer(in_channels=100, model_channels=2048, block_channels=1024, prenet_channels=1024,
-                                              input_vec_dim=1024, num_layers=16, prenet_layers=6, quantizer_dims=[1024,896,768,512],
-                                              train_quantizer_reconstruction_until=-1)
-    model.get_grad_norm_parameter_groups()
+    model = TransformerDiffusionWithQuantizer(in_channels=100, out_channels=200, quantizer_dims=[1024,768,512,384],
+                                              model_channels=2048, block_channels=1024, prenet_channels=1024,
+                                              input_vec_dim=1024, num_layers=16, prenet_layers=6,
+                                              no_reconstruction=False)
+    #model.get_grad_norm_parameter_groups()
+    #quant_weights = torch.load('D:\\dlas\\experiments\\train_music_quant_r4\\models\\5000_generator.pth')
+    #diff_weights = torch.load('X:\\dlas\\experiments\\train_music_diffusion_tfd5\\models\\48000_generator_ema.pth')
+    #model.quantizer.load_state_dict(quant_weights, strict=False)
+    #model.diff.load_state_dict(diff_weights)
+
+    #torch.save(model.state_dict(), 'sample.pth')
     print_network(model)
     o = model(clip, ts, clip)
 
 
 def test_ar_model():
-    clip = torch.randn(2, 256, 400)
+    clip = torch.randn(2, 256, 401)
     cond = torch.randn(2, 256, 400)
     ts = torch.LongTensor([600, 600])
     model = TransformerDiffusionWithARPrior(model_channels=2048, block_channels=1024, prenet_channels=1024,
@@ -358,7 +357,8 @@ def test_ar_model():
     model.diff.load_state_dict(pruned_diff_weights, strict=False)
     torch.save(model.state_dict(), 'sample.pth')
 
-    model(clip, ts, cond)
+    model(clip, ts, cond, conditioning_input=cond)
+
 
 
 if __name__ == '__main__':
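Note on the freeze-and-detach pattern in the new `forward`: below is a minimal, self-contained sketch of why the patch detaches `proj` and then adds a zero-valued sum over the quantizer parameters while the quantizer is frozen. The names here (`FrozenBranchToy`, `branch`, `main`, `freeze_until`) are illustrative stand-ins, not identifiers from the patch:

```python
import torch
import torch.nn as nn

class FrozenBranchToy(nn.Module):
    """Stand-in for TransformerDiffusionWithQuantizer: `branch` plays the
    quantizer, `main` plays the diffusion network."""
    def __init__(self, freeze_until=20000):
        super().__init__()
        self.freeze_until = freeze_until
        self.internal_step = 0
        self.branch = nn.Linear(8, 8)
        self.main = nn.Linear(8, 8)

    def forward(self, x):
        proj = self.branch(x)
        if self.internal_step <= self.freeze_until:
            # Detach so no useful gradient reaches the frozen branch...
            proj = proj.detach()
            # ...but keep every branch parameter in the autograd graph via a
            # zero-valued term. DistributedDataParallel's reducer expects each
            # parameter to receive a gradient every iteration; parameters that
            # never appear in the graph trigger unused-parameter errors unless
            # DDP is constructed with find_unused_parameters=True, which costs
            # an extra graph traversal per step.
            unused = 0
            for p in self.branch.parameters():
                unused = unused + p.mean() * 0
            proj = proj + unused
        return self.main(proj)

toy = FrozenBranchToy()
toy(torch.randn(2, 8)).sum().backward()
# toy.branch.weight.grad is now a zero tensor: present (DDP is satisfied)
# but exactly zero (the branch does not train).
```

The same trick appeared twice in the old code, once over `self.diff.parameters()` for the quantizer-only phase and once over `self.quantizer.parameters()`; the patch keeps only the quantizer-side copy because the diffusion network now runs on every step.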