diff --git a/codes/models/audio/music/music_quantizer.py b/codes/models/audio/music/music_quantizer.py
index 80cbbb2a..19429e41 100644
--- a/codes/models/audio/music/music_quantizer.py
+++ b/codes/models/audio/music/music_quantizer.py
@@ -5,6 +5,7 @@ from torch import nn
 import torch.nn.functional as F
 
 from models.arch_util import zero_module
+from models.vqvae.vqvae import Quantize
 from trainer.networks import register_model
 from utils.util import checkpoint, ceil_multiple, print_network
 
@@ -152,16 +153,21 @@ class Wav2Vec2GumbelVectorQuantizer(nn.Module):
 class MusicQuantizer(nn.Module):
     def __init__(self, inp_channels=256, inner_dim=1024, codevector_dim=1024, down_steps=2,
                  max_gumbel_temperature=2.0, min_gumbel_temperature=.5, gumbel_temperature_decay=.999995,
-                 codebook_size=16, codebook_groups=4):
+                 codebook_size=16, codebook_groups=4, use_vqvae_quantizer=False):
         super().__init__()
         if not isinstance(inner_dim, list):
             inner_dim = [inner_dim // 2 ** x for x in range(down_steps+1)]
         self.max_gumbel_temperature = max_gumbel_temperature
         self.min_gumbel_temperature = min_gumbel_temperature
         self.gumbel_temperature_decay = gumbel_temperature_decay
-        self.quantizer = Wav2Vec2GumbelVectorQuantizer(inner_dim[0], codevector_dim=codevector_dim,
-                                                       num_codevector_groups=codebook_groups,
-                                                       num_codevectors_per_group=codebook_size)
+        self.use_vqvae_quantizer = use_vqvae_quantizer
+        if use_vqvae_quantizer:
+            self.quantizer = Quantize(inner_dim[0], codebook_size)
+            assert codevector_dim == inner_dim[0]  # Because this quantizer doesn't support different sizes.
+        else:
+            self.quantizer = Wav2Vec2GumbelVectorQuantizer(inner_dim[0], codevector_dim=codevector_dim,
+                                                           num_codevector_groups=codebook_groups,
+                                                           num_codevectors_per_group=codebook_size)
         self.codebook_size = codebook_size
         self.codebook_groups = codebook_groups
         self.num_losses_record = []
@@ -209,8 +215,11 @@ class MusicQuantizer(nn.Module):
         h = self.down(mel)
         h = self.encoder(h)
         h = self.enc_norm(h.permute(0,2,1))
-        codevectors, perplexity, codes = self.quantizer(h, return_probs=True)
-        diversity = (self.quantizer.num_codevectors - perplexity) / self.quantizer.num_codevectors
+        if self.use_vqvae_quantizer:
+            codevectors, diversity, codes = self.quantizer(h)
+        else:
+            codevectors, perplexity, codes = self.quantizer(h, return_probs=True)
+            diversity = (self.quantizer.num_codevectors - perplexity) / self.quantizer.num_codevectors
         self.log_codes(codes)
         h = self.decoder(codevectors.permute(0,2,1))
         if return_decoder_latent:
@@ -224,11 +233,12 @@ class MusicQuantizer(nn.Module):
 
     def log_codes(self, codes):
         if self.internal_step % 5 == 0:
-            codes = torch.argmax(codes, dim=-1)
-            ccodes = codes[:,:,0]
-            for j in range(1,codes.shape[-1]):
-                ccodes += codes[:,:,j] * self.codebook_size ** j
-            codes = ccodes
+            if not self.use_vqvae_quantizer:
+                codes = torch.argmax(codes, dim=-1)
+                ccodes = codes[:,:,0]
+                for j in range(1,codes.shape[-1]):
+                    ccodes += codes[:,:,j] * self.codebook_size ** j
+                codes = ccodes
             codes = codes.flatten()
             l = codes.shape[0]
             i = self.code_ind if (self.codes.shape[0] - self.code_ind) > l else self.codes.shape[0] - l
@@ -251,7 +261,7 @@ def register_music_quantizer(opt_net, opt):
 
 
 if __name__ == '__main__':
-    model = MusicQuantizer(inner_dim=[1024,1024,512], codevector_dim=1024, codebook_size=512, codebook_groups=2)
+    model = MusicQuantizer(inner_dim=[1024,1024,512], codevector_dim=1024, codebook_size=8192, codebook_groups=0, use_vqvae_quantizer=True)
     print_network(model)
     mel = torch.randn((2,256,782))
     model(mel)
\ No newline at end of file
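
Usage note (not part of the patch): a minimal sketch of exercising the new use_vqvae_quantizer flag, mirroring the __main__ smoke test in the diff. The import path and the run-from-codes/ layout are assumptions based on the in-repo imports (models.arch_util, models.vqvae.vqvae); forward() is simply invoked, not unpacked, since its return signature is not shown in this diff.

    # Sketch only: assumes execution from the codes/ directory of this repo.
    import torch
    from models.audio.music.music_quantizer import MusicQuantizer  # assumed module path

    mel = torch.randn((2, 256, 782))  # (batch, mel_channels, frames), as in the __main__ test

    # Existing path: Wav2Vec2-style Gumbel-softmax quantizer with grouped codebooks.
    gumbel = MusicQuantizer(inner_dim=[1024, 1024, 512], codevector_dim=1024,
                            codebook_size=512, codebook_groups=2)
    gumbel(mel)

    # New path: the flat Quantize codebook from models/vqvae/vqvae.py. codevector_dim must
    # equal inner_dim[0] (enforced by the new assert), and the second value unpacked from
    # the quantizer stands in for the diversity term (it is that module's latent-loss output,
    # not a perplexity-derived diversity).
    vq = MusicQuantizer(inner_dim=[1024, 1024, 512], codevector_dim=1024,
                        codebook_size=8192, use_vqvae_quantizer=True)
    vq(mel)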