forked from mrq/DL-Art-School

Support legacy vqvae quantizer in music_quantizer

parent 4819f15521
commit 8f8b189025
@@ -5,6 +5,7 @@ from torch import nn
 import torch.nn.functional as F
 
 from models.arch_util import zero_module
+from models.vqvae.vqvae import Quantize
 from trainer.networks import register_model
 from utils.util import checkpoint, ceil_multiple, print_network
 
@@ -152,16 +153,21 @@ class Wav2Vec2GumbelVectorQuantizer(nn.Module):
 class MusicQuantizer(nn.Module):
     def __init__(self, inp_channels=256, inner_dim=1024, codevector_dim=1024, down_steps=2,
                  max_gumbel_temperature=2.0, min_gumbel_temperature=.5, gumbel_temperature_decay=.999995,
-                 codebook_size=16, codebook_groups=4):
+                 codebook_size=16, codebook_groups=4, use_vqvae_quantizer=False):
         super().__init__()
         if not isinstance(inner_dim, list):
             inner_dim = [inner_dim // 2 ** x for x in range(down_steps+1)]
         self.max_gumbel_temperature = max_gumbel_temperature
         self.min_gumbel_temperature = min_gumbel_temperature
         self.gumbel_temperature_decay = gumbel_temperature_decay
-        self.quantizer = Wav2Vec2GumbelVectorQuantizer(inner_dim[0], codevector_dim=codevector_dim,
-                                                       num_codevector_groups=codebook_groups,
-                                                       num_codevectors_per_group=codebook_size)
+        self.use_vqvae_quantizer = use_vqvae_quantizer
+        if use_vqvae_quantizer:
+            self.quantizer = Quantize(inner_dim[0], codebook_size)
+            assert codevector_dim == inner_dim[0]  # Because this quantizer doesn't support different sizes.
+        else:
+            self.quantizer = Wav2Vec2GumbelVectorQuantizer(inner_dim[0], codevector_dim=codevector_dim,
+                                                           num_codevector_groups=codebook_groups,
+                                                           num_codevectors_per_group=codebook_size)
         self.codebook_size = codebook_size
         self.codebook_groups = codebook_groups
         self.num_losses_record = []
@@ -209,8 +215,11 @@ class MusicQuantizer(nn.Module):
         h = self.down(mel)
         h = self.encoder(h)
         h = self.enc_norm(h.permute(0,2,1))
-        codevectors, perplexity, codes = self.quantizer(h, return_probs=True)
-        diversity = (self.quantizer.num_codevectors - perplexity) / self.quantizer.num_codevectors
+        if self.use_vqvae_quantizer:
+            codevectors, diversity, codes = self.quantizer(h)
+        else:
+            codevectors, perplexity, codes = self.quantizer(h, return_probs=True)
+            diversity = (self.quantizer.num_codevectors - perplexity) / self.quantizer.num_codevectors
         self.log_codes(codes)
         h = self.decoder(codevectors.permute(0,2,1))
         if return_decoder_latent:
@@ -224,11 +233,12 @@ class MusicQuantizer(nn.Module):
 
     def log_codes(self, codes):
         if self.internal_step % 5 == 0:
-            codes = torch.argmax(codes, dim=-1)
-            ccodes = codes[:,:,0]
-            for j in range(1,codes.shape[-1]):
-                ccodes += codes[:,:,j] * self.codebook_size ** j
-            codes = ccodes
+            if not self.use_vqvae_quantizer:
+                codes = torch.argmax(codes, dim=-1)
+                ccodes = codes[:,:,0]
+                for j in range(1,codes.shape[-1]):
+                    ccodes += codes[:,:,j] * self.codebook_size ** j
+                codes = ccodes
             codes = codes.flatten()
             l = codes.shape[0]
             i = self.code_ind if (self.codes.shape[0] - self.code_ind) > l else self.codes.shape[0] - l
@@ -251,7 +261,7 @@ def register_music_quantizer(opt_net, opt):
 
 
 if __name__ == '__main__':
-    model = MusicQuantizer(inner_dim=[1024,1024,512], codevector_dim=1024, codebook_size=512, codebook_groups=2)
+    model = MusicQuantizer(inner_dim=[1024,1024,512], codevector_dim=1024, codebook_size=8192, codebook_groups=0, use_vqvae_quantizer=True)
     print_network(model)
     mel = torch.randn((2,256,782))
     model(mel)
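
For reference, a minimal sketch of driving both quantizer paths, mirroring the __main__ block above. Two details here are assumptions not confirmed by this diff: the import path, and that Quantize in models/vqvae/vqvae.py follows the rosinality-style VQ-VAE convention of Quantize(dim, n_embed) returning (quantized, commitment_loss, code_indices), which is what lets forward() unpack it as codevectors, diversity, codes.

    # Hypothetical driver, not part of this commit; the import path is assumed.
    import torch
    from models.audio.music.music_quantizer import MusicQuantizer

    mel = torch.randn((2, 256, 782))  # (batch, mel bins, frames)

    # Legacy VQ-VAE path: one flat codebook (codebook_groups is not used to
    # build the quantizer), and codevector_dim must equal inner_dim[0].
    vq = MusicQuantizer(inner_dim=[1024, 1024, 512], codevector_dim=1024,
                        codebook_size=8192, codebook_groups=0,
                        use_vqvae_quantizer=True)

    # Default Gumbel path, as before this commit: grouped codevectors whose
    # diversity term is derived from the quantizer's perplexity.
    gumbel = MusicQuantizer(inner_dim=[1024, 1024, 512], codevector_dim=1024,
                            codebook_size=512, codebook_groups=2)

    for model in (vq, gumbel):
        model(mel)  # same forward interface regardless of quantizer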