update

parent f9ebcf11d8
commit 51d1908e94
@@ -4,6 +4,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
+from models.arch_util import ResBlock
 from models.audio.music.music_quantizer2 import MusicQuantizer2
 from models.diffusion.nn import timestep_embedding, normalization, zero_module, conv_nd, linear
 from models.diffusion.unet_diffusion import TimestepBlock
@@ -45,13 +46,17 @@ class DietAttentionBlock(TimestepBlock):
         self.rms_scale_norm = RMSScaleShiftNorm(in_dim)
         self.proj = nn.Linear(in_dim, dim)
         self.attn = Attention(dim, heads=heads, causal=False, dropout=dropout)
-        self.ff = FeedForward(dim, in_dim, mult=1, dropout=dropout, zero_init_output=True)
+        self.res = ResBlock(channels=dim, dropout=dropout, out_channels=dim, dims=1)
+        self.ff = nn.Sequential(nn.LayerNorm(dim),
+                                nn.GELU(),
+                                FeedForward(dim, in_dim, mult=1, dropout=dropout, zero_init_output=True))
 
     def forward(self, x, timestep_emb, rotary_emb):
         h = self.rms_scale_norm(x, norm_scale_shift_inp=timestep_emb)
         h = self.proj(h)
         k, _, _, _ = checkpoint(self.attn, h, None, None, None, None, None, rotary_emb)
         h = k + h
+        h = checkpoint(self.res, h.permute(0,2,1)).permute(0,2,1)
         h = checkpoint(self.ff, h)
         return h + x
 
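Note on the hunk above: the permute(0,2,1) pair around the new self.res call suggests that ResBlock from models.arch_util with dims=1 is a 1-D convolutional residual block expecting (batch, channels, sequence) input, while the attention and feed-forward path keeps activations as (batch, sequence, channels). A minimal, self-contained sketch of that layout handling; Conv1dResBlock is a hypothetical stand-in for the repo's ResBlock, not its actual implementation:

# Sketch only: Conv1dResBlock approximates a dims=1 ResBlock that wants (B, C, T) input.
import torch
import torch.nn as nn

class Conv1dResBlock(nn.Module):
    def __init__(self, channels, dropout=0.1):
        super().__init__()
        self.body = nn.Sequential(
            nn.GroupNorm(8, channels), nn.SiLU(),
            nn.Conv1d(channels, channels, 3, padding=1),
            nn.Dropout(dropout),
            nn.Conv1d(channels, channels, 3, padding=1),
        )

    def forward(self, x):  # x: (B, C, T)
        return x + self.body(x)

B, T, C = 2, 16, 512
h = torch.randn(B, T, C)                         # attention path: (B, T, C)
res = Conv1dResBlock(C)
h = res(h.permute(0, 2, 1)).permute(0, 2, 1)     # convs want (B, C, T), so permute in and out
print(h.shape)                                   # torch.Size([2, 16, 512])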
@@ -227,6 +232,7 @@ class TransformerDiffusionWithQuantizer(nn.Module):
     def get_grad_norm_parameter_groups(self):
         groups = {
             'attention_layers': list(itertools.chain.from_iterable([lyr.attn.parameters() for lyr in self.diff.layers])),
+            'res_layers': list(itertools.chain.from_iterable([lyr.res.parameters() for lyr in self.diff.layers])),
             'ff_layers': list(itertools.chain.from_iterable([lyr.ff.parameters() for lyr in self.diff.layers])),
             'quantizer_encoder': list(self.quantizer.encoder.parameters()),
             'quant_codebook': [self.quantizer.quantizer.codevectors],
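Note on the hunk above: the new 'res_layers' entry follows the same pattern as the existing groups. As a hedged sketch (not code from this repo), one way a training loop could consume get_grad_norm_parameter_groups() to log a separate L2 gradient norm per group after loss.backward():

import torch

def per_group_grad_norms(model):
    # Assumes model.get_grad_norm_parameter_groups() returns {group_name: [parameters]},
    # as in the diff above; the aggregation and logging here are illustrative.
    norms = {}
    for name, params in model.get_grad_norm_parameter_groups().items():
        grads = [p.grad.detach().flatten() for p in params if p.grad is not None]
        if grads:
            # Single L2 norm over every gradient element in the group.
            norms[name] = torch.cat(grads).norm(2).item()
    return norms

# Usage (illustrative): call after loss.backward(), before optimizer.step().
# print(per_group_grad_norms(model))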