forked from mrq/DL-Art-School
resolve more issues
parent 3692c4cae3
commit cc4c9faf9a
@@ -157,6 +157,7 @@ class DiffusionTts(nn.Module):
             kernel_size=3,
             scale_factor=2,
             time_embed_dim_multiplier=4,
+            freeze_main_net=False,
             efficient_convs=True,  # Uses kernels with width of 1 in several places rather than 3.
             use_scale_shift_norm=True,
             # Parameters for regularization.
@@ -188,6 +189,7 @@ class DiffusionTts(nn.Module):
         self.unconditioned_percentage = unconditioned_percentage
         self.enable_fp16 = use_fp16
         self.alignment_size = 2 ** (len(channel_mult)+1)
+        self.freeze_main_net = freeze_main_net
         padding = 1 if kernel_size == 3 else 2
         down_kernel = 1 if efficient_convs else 3

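Side note on the alignment_size context line above: 2 ** (len(channel_mult)+1) means inputs must land on a power-of-two length boundary. A minimal sketch of that padding rule; pad_to_alignment is a hypothetical helper for illustration, not part of this repo:

```python
import torch
import torch.nn.functional as F

def pad_to_alignment(x: torch.Tensor, channel_mult=(1, 2, 4, 8)) -> torch.Tensor:
    # Mirrors self.alignment_size = 2 ** (len(channel_mult)+1): every
    # down/up level halves or doubles the sequence, so its length must
    # divide evenly by the total scale factor.
    alignment = 2 ** (len(channel_mult) + 1)  # 32 for four levels
    pad = (alignment - x.shape[-1] % alignment) % alignment
    return F.pad(x, (0, pad))

x = torch.randn(1, 100, 45)
print(pad_to_alignment(x).shape[-1])  # 64
```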
@@ -379,7 +381,17 @@ class DiffusionTts(nn.Module):
             zero_module(conv_nd(dims, model_channels, out_channels, kernel_size, padding=padding)),
         )

+        if self.freeze_main_net:
+            mains = [self.time_embed, self.contextual_embedder, self.conditioning_conv, self.unconditioned_embedding, self.conditioning_timestep_integrator,
+                     self.input_blocks, self.middle_block, self.output_blocks, self.out]
+            for m in mains:
+                for p in m.parameters():
+                    p.requires_grad = False
+                    p.DO_NOT_TRAIN = True
+
     def get_grad_norm_parameter_groups(self):
+        if self.freeze_main_net:
+            return {}
         groups = {
             'minicoder': list(self.contextual_embedder.parameters()),
             'input_blocks': list(self.input_blocks.parameters()),
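A minimal sketch of how trainer-side code might honor the freeze flags added above; the requires_grad check is standard PyTorch, while consuming the DO_NOT_TRAIN marker this way is an assumption, not DL-Art-School's actual trainer logic:

```python
import torch

def trainable_parameters(model: torch.nn.Module):
    # Yield only parameters the freeze logic left trainable. DO_NOT_TRAIN
    # is a plain attribute set on the Parameter objects above, so it is
    # read with getattr() since most parameters won't carry it.
    for p in model.parameters():
        if p.requires_grad and not getattr(p, 'DO_NOT_TRAIN', False):
            yield p

# Hypothetical usage:
# opt = torch.optim.AdamW(trainable_parameters(model), lr=1e-4)
```

Returning {} from get_grad_norm_parameter_groups when frozen keeps grad-norm logging away from parameters that no longer receive gradients.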
@@ -80,6 +80,7 @@ class DiffusionTtsFlat(nn.Module):
         self.contextual_embedder = nn.Sequential(nn.Conv1d(in_channels,model_channels,3,padding=1,stride=2),
                                                  CheckpointedXTransformerEncoder(
                                                      needs_permute=True,
+                                                     checkpoint=False,  # This is repeatedly executed for many conditioning signals, which is incompatible with checkpointing & DDP.
                                                      max_seq_len=-1,
                                                      use_pos_emb=False,
                                                      attn_layers=Encoder(
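The new checkpoint=False line is the substance of this hunk. For background, a generic sketch of the activation-checkpointing pattern being disabled here (an illustration with torch.utils.checkpoint, not this repo's CheckpointedXTransformerEncoder):

```python
import torch
from torch.utils.checkpoint import checkpoint

class MaybeCheckpointed(torch.nn.Module):
    # Activation checkpointing trades memory for recompute. Running the
    # same checkpointed module several times per step (here: once per
    # conditioning clip) can break DDP's gradient-reducer bookkeeping,
    # which is why the encoder above turns it off.
    def __init__(self, inner: torch.nn.Module, use_checkpoint: bool):
        super().__init__()
        self.inner = inner
        self.use_checkpoint = use_checkpoint

    def forward(self, x):
        if self.use_checkpoint and self.training:
            return checkpoint(self.inner, x, use_reentrant=False)
        return self.inner(x)

enc = MaybeCheckpointed(torch.nn.Linear(8, 8), use_checkpoint=False)
print(enc(torch.randn(2, 8)).shape)  # torch.Size([2, 8])
```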
@@ -166,18 +167,16 @@ class DiffusionTtsFlat(nn.Module):
         conds = []
         for j in range(speech_conditioning_input.shape[1]):
             conds.append(self.contextual_embedder(speech_conditioning_input[:, j]))
-        conds = torch.stack(conds, dim=1)
-        cond_emb = conds.mean(dim=1)
+        conds = torch.cat(conds, dim=-1)
+        cond_emb = conds.mean(dim=-1).unsqueeze(-1)

-        if len(cond_emb.shape) == 3:  # Just take the first element.
-            cond_emb = cond_emb[:, :, 0]
         if is_latent(aligned_conditioning):
             code_emb = self.latent_converter(aligned_conditioning)
             unused_params.extend(list(self.code_converter.parameters()))
         else:
             code_emb = self.code_converter(aligned_conditioning)
             unused_params.extend(list(self.latent_converter.parameters()))
-        cond_emb_spread = cond_emb.unsqueeze(-1).repeat(1, 1, code_emb.shape[-1])
+        cond_emb_spread = cond_emb.repeat(1, 1, code_emb.shape[-1])
         code_emb = self.conditioning_conv(torch.cat([cond_emb_spread, code_emb], dim=1))

         # Mask out the conditioning branch for whole batch elements, implementing something similar to classifier-free guidance.
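For intuition about the pooling change in this hunk, a standalone comparison of the old and new reductions (tensor sizes are illustrative only):

```python
import torch

b, c, t, n = 2, 512, 40, 3  # batch, channels, time, number of conditioning clips
conds = [torch.randn(b, c, t) for _ in range(n)]

# Old: stack clips on a new dim, average over clips -> (b, c, t). This kept
# a time axis, which the removed len(shape) == 3 special case then trimmed.
old = torch.stack(conds, dim=1).mean(dim=1)

# New: concatenate along time, average over time -> (b, c, 1). One embedding
# per batch element, ready to .repeat() across code_emb's time axis directly.
new = torch.cat(conds, dim=-1).mean(dim=-1).unsqueeze(-1)

print(old.shape)  # torch.Size([2, 512, 40])
print(new.shape)  # torch.Size([2, 512, 1])
```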