handle unused encoder parameters

James Betker 2022-06-17 09:37:07 -06:00
parent e025183bfb
commit 7ca532c7cc


@@ -525,10 +525,16 @@ class TransformerDiffusionWithCheaterLatent(nn.Module):
         self.encoder = self.encoder.eval()
 
     def forward(self, x, timesteps, truth_mel, conditioning_input=None, disable_diversity=False, conditioning_free=False):
+        unused_parameters = []
         encoder_grad_enabled = self.internal_step > self.freeze_encoder_until
+        if not encoder_grad_enabled:
+            unused_parameters.extend(list(self.encoder.parameters()))
         with torch.set_grad_enabled(encoder_grad_enabled):
             proj = self.encoder(truth_mel).permute(0,2,1)
+        for p in unused_parameters:
+            proj = proj + p.mean() * 0
         diff = self.diff(x, timesteps, codes=proj, conditioning_input=conditioning_input, conditioning_free=conditioning_free)
         return diff
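
The added lines appear to be the common workaround for DistributedDataParallel's unused-parameter check: while the encoder is frozen (grad disabled), each of its parameters is added to the output as a zero-valued term (p.mean() * 0), which leaves the result numerically unchanged but keeps every parameter in the autograd graph so DDP does not complain that some parameters received no gradient. Below is a minimal, self-contained sketch of that trick under the same assumption; FrozenEncoderWrapper is a hypothetical helper for illustration and is not part of this repository.

import torch
import torch.nn as nn

class FrozenEncoderWrapper(nn.Module):
    """Hypothetical helper illustrating the unused-parameter workaround."""

    def __init__(self, encoder: nn.Module, freeze: bool = True):
        super().__init__()
        self.encoder = encoder
        self.freeze = freeze

    def forward(self, x):
        # Skip building gradients through the encoder while it is frozen.
        with torch.set_grad_enabled(not self.freeze):
            out = self.encoder(x)
        if self.freeze:
            # Touch every frozen parameter with a zero-valued term so DDP
            # still sees it participate in producing the loss; the output
            # value is unchanged because p.mean() * 0 == 0.
            for p in self.encoder.parameters():
                out = out + p.mean() * 0
        return out

An alternative is to construct DistributedDataParallel with find_unused_parameters=True, but that adds per-step graph traversal overhead, whereas the zero-term trick keeps the default fast path.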