We don't need that encoder either..

James Betker 2022-06-19 23:24:42 -06:00
parent 56c4a00e71
commit 0e5a3f4712
2 changed files with 1 addition and 4 deletions


@@ -20,6 +20,7 @@ class PreprocessedMelDataset(torch.utils.data.Dataset):
         if os.path.exists(cache_path):
             self.paths = torch.load(cache_path)
         else:
+            print("Building cache..")
             path = Path(path)
             self.paths = [str(p) for p in path.rglob("*.npz")]
             torch.save(self.paths, cache_path)
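
The added print only makes the one-time cache build visible. For reference, a minimal standalone sketch of the same path-caching pattern (the function name and arguments below are illustrative, not from the repository): walk the directory tree once with rglob, persist the resulting file list with torch.save, and reload it on later runs instead of rescanning.

# Illustrative sketch of the path-caching pattern shown in the diff above;
# the helper name and signature are assumptions, not repository code.
import os
from pathlib import Path

import torch

def load_or_build_path_cache(root: str, cache_path: str) -> list:
    if os.path.exists(cache_path):
        # Reuse the previously built list of preprocessed files.
        return torch.load(cache_path)
    print("Building cache..")
    # One-time scan of the directory tree for preprocessed .npz files.
    paths = [str(p) for p in Path(root).rglob("*.npz")]
    torch.save(paths, cache_path)
    return paths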


@@ -216,10 +216,6 @@ class TransformerDiffusionWithConditioningEncoder(nn.Module):
         self.internal_step = 0
         self.diff = TransformerDiffusion(**kwargs)
         self.conditioning_encoder = ConditioningEncoder(256, kwargs['model_channels'])
-        self.encoder = UpperEncoder(256, 1024, 256).eval()
-        for p in self.encoder.parameters():
-            p.DO_NOT_TRAIN = True
-            p.requires_grad = False

     def forward(self, x, timesteps, true_cheater, conditioning_input=None, disable_diversity=False, conditioning_free=False):
         cond = self.conditioning_encoder(true_cheater)
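
The deleted lines had constructed a frozen UpperEncoder that the module no longer uses. As a generic illustration of the freezing idiom those lines relied on (the freeze helper below is hypothetical, not repository code), the pattern is to put the submodule in eval mode and mark every parameter as untrainable; DO_NOT_TRAIN is a custom attribute this codebase appears to check so its training loop skips such parameters.

# Hypothetical helper illustrating the freezing idiom used by the removed lines.
import torch.nn as nn

def freeze(module: nn.Module) -> nn.Module:
    module.eval()                   # disable dropout / batch-norm updates
    for p in module.parameters():
        p.DO_NOT_TRAIN = True       # custom flag (assumption: checked by the
                                    # training code to skip these parameters)
        p.requires_grad = False     # exclude from autograd and the optimizer
    return module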