More oversights fixed; I have been using a cached dataloader for so long that I did not catch these problems.

This commit is contained in:
mrq 2023-08-24 10:25:33 -05:00
parent 5873c27f1a
commit 22904a8639
2 changed files with 4 additions and 2 deletions

View File

@@ -138,8 +138,8 @@ class Dataset(_Dataset):
self.paths = paths
self.phone_symmap = phone_symmap or self._get_phone_symmap()
self.spkr_symmap = spkr_symmap or self._get_spkr_symmap()
self.task_symmap = get_task_symmap or self._get_task_symmap()
self.spkr_symmap = self._get_spkr_symmap()
self.task_symmap = self._get_task_symmap()
self.training = training
# assert len(self.phone_symmap) < 256, "Unique token count should be [0,255] to fit within uint8"

View File

@@ -26,6 +26,7 @@ def _load_encodec_model(device="cuda", levels=cfg.models.max_levels):
assert cfg.sample_rate == 24_000
# too lazy to un-if ladder this shit
bandwidth_id = 6.0
if levels == 2:
bandwidth_id = 1.5
elif levels == 4:
@@ -50,6 +51,7 @@ def _load_vocos_model(device="cuda", levels=cfg.models.max_levels):
model = model.to(device)
# too lazy to un-if ladder this shit
bandwidth_id = 2
if levels == 2:
bandwidth_id = 0
elif levels == 4: