diff --git a/codes/data/base_unsupervised_image_dataset.py b/codes/data/base_unsupervised_image_dataset.py
index 24a2c54d..1cf3db13 100644
--- a/codes/data/base_unsupervised_image_dataset.py
+++ b/codes/data/base_unsupervised_image_dataset.py
@@ -34,7 +34,7 @@ class BaseUnsupervisedImageDataset(data.Dataset):
                 for c in chunks:
                     c.reload(opt)
             else:
-                chunks = [ChunkWithReference(opt, d) for d in os.scandir(path) if d.is_dir()]
+                chunks = [ChunkWithReference(opt, d) for d in sorted(os.scandir(path), key=lambda e: e.name) if d.is_dir()]
             # Prune out chunks that have no images
             res = []
             for c in chunks:
diff --git a/codes/data/multi_frame_dataset.py b/codes/data/multi_frame_dataset.py
index fb37ae3d..17cc43fb 100644
--- a/codes/data/multi_frame_dataset.py
+++ b/codes/data/multi_frame_dataset.py
@@ -23,6 +23,7 @@ class MultiFrameDataset(BaseUnsupervisedImageDataset):
                 frames_needed -= 1
                 search_idx -= 1
             else:
+                search_idx += 1
                 break

         # Now build num_frames starting from search_idx.
@@ -62,7 +63,7 @@ class MultiFrameDataset(BaseUnsupervisedImageDataset):
 if __name__ == '__main__':
     opt = {
         'name': 'amalgam',
-        'paths': ['F:\\4k6k\\datasets\\ns_images\\vixen\\full_video_256_tiled_with_ref'],
+        'paths': ['/content/fullvideo_256_tiled_test'],
         'weights': [1],
         'target_size': 128,
         'force_multiple': 32,
@@ -77,13 +78,14 @@ if __name__ == '__main__':
     ds = MultiFrameDataset(opt)
     import os
     os.makedirs("debug", exist_ok=True)
-    for i in range(100000, len(ds)):
+    for i in [3]:
         import random
-        o = ds[random.randint(0, 1000000)]
+        o = ds[i]
         k = 'GT'
         v = o[k]
         if 'path' not in k and 'center' not in k:
             fr, f, h, w = v.shape
             for j in range(fr):
                 import torchvision
-                torchvision.utils.save_image(v[j].unsqueeze(0), "debug/%i_%s_%i.png" % (i, k, j))
\ No newline at end of file
+                base=osp.basename(o["GT_path"])
+                torchvision.utils.save_image(v[j].unsqueeze(0), "debug/%i_%s_%i__%s.png" % (i, k, j, base))