More cleaning

Author: James Betker
Date:   2022-03-16 12:05:56 -06:00
parent d186414566
commit 95ea0a592f
32 changed files with 38 additions and 66 deletions

@@ -33,33 +33,33 @@ def create_dataset(dataset_opt, return_collate=False):
     # datasets for image restoration
     if mode == 'fullimage':
-        from data.full_image_dataset import FullImageDataset as D
+        from data.images.full_image_dataset import FullImageDataset as D
     elif mode == 'single_image_extensible':
-        from data.single_image_dataset import SingleImageDataset as D
+        from data.images.single_image_dataset import SingleImageDataset as D
     elif mode == 'multi_frame_extensible':
-        from data.multi_frame_dataset import MultiFrameDataset as D
+        from data.images.multi_frame_dataset import MultiFrameDataset as D
     elif mode == 'combined':
         from data.combined_dataset import CombinedDataset as D
     elif mode == 'multiscale':
-        from data.multiscale_dataset import MultiScaleDataset as D
+        from data.images.multiscale_dataset import MultiScaleDataset as D
     elif mode == 'paired_frame':
-        from data.paired_frame_dataset import PairedFrameDataset as D
+        from data.images.paired_frame_dataset import PairedFrameDataset as D
     elif mode == 'stylegan2':
-        from data.stylegan2_dataset import Stylegan2Dataset as D
+        from data.images.stylegan2_dataset import Stylegan2Dataset as D
     elif mode == 'imagefolder':
-        from data.image_folder_dataset import ImageFolderDataset as D
+        from data.images.image_folder_dataset import ImageFolderDataset as D
     elif mode == 'torch_dataset':
         from data.torch_dataset import TorchDataset as D
     elif mode == 'byol_dataset':
-        from data.byol_attachment import ByolDatasetWrapper as D
+        from data.images.byol_attachment import ByolDatasetWrapper as D
     elif mode == 'byol_structured_dataset':
-        from data.byol_attachment import StructuredCropDatasetWrapper as D
+        from data.images.byol_attachment import StructuredCropDatasetWrapper as D
     elif mode == 'random_aug_wrapper':
-        from data.byol_attachment import DatasetRandomAugWrapper as D
+        from data.images.byol_attachment import DatasetRandomAugWrapper as D
     elif mode == 'random_dataset':
-        from data.random_dataset import RandomDataset as D
+        from data.images.random_dataset import RandomDataset as D
     elif mode == 'zipfile':
-        from data.zip_file_dataset import ZipFileDataset as D
+        from data.images.zip_file_dataset import ZipFileDataset as D
     elif mode == 'nv_tacotron':
         from data.audio.nv_tacotron_dataset import TextWavLoader as D
         from data.audio.nv_tacotron_dataset import TextMelCollate as C
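
For orientation, the dispatch above keys off the mode entry of the dataset options, and this commit only changes where the image dataset classes are imported from (data.* to data.images.*). A minimal usage sketch follows; everything other than the 'mode' key and the create_dataset signature shown in the hunk header is a placeholder assumption:

from data import create_dataset

# Hypothetical options dict: 'mode' selects the dataset class, which now lives
# under data.images.*; the remaining keys vary per dataset and are placeholders.
dataset_opt = {
    'mode': 'imagefolder',
    'paths': ['/data/train_images'],  # assumed key, not taken from this diff
    'scale': 1,                       # assumed key, not taken from this diff
}
dataset = create_dataset(dataset_opt)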

@@ -1,7 +1,7 @@
 import torch
 from torch.utils import data
-from data.image_corruptor import ImageCorruptor
-from data.chunk_with_reference import ChunkWithReference
+from data.images.image_corruptor import ImageCorruptor
+from data.images.chunk_with_reference import ChunkWithReference
 import os
 import cv2
 import numpy as np

@@ -1,24 +1,19 @@
 import functools
-import glob
-import itertools
 import random
 import cv2
-import kornia
 import numpy as np
-import pytorch_ssim
 import torch
-import os
 import torchvision
 from torch.utils.data import DataLoader
-from torchvision.transforms import Normalize, CenterCrop
+from torchvision.transforms import Normalize
 from tqdm import tqdm
 from data import util
 # Builds a dataset created from a simple folder containing a list of training/test/validation images.
-from data.image_corruptor import ImageCorruptor, kornia_color_jitter_numpy
-from data.image_label_parser import VsNetImageLabeler
+from data.images.image_corruptor import ImageCorruptor, kornia_color_jitter_numpy
+from data.images.image_label_parser import VsNetImageLabeler
 from utils.util import opt_get
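
One import that survives the cleanup, kornia_color_jitter_numpy, hints at how color augmentation is applied to numpy images in this dataset. The real helper's signature and parameters are not shown in this diff, so the following is only a hedged sketch of the general idea, assuming HWC float images in [0, 1]:

import numpy as np
import torch
from kornia.augmentation import ColorJitter

def color_jitter_numpy(img: np.ndarray, strength: float = 0.25) -> np.ndarray:
    # Hypothetical stand-in for kornia_color_jitter_numpy: round-trip the numpy
    # image through a 1xCxHxW tensor so kornia's ColorJitter can be applied.
    t = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).float()
    jitter = ColorJitter(strength, strength, strength, strength)
    return jitter(t).squeeze(0).permute(1, 2, 0).numpy()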

@@ -1,11 +1,3 @@
-import glob
-import itertools
-import random
-import cv2
-import kornia
-import numpy as np
-import pytorch_ssim
 import torch
 import os
@@ -13,14 +5,10 @@ import torchvision
 from PIL import Image
 from torch.utils.data import DataLoader, Dataset
 from torchvision import transforms
-from torchvision.transforms import Normalize
 from tqdm import tqdm
-from data import util
 # Builds a dataset created from a simple folder containing a list of training/test/validation images.
-from data.image_corruptor import ImageCorruptor
-from data.image_label_parser import VsNetImageLabeler
-from utils.util import opt_get
 class ImagePairWithCorrespondingPointsDataset(Dataset):
@@ -70,7 +58,7 @@ if __name__ == '__main__':
         'path': 'F:\\dlas\\codes\\scripts\\ui\\image_pair_labeler\\results',
         'size': 256
     }
-    output_path = '.'
+    output_path = '..'
     ds = DataLoader(ImagePairWithCorrespondingPointsDataset(opt), shuffle=True, num_workers=0)
     for i, d in tqdm(enumerate(ds)):

@@ -1,4 +1,4 @@
-from data.base_unsupervised_image_dataset import BaseUnsupervisedImageDataset
+from data.images.base_unsupervised_image_dataset import BaseUnsupervisedImageDataset
 import numpy as np
 import torch
 from bisect import bisect_left

@@ -4,13 +4,9 @@ import cv2
 import torch
 import torch.utils.data as data
 import data.util as util
-from PIL import Image, ImageOps
-from io import BytesIO
-import torchvision.transforms.functional as F
 # Reads full-quality images and pulls tiles at regular zoom intervals from them. Only usable for training purposes.
-from data.image_corruptor import ImageCorruptor
+from data.images.image_corruptor import ImageCorruptor
 # Selects the smallest dimension from the image and crops it randomly so the other dimension matches. The cropping
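
The cropping described in the comment above amounts to a random square crop: keep the smaller dimension and randomly slice the larger one down to match it. A minimal sketch of that idea, not the dataset's actual implementation (which also handles tiling and corruption):

import random
import numpy as np

def random_square_crop(img: np.ndarray) -> np.ndarray:
    # HWC layout assumed; crops the larger spatial dimension at a random offset
    # so the result is square.
    h, w = img.shape[:2]
    if h == w:
        return img
    if h < w:
        x = random.randint(0, w - h)
        return img[:, x:x + h, :]
    y = random.randint(0, h - w)
    return img[y:y + w, :, :]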

@@ -1,4 +1,4 @@
-from data.base_unsupervised_image_dataset import BaseUnsupervisedImageDataset
+from data.images.base_unsupervised_image_dataset import BaseUnsupervisedImageDataset
 import numpy as np
 import torch
 from bisect import bisect_left

@@ -2,8 +2,7 @@ import random
 from bisect import bisect_left
 import numpy as np
 import torch
-from torch.utils import data
-from data.base_unsupervised_image_dataset import BaseUnsupervisedImageDataset
+from data.images.base_unsupervised_image_dataset import BaseUnsupervisedImageDataset
 # Builds a dataset composed of a set of folders. Each folder represents a single high resolution image that has been

@@ -1,10 +1,9 @@
-import torch
 from torch.utils.data import Dataset
 import torchvision.transforms as T
 from torchvision import datasets
 # Wrapper for basic pytorch datasets which re-wraps them into a format usable by ExtensibleTrainer.
-from data.cifar import CIFAR100, CIFAR10
+from data.images.cifar import CIFAR100, CIFAR10
 from utils.util import opt_get
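
The wrapper comment above describes adapting stock torchvision datasets to the dict-style batches ExtensibleTrainer consumes. A minimal sketch of that pattern; the key names 'hq' and 'labels' are assumptions for illustration, not taken from this diff:

from torch.utils.data import Dataset
from torchvision import datasets, transforms

class DictDatasetWrapper(Dataset):
    # Hypothetical wrapper: turns (image, label) tuples into keyed dicts.
    def __init__(self, root='.'):
        self.ds = datasets.CIFAR10(root=root, download=True,
                                   transform=transforms.ToTensor())

    def __getitem__(self, i):
        img, label = self.ds[i]
        return {'hq': img, 'labels': label}  # assumed key names

    def __len__(self):
        return len(self.ds)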

@@ -9,7 +9,7 @@ import torchvision
 from kornia import filters
 from torch import nn
-from data.byol_attachment import RandomApply
+from data.images.byol_attachment import RandomApply
 from trainer.networks import register_model, create_model
 from utils.util import checkpoint, opt_get

@@ -4,7 +4,7 @@ import torch
 import torch.nn.functional as F
 from torch import nn
-from data.byol_attachment import reconstructed_shared_regions
+from data.images.byol_attachment import reconstructed_shared_regions
 from models.image_latents.byol.byol_model_wrapper import singleton, EMA, get_module_device, set_requires_grad, \
     update_moving_average
 from trainer.networks import create_model, register_model

@@ -9,7 +9,7 @@ from torch.utils.data import DataLoader
 from torchvision.transforms import ToTensor
 from tqdm import tqdm
-from data.image_folder_dataset import ImageFolderDataset
+from data.images.image_folder_dataset import ImageFolderDataset
 from models.classifiers.resnet_with_checkpointing import resnet50
 # Computes the structural euclidean distance between [x,y]. "Structural" here means the [h,w] dimensions are preserved
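
The comment above is cut off by the diff context, but the operation it describes is a per-pixel euclidean distance taken over the channel dimension so that the [h, w] layout is preserved. A minimal sketch of that reading, as an interpretation rather than the script's actual code:

import torch

def structural_euclidean_distance(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # x, y: [b, c, h, w] feature maps; returns a [b, h, w] distance map.
    return ((x - y) ** 2).sum(dim=1).sqrt()

d = structural_euclidean_distance(torch.randn(1, 64, 32, 32), torch.randn(1, 64, 32, 32))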

@@ -9,7 +9,7 @@ from torch.utils.data import DataLoader
 from torchvision.transforms import ToTensor, Normalize
 from tqdm import tqdm
-from data.image_folder_dataset import ImageFolderDataset
+from data.images.image_folder_dataset import ImageFolderDataset
 from models.segformer.segformer import Segformer
 # Computes the structural euclidean distance between [x,y]. "Structural" here means the [h,w] dimensions are preserved

@@ -10,7 +10,7 @@ from torchvision.transforms import ToTensor, Resize
 from tqdm import tqdm
 import numpy as np
-from data.image_folder_dataset import ImageFolderDataset
+from data.images.image_folder_dataset import ImageFolderDataset
 from models.image_latents.spinenet_arch import SpineNet

@@ -13,7 +13,7 @@ from tqdm import tqdm
 import utils.options as option
 import utils
-from data.image_corruptor import ImageCorruptor
+from data.images.image_corruptor import ImageCorruptor
 from trainer.ExtensibleTrainer import ExtensibleTrainer
 from utils import util

@@ -11,7 +11,7 @@ import torch
 import torchvision
 from PIL import ImageTk
-from data.image_label_parser import VsNetImageLabeler
+from data.images.image_label_parser import VsNetImageLabeler
 from scripts.ui.image_labeler.pretrained_image_patch_classifier import PretrainedImagePatchClassifier
 # Globals used to define state that event handlers might operate on.

@@ -1,6 +1,6 @@
 import orjson
-from data.image_label_parser import VsNetImageLabeler
+from data.images.image_label_parser import VsNetImageLabeler
 # Translates from the label JSON output of the VS.NET UI to something more compact and usable.

@@ -3,7 +3,7 @@ import shutil
 from torch.utils.data import DataLoader
-from data.single_image_dataset import SingleImageDataset
+from data.images.single_image_dataset import SingleImageDataset
 from tqdm import tqdm
 import torch

@@ -5,7 +5,7 @@ import torch
 import torchvision
 from torch.cuda.amp import autocast
-from data.multiscale_dataset import build_multiscale_patch_index_map
+from data.images.multiscale_dataset import build_multiscale_patch_index_map
 from trainer.inject import Injector
 from trainer.losses import extract_params_from_state
 import os.path as osp

@@ -5,7 +5,7 @@ from tqdm import tqdm
 import trainer.eval.evaluator as evaluator
 # Evaluate how close to true Gaussian a flow network predicts in a "normal" pass given a LQ/HQ image pair.
-from data.image_folder_dataset import ImageFolderDataset
+from data.images.image_folder_dataset import ImageFolderDataset
 from models.image_generation.srflow import GaussianDiag
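
"How close to true Gaussian" is usually judged by comparing the empirical moments of the predicted latent against a standard normal. A minimal sketch of that idea only; the evaluator's real code goes through the GaussianDiag import above and is not reproduced here:

import torch

def gaussian_fit_stats(z: torch.Tensor) -> dict:
    # For a perfect fit to N(0, 1) the mean approaches 0 and the std approaches 1.
    z = z.flatten()
    return {'mean': z.mean().item(), 'std': z.std().item()}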

@@ -1,18 +1,13 @@
-import os
 import torch
-import os.path as osp
-import torchvision
 from torch.nn import MSELoss
 from torch.utils.data import DataLoader
 from tqdm import tqdm
 import trainer.eval.evaluator as evaluator
-from pytorch_fid import fid_score
-from data.image_pair_with_corresponding_points_dataset import ImagePairWithCorrespondingPointsDataset
+from data.images.image_pair_with_corresponding_points_dataset import ImagePairWithCorrespondingPointsDataset
 from models.segformer.segformer import Segformer
-from utils.util import opt_get
 # Uses two datasets: a "similar" and "dissimilar" dataset, each of which contains pairs of images and similar/dissimilar
 # points in those datasets. Uses the provided network to compute a latent vector for both similar and dissimilar.
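
The evaluator's comment describes comparing latents sampled at corresponding points of the two images. A minimal sketch of that comparison; the function name, tensor shapes, and the choice of L2 distance are illustrative assumptions, not taken from the diff:

import torch

def point_latent_distance(feats_a: torch.Tensor, feats_b: torch.Tensor,
                          pt_a, pt_b) -> torch.Tensor:
    # feats_*: [b, c, h, w] latents from the network; pt_*: (y, x) indices.
    # Similar pairs should yield small distances, dissimilar pairs large ones.
    va = feats_a[:, :, pt_a[0], pt_a[1]]
    vb = feats_b[:, :, pt_b[0], pt_b[1]]
    return torch.norm(va - vb, dim=1)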

@@ -11,7 +11,7 @@ from pytorch_fid import fid_score
 # Evaluate that feeds a LR structure into the input, then calculates a FID score on the results added to
 # the interpolated LR structure.
-from data.stylegan2_dataset import Stylegan2Dataset
+from data.images.stylegan2_dataset import Stylegan2Dataset
 class SrStyleTransferEvaluator(evaluator.Evaluator):
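
For reference, the FID computation this evaluator relies on (note the pytorch_fid import in the hunk header) is commonly driven by pointing pytorch_fid at two image folders. A small usage sketch with placeholder paths; whether the evaluator uses this folder-based helper or computes FID in memory is not shown in this hunk:

import torch
from pytorch_fid import fid_score

# Hypothetical folders of generated and reference images.
fid = fid_score.calculate_fid_given_paths(
    ['results/generated', 'results/real'],  # placeholder paths
    batch_size=50,
    device='cuda' if torch.cuda.is_available() else 'cpu',
    dims=2048,  # default InceptionV3 feature dimensionality
)
print(f'FID: {fid:.2f}')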