From 602f86bfa46ef965e50591dd75fa969622ff6fa9 Mon Sep 17 00:00:00 2001
From: James Betker
Date: Wed, 6 May 2020 17:25:48 -0600
Subject: [PATCH] Random config changes

---
 .../test/test_ESRGAN_adrianna_full.yml        |  2 +-
 .../options/train/train_ESRGAN_blacked_xl.yml | 22 ++++++++++---------
 codes/options/train/train_ESRGAN_res.yml      |  8 ++++---
 codes/train.py                                |  4 ++--
 4 files changed, 20 insertions(+), 16 deletions(-)

diff --git a/codes/options/test/test_ESRGAN_adrianna_full.yml b/codes/options/test/test_ESRGAN_adrianna_full.yml
index 8d64c2b9..c006f68a 100644
--- a/codes/options/test/test_ESRGAN_adrianna_full.yml
+++ b/codes/options/test/test_ESRGAN_adrianna_full.yml
@@ -10,7 +10,7 @@ datasets:
   test_1:  # the 1st test dataset
     name: set5
     mode: LQ
-    batch_size: 16
+    batch_size: 3
     dataroot_LQ: ..\..\datasets\adrianna\full_extract
 
 #### network structures
diff --git a/codes/options/train/train_ESRGAN_blacked_xl.yml b/codes/options/train/train_ESRGAN_blacked_xl.yml
index 39467615..871c72a7 100644
--- a/codes/options/train/train_ESRGAN_blacked_xl.yml
+++ b/codes/options/train/train_ESRGAN_blacked_xl.yml
@@ -12,12 +12,12 @@ datasets:
   train:
     name: vixcloseup
     mode: LQGT
-    dataroot_GT: K:\4k6k\4k_closeup\hr
-    dataroot_LQ: K:\4k6k\4k_closeup\lr_corrupted
+    dataroot_GT: E:\4k6k\datasets\4k_closeup\hr
+    dataroot_LQ: [E:\4k6k\datasets\4k_closeup\lr_corrupted, E:\4k6k\datasets\4k_closeup\lr_c_blurred]
     doCrop: false
     use_shuffle: true
-    n_workers: 10  # per GPU
-    batch_size: 16
+    n_workers: 12  # per GPU
+    batch_size: 15
     target_size: 256
     color: RGB
   val:
@@ -30,16 +30,18 @@ datasets:
 network_G:
   which_model_G: ResGen
   nf: 256
+  nb_denoiser: 20
+  nb_upsampler: 10
 network_D:
   which_model_D: discriminator_resnet_passthrough
-  nf: 42
+  nf: 64
 
 #### path
 path:
-  #pretrain_model_G: ../experiments/blacked_fix_and_upconv_xl_part1/models/3000_G.pth
+  #pretrain_model_G: ../experiments/pretrained_resnet_G.pth
   #pretrain_model_D: ~
   strict_load: true
-  resume_state: ~
+  resume_state: ../experiments/blacked_fix_and_upconv_xl/training_state/2500.state
 
 #### training settings: learning rate scheme, loss
 train:
@@ -57,7 +59,7 @@ train:
   warmup_iter: -1  # no warm up
   lr_steps: [20000, 40000, 50000, 60000]
   lr_gamma: 0.5
-  mega_batch_factor: 2
+  mega_batch_factor: 3
 
   pixel_criterion: l1
   pixel_weight: !!float 1e-2
@@ -69,8 +71,8 @@ train:
   gan_type: gan  # gan | ragan
   gan_weight: !!float 1e-2
 
-  D_update_ratio: 1
-  D_init_iters: -1
+  D_update_ratio: 2
+  D_init_iters: 0
 
   manual_seed: 10
   val_freq: !!float 5e2
diff --git a/codes/options/train/train_ESRGAN_res.yml b/codes/options/train/train_ESRGAN_res.yml
index 80f4249b..0e42b883 100644
--- a/codes/options/train/train_ESRGAN_res.yml
+++ b/codes/options/train/train_ESRGAN_res.yml
@@ -16,7 +16,7 @@ datasets:
     dataroot_LQ: E:/4k6k/datasets/div2k/DIV2K800_sub_bicLRx4
 
     use_shuffle: true
-    n_workers: 10  # per GPU
+    n_workers: 0  # per GPU
     batch_size: 24
     target_size: 128
     use_flip: true
@@ -32,6 +32,8 @@ datasets:
 network_G:
   which_model_G: ResGen
   nf: 256
+  nb_denoiser: 2
+  nb_upsampler: 28
 network_D:
   which_model_D: discriminator_resnet_passthrough
   nf: 42
@@ -41,7 +43,7 @@ path:
   #pretrain_model_G: ../experiments/blacked_fix_and_upconv_xl_part1/models/3000_G.pth
   #pretrain_model_D: ~
   strict_load: true
-  resume_state: ~
+  resume_state: ../experiments/esrgan_res/training_state/15500.state
 
 #### training settings: learning rate scheme, loss
 train:
@@ -71,7 +73,7 @@ train:
   gan_type: gan  # gan | ragan
   gan_weight: !!float 1e-2
 
-  D_update_ratio: 1
+  D_update_ratio: 2
   D_init_iters: -1
 
   manual_seed: 10
diff --git a/codes/train.py b/codes/train.py
index ba2c2e45..18691a4d 100644
--- a/codes/train.py
+++ b/codes/train.py
@@ -30,7 +30,7 @@ def init_dist(backend='nccl', **kwargs):
 def main():
     #### options
     parser = argparse.ArgumentParser()
-    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='options/train/train_ESRGAN_res.yml')
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='options/train/train_ESRGAN_blacked_xl.yml')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                         help='job launcher')
     parser.add_argument('--local_rank', type=int, default=0)
@@ -147,7 +147,7 @@ def main():
         current_step = resume_state['iter']
         model.resume_training(resume_state)  # handle optimizers and schedulers
     else:
-        current_step = 0
+        current_step = -1
         start_epoch = 0
 
     #### training
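
Editor's note (not part of the original patch): since the train.py hunk changes the
default value of the -opt flag, running the trainer with no arguments now picks up
the blacked_xl config. A minimal invocation from the codes/ directory, assuming the
dataset roots and the resume_state file referenced above exist on disk:

    python train.py -opt options/train/train_ESRGAN_blacked_xl.yml

(the explicit -opt flag shown here is equivalent to the new default). Note also that
both train configs now point resume_state at a concrete .state file rather than ~,
so a fresh checkout without those experiment directories will presumably fail when
the training state is loaded; restoring resume_state: ~ trains from scratch instead.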