Config updates

James Betker 2020-05-13 09:20:28 -06:00
parent fc3ec8e3a2
commit 037a5a3cdb
7 changed files with 50 additions and 182 deletions

View File

@ -13,7 +13,7 @@ datasets:
mode: LQ
batch_size: 13
dataroot_LQ: ..\..\datasets\kayden\images
start_at: 10000
start_at: 0
#### network structures
network_G:

View File

@ -0,0 +1,28 @@
name: vix_corrupt
suffix: ~ # add suffix to saved images
model: sr
distortion: sr
scale: 4
crop_border: ~ # crop border during evaluation; if None (~), crop `scale` pixels
gpu_ids: [0]
amp_opt_level: O3
datasets:
test_1: # the 1st test dataset
name: vix_lr
mode: LQ
batch_size: 64
dataroot_LQ: K:\\4k6k\\4k_closeup\\lr_corrupted
start_at: 35000 #ready to go.
#### network structures
network_G:
which_model_G: ResGenV2
nf: 192
nb_denoiser: 20
nb_upsampler: 0
upscale_applications: 0
#### path
path:
pretrain_model_G: ../experiments/pretrained_corruptors/decolorize_nonoise.pth
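
For reference, the new test options above are plain YAML. A minimal sketch of loading and inspecting them outside the repo's own option.parse helper (PyYAML assumed available; the nesting of datasets/test_1 and network_G follows the file's layout):

import yaml  # PyYAML; assumed available in the environment

# Path taken from the new default added to the test script later in this commit.
with open('options/test/test_vix_corrupt.yml', 'r') as f:
    opt = yaml.safe_load(f)

print(opt['name'])                                # vix_corrupt
print(opt['network_G']['which_model_G'])          # ResGenV2
print(opt['datasets']['test_1']['batch_size'])    # 64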

View File

@ -1,84 +0,0 @@
#### general settings
name: blacked_fix_and_upconv
use_tb_logger: true
model: srgan
distortion: sr
scale: 4
gpu_ids: [0]
amp_opt_level: O1
#### datasets
datasets:
train:
name: vixcloseup
mode: LQGT
dataroot_GT: /content/hr
dataroot_LQ: /content/lr_corrupted
doCrop: false
use_shuffle: true
n_workers: 12 # per GPU
batch_size: 12
target_size: 256
color: RGB
val:
name: adrianna_val
mode: LQGT
dataroot_GT: /content/val/hhq
dataroot_LQ: /content/val/hr
#### network structures
network_G:
which_model_G: RRDBNet
in_nc: 3
out_nc: 3
nf: 48
nb: 23
network_D:
which_model_D: discriminator_resnet
in_nc: 3
nf: 48
#### path
path:
pretrain_model_G: experiments/7000_G.pth
pretrain_model_D: experiments/7000_D.pth
strict_load: true
resume_state: ~
#### training settings: learning rate scheme, loss
train:
lr_G: !!float 6e-5
weight_decay_G: 0
beta1_G: 0.9
beta2_G: 0.99
lr_D: !!float 8e-5
weight_decay_D: 0
beta1_D: 0.9
beta2_D: 0.99
lr_scheme: MultiStepLR
niter: 400000
warmup_iter: -1 # no warm up
lr_steps: [20000, 40000, 60000, 80000]
lr_gamma: 0.5
pixel_criterion: l1
pixel_weight: !!float 1e-2
feature_criterion: l1
feature_weight: 1
feature_weight_decay: 1
feature_weight_decay_steps: 500
feature_weight_minimum: 1
gan_type: gan # gan | ragan
gan_weight: !!float 1e-2
D_update_ratio: 1
D_init_iters: 0
manual_seed: 10
val_freq: !!float 5e2
#### logger
logger:
print_freq: 50
save_checkpoint_freq: !!float 5e2
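
For context on the schedule this removed config used: MultiStepLR with lr_steps [20000, 40000, 60000, 80000] and lr_gamma 0.5 halves the rate at each milestone, so lr_G falls from 6e-5 to 6e-5 * 0.5^4 = 3.75e-6 by 80k iterations. A minimal sketch with a placeholder parameter standing in for the generator (standard PyTorch, not the repo's training loop):

import torch

# Placeholder parameter standing in for the generator weights.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.Adam(params, lr=6e-5, betas=(0.9, 0.99), weight_decay=0)

# Mirrors lr_scheme: MultiStepLR with the lr_steps / lr_gamma values above.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[20000, 40000, 60000, 80000], gamma=0.5)

for it in range(100000):
    # ... optimizer.step() on the actual losses would go here ...
    scheduler.step()
    if it + 1 in (20000, 40000, 60000, 80000):
        print(it + 1, optimizer.param_groups[0]['lr'])
# Prints 3e-05, 1.5e-05, 7.5e-06, 3.75e-06 at the four milestones.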

View File

@ -1,83 +0,0 @@
#### general settings
name: corruptGAN_4k_lqprn_closeup_hq_to_hq
use_tb_logger: true
model: corruptgan
distortion: downsample
scale: 1
gpu_ids: [0]
amp_opt_level: O0
#### datasets
datasets:
train:
name: blacked
mode: downsample
dataroot_GT: K:\\4k6k\\4k_closeup\\hr
dataroot_LQ: E:\\4k6k\\adrianna\\for_training\\hr
mismatched_Data_OK: true
doCrop: false
use_shuffle: true
n_workers: 4 # per GPU
batch_size: 16
target_size: 256
use_flip: false
use_rot: false
color: RGB
val:
name: blacked_val
mode: downsample
target_size: 256
dataroot_GT: ../datasets/blacked/val/hr
dataroot_LQ: ../datasets/blacked/val/hr
#### network structures
network_G:
which_model_G: HighToLowResNet
in_nc: 3
out_nc: 3
nf: 16
network_D:
which_model_D: discriminator_vgg_128
in_nc: 3
nf: 96
#### path
path:
pretrain_model_G: ~
resume_state: ~
strict_load: true
#### training settings: learning rate scheme, loss
train:
lr_G: !!float 1e-4
weight_decay_G: 0
beta1_G: 0.9
beta2_G: 0.99
lr_D: !!float 1e-4
weight_decay_D: 0
beta1_D: 0.9
beta2_D: 0.99
lr_scheme: MultiStepLR
niter: 400000
warmup_iter: -1 # no warm up
lr_steps: [4000, 8000, 12000, 15000, 20000]
lr_gamma: 0.5
pixel_criterion: l1
pixel_weight: !!float 1e-2
feature_criterion: l1
feature_weight: 0
gan_type: ragan # gan | ragan
gan_weight: !!float 1e-1
D_update_ratio: 1
D_init_iters: 0
manual_seed: 10
val_freq: !!float 5e2
#### logger
logger:
print_freq: 50
save_checkpoint_freq: !!float 5e2
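
Both corruptor configs switch gan_type between gan and ragan. For context, a minimal sketch of the two discriminator losses as commonly formulated in ESRGAN-style code (logits d_real and d_fake are assumed to come from the discriminator; this is not the repo's exact loss module):

import torch
import torch.nn.functional as F

def gan_d_loss(d_real, d_fake):
    # Standard GAN: real logits pushed toward 1, fake logits toward 0.
    return (F.binary_cross_entropy_with_logits(d_real, torch.ones_like(d_real)) +
            F.binary_cross_entropy_with_logits(d_fake, torch.zeros_like(d_fake)))

def ragan_d_loss(d_real, d_fake):
    # Relativistic average GAN: each logit is judged relative to the mean
    # logit of the opposite class before the BCE, as popularized by ESRGAN.
    return (F.binary_cross_entropy_with_logits(d_real - d_fake.mean(), torch.ones_like(d_real)) +
            F.binary_cross_entropy_with_logits(d_fake - d_real.mean(), torch.zeros_like(d_fake))) / 2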

View File

@ -5,19 +5,19 @@ model: corruptgan
distortion: downsample
scale: 1
gpu_ids: [0]
amp_opt_level: O1
amp_opt_level: O0
#### datasets
datasets:
train:
name: blacked
mode: downsample
dataroot_GT: K:\\4k6k\\4k_closeup\\lr_corrupted
dataroot_GT: E:\\4k6k\\datasets\\vixen\\4k_closeup\\lr_corrupted
dataroot_LQ: G:\\data\\pr_upsample\\ultra_lowq\\for_training\\lr
mismatched_Data_OK: true
use_shuffle: true
n_workers: 0 # per GPU
batch_size: 1
batch_size: 24
target_size: 64
use_flip: false
use_rot: false
@ -33,26 +33,27 @@ datasets:
#### network structures
network_G:
which_model_G: ResGenV2
nf: 16
nb_denoiser: 5
nf: 192
nb_denoiser: 20
nb_upsampler: 0
upscale_applications: 0
inject_noise: False
network_D:
which_model_D: discriminator_vgg_128
in_nc: 3
nf: 16
nf: 64
#### path
path:
pretrain_model_G: ~
pretrain_model_D: ~
resume_state: ~
resume_state: ../experiments/train_vix_corrupt/training_state/19000.state
strict_load: true
#### training settings: learning rate scheme, loss
train:
lr_G: !!float 5e-5
lr_G: !!float 1e-4
weight_decay_G: 0
beta1_G: 0.9
beta2_G: 0.99
@ -60,19 +61,25 @@ train:
weight_decay_D: 0
beta1_D: 0.9
beta2_D: 0.99
D_noise_theta_init: .01
D_noise_final_it: 20000
D_noise_theta_floor: .005
lr_scheme: MultiStepLR
niter: 400000
warmup_iter: -1 # no warm up
lr_steps: [30000, 60000, 100000, 200000]
lr_steps: [15000, 50000, 100000, 200000]
lr_gamma: 0.5
pixel_criterion: l2
pixel_weight: !!float 1e-2
feature_criterion: l1
feature_weight: 0
gan_type: gan # gan | ragan
gan_weight: !!float 1e-1
feature_weight: 1.0
feature_weight_decay: .98
feature_weight_decay_steps: 1000
feature_weight_minimum: .5
gan_type: ragan # gan | ragan
gan_weight: .1
mega_batch_factor: 1
D_update_ratio: 1
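
The keys introduced above (D_noise_theta_init / D_noise_final_it / D_noise_theta_floor and the feature_weight_decay group) are not defined in this diff, so the following is only one plausible reading: the discriminator input-noise std anneals from the init value down to the floor over D_noise_final_it iterations, and the feature (perceptual) weight is multiplied by feature_weight_decay every feature_weight_decay_steps iterations until it reaches feature_weight_minimum. Names and semantics here are assumptions, not the repo's implementation:

def d_noise_theta(it, theta_init=0.01, theta_floor=0.005, final_it=20000):
    # Assumed linear anneal of the discriminator input-noise std.
    if it >= final_it:
        return theta_floor
    return theta_init + (it / final_it) * (theta_floor - theta_init)

def feature_weight(it, w_init=1.0, decay=0.98, decay_steps=1000, minimum=0.5):
    # Assumed stepwise exponential decay of the perceptual-loss weight with a floor.
    return max(minimum, w_init * decay ** (it // decay_steps))

# Example: at iteration 50,000 the noise std has settled at its floor (0.005)
# and the feature weight has decayed to max(0.5, 0.98**50) == 0.5.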

View File

@ -15,7 +15,7 @@ if __name__ == "__main__":
#### options
want_just_images = True
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to options YAML file.', default='options/test/test_ESRGAN_adrianna_full.yml')
parser.add_argument('-opt', type=str, help='Path to options YAML file.', default='options/test/test_vix_corrupt.yml')
opt = option.parse(parser.parse_args().opt, is_train=False)
opt = option.dict_to_nonedict(opt)
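
For reference, option.dict_to_nonedict appears to wrap the parsed options so that absent keys read as None rather than raising; a minimal sketch of that idea (an assumption about the helper's intent, not its actual code):

class NoneDict(dict):
    # Missing keys read as None, so optional config entries can simply be omitted.
    def __missing__(self, key):
        return None

def to_nonedict(opt):
    # Recursively convert nested dicts (and lists of dicts) into NoneDicts.
    if isinstance(opt, dict):
        return NoneDict({k: to_nonedict(v) for k, v in opt.items()})
    if isinstance(opt, list):
        return [to_nonedict(v) for v in opt]
    return opt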

View File

@ -30,7 +30,7 @@ def init_dist(backend='nccl', **kwargs):
def main():
#### options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='options/train/train_ESRGAN_blacked_xl.yml')
parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='options/train/train_vix_corrupt.yml')
parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)