Remove working options from repo

James Betker 2020-05-15 07:50:55 -06:00
parent bd4d478572
commit cdf641e3e2
9 changed files with 1 addition and 558 deletions

.gitignore

@@ -2,6 +2,7 @@ experiments/*
 results/*
 tb_logger/*
 datasets/*
+options/*
 .vscode
 *.html


@@ -1,28 +0,0 @@
name: adrianna
suffix: ~ # add suffix to saved images
model: sr
distortion: sr
scale: 4
crop_border: ~ # crop border when evaluation. If None(~), crop the scale pixels
gpu_ids: [0]
amp_opt_level: O3

datasets:
  test_1: # the 1st test dataset
    name: kayden
    mode: LQ
    batch_size: 13
    dataroot_LQ: ..\..\datasets\kayden\images
    start_at: 0

#### network structures
network_G:
  which_model_G: ResGen
  nf: 256
  nb_denoiser: 20
  nb_upsampler: 10
  upscale_applications: 1

#### path
path:
  pretrain_model_G: ../experiments/resgen_vgg_disc_vixen_40000.pth
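
These option files are plain YAML, so they can be inspected with any YAML loader. A minimal sketch of reading one with PyYAML (the filename is hypothetical, and the repo's own option parser may resolve paths and defaults differently):

import yaml

# Hypothetical filename; the actual working option files were removed in this commit.
with open("options/test/test_adrianna.yml", "r") as f:
    opt = yaml.safe_load(f)

print(opt["model"])                              # 'sr'
print(opt["crop_border"])                        # None: '~' is YAML null
print(opt["datasets"]["test_1"]["batch_size"])   # 13
print(opt["network_G"]["which_model_G"])         # 'ResGen'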


@@ -1,29 +0,0 @@
name: RRDB_ESRGAN_x4
suffix: ~ # add suffix to saved images
model: corruptgan
distortion: downsample
scale: 4
crop_border: ~ # crop border when evaluation. If None(~), crop the scale pixels
gpu_ids: [0]

datasets:
  test_1: # the 1st test dataset
    name: vixen
    mode: downsample
    dataroot_GT: K:\4k6k\vixen4k\hr
    dataroot_LQ: K:\4k6k\vixen4k\lr
    batch_size: 100
    n_workers: 4 # per GPU
    target_size: 64

#### network structures
network_G:
  which_model_G: HighToLowResNet
  in_nc: 3
  out_nc: 3
  nf: 128
  nb: 30

#### path
path:
  pretrain_model_G: ../experiments/blacked_adrianna_corrupt_G.pth


@@ -1,28 +0,0 @@
name: vix_corrupt
suffix: ~ # add suffix to saved images
model: sr
distortion: sr
scale: 4
crop_border: ~ # crop border when evaluation. If None(~), crop the scale pixels
gpu_ids: [0]
amp_opt_level: O3

datasets:
  test_1: # the 1st test dataset
    name: vix_lr
    mode: LQ
    batch_size: 64
    dataroot_LQ: K:\\4k6k\\4k_closeup\\lr_corrupted
    start_at: 35000 # ready to go.

#### network structures
network_G:
  which_model_G: ResGenV2
  nf: 192
  nb_denoiser: 20
  nb_upsampler: 0
  upscale_applications: 0

#### path
path:
  pretrain_model_G: ../experiments/pretrained_corruptors/decolorize_nonoise.pth


@@ -1,90 +0,0 @@
#### general settings
name: corruptGAN_4k_lqprn_closeup_flat_net
use_tb_logger: true
model: corruptgan
distortion: downsample
scale: 4
gpu_ids: [0]
amp_opt_level: O1

#### datasets
datasets:
  train:
    name: blacked
    mode: downsample
    dataroot_GT: K:\\4k6k\\4k_closeup\\hr
    dataroot_LQ: E:\\4k6k\\datasets\\ultra_lowq\\for_training
    mismatched_Data_OK: true
    use_shuffle: true
    n_workers: 8 # per GPU
    batch_size: 32
    target_size: 64
    use_flip: false
    use_rot: false
    doCrop: false
    color: RGB
  val:
    name: blacked_val
    mode: downsample
    target_size: 64
    dataroot_GT: E:\\4k6k\\datasets\\blacked\\val\\hr
    dataroot_LQ: E:\\4k6k\\datasets\\blacked\\val\\lr

#### network structures
network_G:
  which_model_G: FlatProcessorNet
  in_nc: 3
  out_nc: 3
  nf: 32
  ra_blocks: 6
  assembler_blocks: 4
network_D:
  #which_model_D: discriminator_vgg_128
  #in_nc: 3
  #nf: 64
  which_model_D: discriminator_resnet
  in_nc: 3
  nf: 64

#### path
path:
  pretrain_model_G: ../experiments/corruptGAN_4k_lqprn_closeup_flat_net/models/29000_G.pth
  pretrain_model_D: ../experiments/corruptGAN_4k_lqprn_closeup_flat_net/models/29000_D.pth
  resume_state: ../experiments/corruptGAN_4k_lqprn_closeup_flat_net/training_state/29000.state
  strict_load: true

#### training settings: learning rate scheme, loss
train:
  lr_G: !!float 5e-5
  weight_decay_G: 0
  beta1_G: 0.9
  beta2_G: 0.99
  lr_D: !!float 1e-4
  weight_decay_D: 0
  beta1_D: 0.9
  beta2_D: 0.99

  lr_scheme: MultiStepLR
  niter: 400000
  warmup_iter: -1 # no warm up
  lr_steps: [12000, 24000, 36000, 48000, 64000]
  lr_gamma: 0.5

  pixel_criterion: l2
  pixel_weight: !!float 1e-2
  feature_criterion: l1
  feature_weight: 0
  gan_type: gan # gan | ragan
  gan_weight: !!float 1e-1

  D_update_ratio: 1
  D_init_iters: -1

  manual_seed: 10
  val_freq: !!float 5e2

#### logger
logger:
  print_freq: 50
  save_checkpoint_freq: !!float 5e2
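
The train block above reads like standard Adam hyperparameters plus a MultiStepLR schedule; the !!float tags are presumably there because PyYAML's YAML 1.1 resolver parses a bare 5e-5 as a string rather than a float. A minimal sketch of how those fields would typically map onto PyTorch (the repo's actual trainer wiring is not shown in this commit and may differ):

import torch

# Values copied from the config above; the generator module is a stand-in.
lr_G, beta1_G, beta2_G, weight_decay_G = 5e-5, 0.9, 0.99, 0
lr_steps, lr_gamma = [12000, 24000, 36000, 48000, 64000], 0.5

net_G = torch.nn.Conv2d(3, 3, 3)  # placeholder for the real network_G
optimizer_G = torch.optim.Adam(net_G.parameters(), lr=lr_G,
                               weight_decay=weight_decay_G,
                               betas=(beta1_G, beta2_G))
scheduler_G = torch.optim.lr_scheduler.MultiStepLR(optimizer_G,
                                                   milestones=lr_steps,
                                                   gamma=lr_gamma)

for step in range(100):   # schematic loop; loss computation and backward omitted
    optimizer_G.step()
    scheduler_G.step()     # LR is halved at each milestone in lr_steps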


@@ -1,83 +0,0 @@
#### general settings
name: resgen_movies
use_tb_logger: true
model: srgan
distortion: sr
scale: 4
gpu_ids: [0]
amp_opt_level: O1

#### datasets
datasets:
  train:
    name: movies
    mode: LQGT
    dataroot_GT: F:\\upsample_reg\\for_training\\hr
    dataroot_LQ: F:\\upsample_reg\\for_training\\lr_corrupted
    doCrop: false
    use_shuffle: true
    n_workers: 8 # per GPU
    batch_size: 8
    target_size: 256
    color: RGB
  val:
    name: movies_val
    mode: LQGT
    dataroot_GT: F:\\upsample_reg\\for_training\\val\\hr
    dataroot_LQ: F:\\upsample_reg\\for_training\\val\\lr

#### network structures
network_G:
  which_model_G: ResGenV2
  nf: 256
  nb_denoiser: 20
  nb_upsampler: 10
network_D:
  which_model_D: discriminator_resnet_passthrough
  nf: 64

#### path
path:
  #pretrain_model_G: ~
  #pretrain_model_D: ~
  strict_load: true
  resume_state: ~

#### training settings: learning rate scheme, loss
train:
  lr_G: !!float 6e-5
  weight_decay_G: 0
  beta1_G: 0.9
  beta2_G: 0.99
  lr_D: !!float 6e-5
  weight_decay_D: 0
  beta1_D: 0.9
  beta2_D: 0.99

  lr_scheme: MultiStepLR
  niter: 400000
  warmup_iter: -1 # no warm up
  lr_steps: [20000, 60000, 80000, 100000]
  lr_gamma: 0.5
  mega_batch_factor: 1

  pixel_criterion: l1
  pixel_weight: !!float 1e-2
  feature_criterion: l1
  feature_weight: 1
  feature_weight_decay: 1
  feature_weight_decay_steps: 1
  feature_weight_minimum: 1
  gan_type: gan # gan | ragan
  gan_weight: !!float 1e-2

  D_update_ratio: 1
  D_init_iters: 0

  manual_seed: 10
  val_freq: !!float 5e2

#### logger
logger:
  print_freq: 50
  save_checkpoint_freq: !!float 5e2
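
The pixel/feature/gan weights in this file follow the usual SRGAN-style composite objective: a pixel L1 term, a feature-space (perceptual) L1 term, and an adversarial term. A sketch of that combination, assuming the conventional formulation rather than this repo's exact loss code; vgg_features and disc are hypothetical callables for the feature extractor and discriminator:

import torch
import torch.nn.functional as F

def generator_loss(fake, real, vgg_features, disc,
                   pixel_weight=1e-2, feature_weight=1.0, gan_weight=1e-2):
    # pixel_criterion: l1
    l_pix = pixel_weight * F.l1_loss(fake, real)
    # feature_criterion: l1, computed in the feature extractor's space
    l_fea = feature_weight * F.l1_loss(vgg_features(fake), vgg_features(real).detach())
    # gan_type: gan -> non-saturating BCE on the discriminator's logits for the fake
    pred_fake = disc(fake)
    l_gan = gan_weight * F.binary_cross_entropy_with_logits(
        pred_fake, torch.ones_like(pred_fake))
    return l_pix + l_fea + l_gan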


@@ -1,97 +0,0 @@
#### general settings
name: train_vix_corrupt
use_tb_logger: true
model: corruptgan
distortion: downsample
scale: 1
gpu_ids: [0]
amp_opt_level: O0

#### datasets
datasets:
  train:
    name: blacked
    mode: downsample
    dataroot_GT: E:\\4k6k\\datasets\\vixen\\4k_closeup\\lr_corrupted
    dataroot_LQ: G:\\data\\pr_upsample\\ultra_lowq\\for_training\\lr
    mismatched_Data_OK: true
    use_shuffle: true
    n_workers: 12 # per GPU
    batch_size: 24
    target_size: 64
    use_flip: false
    use_rot: false
    doCrop: false
    color: RGB
  val:
    name: blacked_val
    mode: downsample
    target_size: 64
    dataroot_GT: E:\\4k6k\\datasets\\vixen\\val
    dataroot_LQ: E:\\4k6k\\datasets\\vixen\\val

#### network structures
network_G:
  which_model_G: ResGenV2
  nf: 192
  nb_denoiser: 20
  nb_upsampler: 0
  upscale_applications: 0
  inject_noise: False
network_D:
  which_model_D: discriminator_vgg_128
  in_nc: 3
  nf: 64

#### path
path:
  pretrain_model_G: ~
  pretrain_model_D: ~
  resume_state: ../experiments/train_vix_corrupt/training_state/47000.state
  strict_load: true

#### training settings: learning rate scheme, loss
train:
  lr_G: !!float 1e-4
  weight_decay_G: 0
  beta1_G: 0.9
  beta2_G: 0.99
  lr_D: !!float 1e-4
  weight_decay_D: 0
  beta1_D: 0.9
  beta2_D: 0.99
  D_noise_theta_init: .01
  D_noise_final_it: 20000
  D_noise_theta_floor: .005

  lr_scheme: MultiStepLR
  niter: 400000
  warmup_iter: -1 # no warm up
  lr_steps: [15000, 50000, 100000, 200000]
  lr_gamma: 0.5

  pixel_criterion: l2
  pixel_weight: !!float 1e-2
  feature_criterion: l1
  feature_weight: .5
  feature_weight_decay: .98
  feature_weight_decay_steps: 1000
  feature_weight_minimum: .5
  gan_type: ragan # gan | ragan
  gan_weight: .1

  mega_batch_factor: 1
  swapout_G_freq: 113
  swapout_D_freq: 223
  swapout_duration: 40

  D_update_ratio: 1
  D_init_iters: -1

  manual_seed: 10
  val_freq: !!float 5e2

#### logger
logger:
  print_freq: 50
  save_checkpoint_freq: 500
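
The D_noise_theta_* fields suggest instance noise on the discriminator inputs, with a standard deviation annealed from D_noise_theta_init down to D_noise_theta_floor by iteration D_noise_final_it. That reading is an assumption; a sketch of one plausible schedule:

import torch

def d_noise_theta(step, theta_init=0.01, theta_floor=0.005, final_it=20000):
    # Assumed schedule: linear anneal from theta_init to theta_floor over final_it steps.
    if step >= final_it:
        return theta_floor
    return theta_init + (step / final_it) * (theta_floor - theta_init)

def noisy_disc_input(x, step):
    # Add Gaussian instance noise before the discriminator (interpretation, not repo code).
    return x + torch.randn_like(x) * d_noise_theta(step)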


@@ -1,97 +0,0 @@
#### general settings
name: train_vix_corrupt_tiled
use_tb_logger: true
model: corruptgan
distortion: downsample
scale: 1
gpu_ids: [0]
amp_opt_level: O0

#### datasets
datasets:
  train:
    name: vix_corrupt
    mode: downsample
    dataroot_GT: H:\\vix\\lr
    dataroot_LQ: H:\\ultra_lq\\tiled\\lr
    mismatched_Data_OK: true
    use_shuffle: true
    n_workers: 14 # per GPU
    batch_size: 48
    target_size: 64
    use_flip: true
    use_rot: true
    doCrop: false
    color: RGB
  val:
    name: vix_val
    mode: downsample
    target_size: 64
    dataroot_GT: E:\\4k6k\\datasets\\vixen\\val
    dataroot_LQ: E:\\4k6k\\datasets\\vixen\\val

#### network structures
network_G:
  which_model_G: ResGenV2
  nf: 192
  nb_denoiser: 20
  nb_upsampler: 0
  upscale_applications: 0
  inject_noise: False
network_D:
  which_model_D: discriminator_vgg_128
  in_nc: 3
  nf: 64

#### path
path:
  pretrain_model_G: ~
  pretrain_model_D: ~
  resume_state: ../experiments/train_vix_corrupt_tiled/training_state/16000.state
  strict_load: true

#### training settings: learning rate scheme, loss
train:
  lr_G: !!float 1e-4
  weight_decay_G: 0
  beta1_G: 0.9
  beta2_G: 0.99
  lr_D: !!float 1e-4
  weight_decay_D: 0
  beta1_D: 0.9
  beta2_D: 0.99
  D_noise_theta_init: .01
  D_noise_final_it: 20000
  D_noise_theta_floor: .005

  lr_scheme: MultiStepLR
  niter: 400000
  warmup_iter: -1 # no warm up
  lr_steps: [15000, 50000, 100000, 200000]
  lr_gamma: 0.5

  pixel_criterion: l2
  pixel_weight: !!float 1e-2
  feature_criterion: l1
  feature_weight: .6
  feature_weight_decay: .98
  feature_weight_decay_steps: 1000
  feature_weight_minimum: .5
  gan_type: ragan # gan | ragan
  gan_weight: .1

  mega_batch_factor: 2
  swapout_G_freq: 113
  swapout_D_freq: 223
  swapout_duration: 40

  D_update_ratio: 1
  D_init_iters: -1

  manual_seed: 10
  val_freq: !!float 5e2

#### logger
logger:
  print_freq: 50
  save_checkpoint_freq: 500
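
The feature_weight_decay trio in this file (start .6, decay .98 every 1000 steps, floor .5) looks like a stepwise multiplicative decay on the perceptual-loss weight. That interpretation is a guess; a sketch:

def feature_weight_at(step, start=0.6, decay=0.98, decay_steps=1000, minimum=0.5):
    # Assumed: multiply by `decay` once per `decay_steps` iterations, clamped at `minimum`.
    return max(start * decay ** (step // decay_steps), minimum)

# feature_weight_at(0) == 0.6; feature_weight_at(10_000) ~= 0.49, so it clamps to 0.5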


@@ -1,106 +0,0 @@
#### general settings
name: train_vix_resgenv2
use_tb_logger: true
model: srgan
distortion: sr
scale: 4
gpu_ids: [0]
amp_opt_level: O1

#### datasets
datasets:
  train:
    name: vixcloseup
    mode: LQGT
    dataroot_GT: E:\4k6k\datasets\vixen\vix_tiled\hr
    dataroot_LQ: E:\4k6k\datasets\vixen\vix_tiled\lr
    use_flip: true
    use_rot: true
    doCrop: false
    use_shuffle: true
    n_workers: 4 # per GPU
    batch_size: 8
    target_size: 256
    color: RGB
  val:
    name: adrianna_val
    mode: LQGT
    dataroot_GT: E:\4k6k\datasets\adrianna\val\hhq
    dataroot_LQ: E:\4k6k\datasets\adrianna\val\hr

#### network structures
network_G:
  which_model_G: ResGenV2
  nf: 256
  nb_denoiser: 20
  nb_upsampler: 10
  upscale_applications: 2
network_D:
  which_model_D: discriminator_resnet_passthrough
  nf: 64
# LR corruption network.
network_C:
  which_model_G: ResGenV2
  nf: 192
  nb_denoiser: 20
  nb_upsampler: 0
  upscale_applications: 0
  inject_noise: False

#### path
path:
  pretrained_corruptors_dir: ../experiments/pretrained_corruptors
  #pretrain_model_G: ../experiments/pretrained_resnet_G.pth
  #pretrain_model_D: ~
  strict_load: true
  resume_state: ~

#### training settings: learning rate scheme, loss
train:
  lr_G: !!float 1e-4
  weight_decay_G: 0
  beta1_G: 0.9
  beta2_G: 0.99
  lr_D: !!float 1e-4
  weight_decay_D: 0
  beta1_D: 0.9
  beta2_D: 0.99

  lr_scheme: MultiStepLR
  niter: 400000
  warmup_iter: -1 # no warm up
  lr_steps: [20000, 60000, 80000, 100000]
  lr_gamma: 0.5

  mega_batch_factor: 2
  swapout_G_freq: 113
  swapout_D_freq: 223
  swapout_duration: 40
  corruptor_swapout_steps: 1000

  pixel_criterion: l1
  pixel_weight: .01
  feature_criterion: l1
  feature_weight: 1
  feature_weight_decay: 1
  feature_weight_decay_steps: 500
  feature_weight_minimum: 1
  gan_type: ragan # gan | ragan
  gan_weight: .01

  D_update_ratio: 1
  D_init_iters: 0
  D_noise_theta_init: .005 # Just fixed noise.
  D_noise_final_it: 1
  D_noise_theta_floor: .005

  manual_seed: 10
  val_freq: !!float 5e2

#### logger
logger:
  print_freq: 50
  save_checkpoint_freq: !!float 5e2
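
mega_batch_factor: 2 plausibly means each batch is split into that many chunks which are forwarded and back-propagated separately (gradient accumulation), trading speed for memory. A minimal sketch under that assumption; net, loss_fn, and optimizer are placeholders:

import torch

def accumulated_step(net, loss_fn, batch, optimizer, mega_batch_factor=2):
    optimizer.zero_grad()
    for chunk in torch.chunk(batch, mega_batch_factor, dim=0):
        loss = loss_fn(net(chunk)) / mega_batch_factor  # average across chunks
        loss.backward()                                 # gradients accumulate in .grad
    optimizer.step()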