forked from mrq/DL-Art-School

Random config changes

commit 602f86bfa4 (parent 574e7e882b)
@@ -10,7 +10,7 @@ datasets:
   test_1: # the 1st test dataset
     name: set5
     mode: LQ
-    batch_size: 16
+    batch_size: 3
     dataroot_LQ: ..\..\datasets\adrianna\full_extract
 
 #### network structures
@@ -12,12 +12,12 @@ datasets:
   train:
     name: vixcloseup
     mode: LQGT
-    dataroot_GT: K:\4k6k\4k_closeup\hr
-    dataroot_LQ: K:\4k6k\4k_closeup\lr_corrupted
+    dataroot_GT: E:\4k6k\datasets\4k_closeup\hr
+    dataroot_LQ: [E:\4k6k\datasets\4k_closeup\lr_corrupted, E:\4k6k\datasets\4k_closeup\lr_c_blurred]
     doCrop: false
     use_shuffle: true
-    n_workers: 10 # per GPU
-    batch_size: 16
+    n_workers: 12 # per GPU
+    batch_size: 15
     target_size: 256
     color: RGB
   val:
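Note on the hunk above: dataroot_LQ goes from a single path to a YAML list of two roots (the corrupted and the blurred extracts). I have not verified how this fork's LQGT dataset consumes a list, but the usual pattern is to resolve each LQ file against whichever listed root contains it, or to sample among them. A hypothetical sketch of that pattern, not the repository's actual loader:

# Hypothetical helper (not this fork's LQGT dataset): resolve an LQ file name
# against several roots and sample among the roots that actually contain it.
import os
import random

def resolve_lq_path(filename, lq_roots):
    candidates = [os.path.join(root, filename) for root in lq_roots
                  if os.path.exists(os.path.join(root, filename))]
    if not candidates:
        raise FileNotFoundError(filename)
    return random.choice(candidates)

# resolve_lq_path('0001.png', [r'E:\4k6k\datasets\4k_closeup\lr_corrupted',
#                              r'E:\4k6k\datasets\4k_closeup\lr_c_blurred'])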
@@ -30,16 +30,18 @@ datasets:
 network_G:
   which_model_G: ResGen
   nf: 256
+  nb_denoiser: 20
+  nb_upsampler: 10
 network_D:
   which_model_D: discriminator_resnet_passthrough
-  nf: 42
+  nf: 64
 
 #### path
 path:
-  #pretrain_model_G: ../experiments/blacked_fix_and_upconv_xl_part1/models/3000_G.pth
+  #pretrain_model_G: ../experiments/pretrained_resnet_G.pth
   #pretrain_model_D: ~
   strict_load: true
-  resume_state: ~
+  resume_state: ../experiments/blacked_fix_and_upconv_xl/training_state/2500.state
 
 #### training settings: learning rate scheme, loss
 train:
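The two new ResGen options above split the generator's residual blocks between a denoising stage and an upsampling stage (20 and 10 blocks here at nf=256; the other config later in this commit uses 2 and 28, so both keep 30 blocks total and only redistribute them). A minimal sketch of how such a split could be wired up; the class, block design and scale handling are assumptions for illustration, not the fork's actual ResGen implementation:

# Hypothetical two-stage generator (not the repository's ResGen): nb_denoiser
# residual blocks run at LR resolution, nb_upsampler blocks feed the
# pixel-shuffle upscaler; nf is the feature width shared by both stages.
import torch
import torch.nn as nn

class ResBlock(nn.Module):
    def __init__(self, nf):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(nf, nf, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(nf, nf, 3, padding=1))

    def forward(self, x):
        return x + self.body(x)

class TwoStageResGen(nn.Module):
    def __init__(self, in_nc=3, out_nc=3, nf=256, nb_denoiser=20, nb_upsampler=10, scale=4):
        super().__init__()
        self.head = nn.Conv2d(in_nc, nf, 3, padding=1)
        self.denoiser = nn.Sequential(*[ResBlock(nf) for _ in range(nb_denoiser)])
        self.upsampler = nn.Sequential(*[ResBlock(nf) for _ in range(nb_upsampler)])
        ups = []
        for _ in range(scale // 2):  # two 2x pixel-shuffles for scale=4
            ups += [nn.Conv2d(nf, nf * 4, 3, padding=1), nn.PixelShuffle(2)]
        self.upscale = nn.Sequential(*ups)
        self.tail = nn.Conv2d(nf, out_nc, 3, padding=1)

    def forward(self, x):
        feat = self.denoiser(self.head(x))
        feat = self.upsampler(feat)
        return self.tail(self.upscale(feat))

# Smoke test with the 20/10 split from this config:
# print(TwoStageResGen()(torch.randn(1, 3, 64, 64)).shape)  # -> (1, 3, 256, 256)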
@@ -57,7 +59,7 @@ train:
   warmup_iter: -1 # no warm up
   lr_steps: [20000, 40000, 50000, 60000]
   lr_gamma: 0.5
-  mega_batch_factor: 2
+  mega_batch_factor: 3
 
   pixel_criterion: l1
   pixel_weight: !!float 1e-2
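mega_batch_factor moving from 2 to 3 pairs naturally with the batch_size change from 16 to 15 earlier in this file, since 15 splits evenly into 3 chunks of 5. In similar trainers this option usually means gradient accumulation: each batch is cut into that many chunks run forward/backward one at a time, so the effective batch size stays the same while peak VRAM drops. A minimal sketch under that assumption (function and argument names are illustrative, not the fork's API):

# Assumed semantics: split each batch into `mega_batch_factor` chunks and
# accumulate gradients before a single optimizer step.
import torch

def accumulate_step(model, loss_fn, optimizer, lq, gt, mega_batch_factor=3):
    optimizer.zero_grad()
    lq_chunks = torch.chunk(lq, mega_batch_factor, dim=0)
    gt_chunks = torch.chunk(gt, mega_batch_factor, dim=0)
    for lq_c, gt_c in zip(lq_chunks, gt_chunks):
        loss = loss_fn(model(lq_c), gt_c) / len(lq_chunks)  # average across chunks
        loss.backward()                                     # grads accumulate in-place
    optimizer.step()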
@@ -69,8 +71,8 @@ train:
   gan_type: gan # gan | ragan
   gan_weight: !!float 1e-2
 
-  D_update_ratio: 1
-  D_init_iters: -1
+  D_update_ratio: 2
+  D_init_iters: 0
 
   manual_seed: 10
   val_freq: !!float 5e2
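D_update_ratio and D_init_iters are standard SRGAN-style knobs; in the upstream training code they typically gate the generator update so that G only steps every D_update_ratio iterations and only after D_init_iters warm-up iterations, while D steps every iteration. A small sketch assuming the same semantics apply here:

# Assumed gating: the discriminator trains every iteration, the generator only
# when both conditions hold. With ratio 2 and init 0, G steps on 2, 4, 6, ...
def should_update_generator(step, d_update_ratio=2, d_init_iters=0):
    return step % d_update_ratio == 0 and step > d_init_iters

print([s for s in range(1, 9) if should_update_generator(s)])  # [2, 4, 6, 8]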
@@ -16,7 +16,7 @@ datasets:
     dataroot_LQ: E:/4k6k/datasets/div2k/DIV2K800_sub_bicLRx4
 
     use_shuffle: true
-    n_workers: 10 # per GPU
+    n_workers: 0 # per GPU
     batch_size: 24
     target_size: 128
     use_flip: true
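Dropping n_workers to 0 most likely maps onto torch.utils.data.DataLoader(num_workers=0), which loads every sample in the main process: lower throughput, but no worker processes to fight with on Windows and much easier to step through in a debugger. A standalone example of the equivalent loader setting (the mapping of n_workers to num_workers is an assumption about this codebase):

# num_workers=0 keeps all loading synchronous in the main process.
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.randn(8, 3, 128, 128), torch.randn(8, 3, 512, 512))
loader = DataLoader(ds, batch_size=24, shuffle=True, num_workers=0)
for lq, gt in loader:
    pass  # batches are produced in this process, one at a time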
@@ -32,6 +32,8 @@ datasets:
 network_G:
   which_model_G: ResGen
   nf: 256
+  nb_denoiser: 2
+  nb_upsampler: 28
 network_D:
   which_model_D: discriminator_resnet_passthrough
   nf: 42
@@ -41,7 +43,7 @@ path:
   #pretrain_model_G: ../experiments/blacked_fix_and_upconv_xl_part1/models/3000_G.pth
   #pretrain_model_D: ~
   strict_load: true
-  resume_state: ~
+  resume_state: ../experiments/esrgan_res/training_state/15500.state
 
 #### training settings: learning rate scheme, loss
 train:
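Both configs in this commit switch resume_state from ~ to a concrete .state file while the pretrain_model_* lines stay commented out. The usual distinction, consistent with the training-script hunk at the end of this commit, is that pretrain_model_G only initializes generator weights, whereas resume_state restores the full training state: iteration counter, optimizers and LR schedulers. A sketch of inspecting such a checkpoint, assuming it is a plain torch-serialized dict:

# 'iter' is used by the resume logic visible below; the other keys are typical
# of BasicSR-style trainers and listed only as illustration.
import torch

state = torch.load('../experiments/esrgan_res/training_state/15500.state',
                   map_location='cpu')
print(state['iter'])         # step the run will resume from
print(sorted(state.keys()))  # usually also epoch, optimizers, schedulers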
@@ -71,7 +73,7 @@ train:
   gan_type: gan # gan | ragan
   gan_weight: !!float 1e-2
 
-  D_update_ratio: 1
+  D_update_ratio: 2
   D_init_iters: -1
 
   manual_seed: 10
@@ -30,7 +30,7 @@ def init_dist(backend='nccl', **kwargs):
 def main():
     #### options
     parser = argparse.ArgumentParser()
-    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='options/train/train_ESRGAN_res.yml')
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='options/train/train_ESRGAN_blacked_xl.yml')
     parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                         help='job launcher')
     parser.add_argument('--local_rank', type=int, default=0)
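The new -opt default only matters when the trainer is launched without an explicit flag; any of the configs touched here can still be selected on the command line. A self-contained illustration of that argparse behaviour:

# Standalone illustration: the new default is only a fallback, an explicit
# -opt flag still selects any config.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to option YAML file.',
                    default='options/train/train_ESRGAN_blacked_xl.yml')

print(parser.parse_args([]).opt)                                               # new default
print(parser.parse_args(['-opt', 'options/train/train_ESRGAN_res.yml']).opt)  # explicit override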
@@ -147,7 +147,7 @@ def main():
         current_step = resume_state['iter']
         model.resume_training(resume_state)  # handle optimizers and schedulers
     else:
-        current_step = 0
+        current_step = -1
         start_epoch = 0
 
     #### training
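Starting current_step at -1 for fresh runs is presumably an off-by-one fix: if the training loop increments the counter at the top of each iteration, the first optimizer step is then numbered 0, matching the numbering a resumed run inherits from resume_state['iter']. A toy sketch of that assumption (not the actual loop from this repository):

# Assumed loop shape (counter incremented first, then the batch is processed);
# with -1 the first processed batch becomes step 0 instead of step 1.
current_step = -1
for _ in range(3):
    current_step += 1
    print('step', current_step)  # 0, 1, 2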