#### general settings
name: train_vix_corrupt
use_tb_logger: true
model: corruptgan
distortion: downsample
scale: 1
gpu_ids: [0]
amp_opt_level: O1

#### datasets
datasets:
  train:
    name: blacked
    mode: downsample
    dataroot_GT: K:\\4k6k\\4k_closeup\\lr_corrupted
    dataroot_LQ: G:\\data\\pr_upsample\\ultra_lowq\\for_training\\lr
    mismatched_Data_OK: true
    use_shuffle: true
    n_workers: 0  # per GPU
    batch_size: 1
    target_size: 64
    use_flip: false
    use_rot: false
    doCrop: false
    color: RGB
  val:
    name: blacked_val
    mode: downsample
    target_size: 64
    dataroot_GT: E:\\4k6k\\datasets\\vixen\\val
    dataroot_LQ: E:\\4k6k\\datasets\\vixen\\val

#### network structures
network_G:
  which_model_G: ResGenV2
  nf: 16
  nb_denoiser: 5
  nb_upsampler: 0
  upscale_applications: 0
network_D:
  which_model_D: discriminator_vgg_128
  in_nc: 3
  nf: 16

#### path
path:
  pretrain_model_G: ~
  pretrain_model_D: ~
  resume_state: ~
  strict_load: true

#### training settings: learning rate scheme, loss
train:
  lr_G: !!float 5e-5
  weight_decay_G: 0
  beta1_G: 0.9
  beta2_G: 0.99
  lr_D: !!float 1e-4
  weight_decay_D: 0
  beta1_D: 0.9
  beta2_D: 0.99
  lr_scheme: MultiStepLR

  niter: 400000
  warmup_iter: -1  # no warm up
  lr_steps: [30000, 60000, 100000, 200000]
  lr_gamma: 0.5

  pixel_criterion: l2
  pixel_weight: !!float 1e-2
  feature_criterion: l1
  feature_weight: 0
  gan_type: gan  # gan | ragan
  gan_weight: !!float 1e-1
  mega_batch_factor: 1

  D_update_ratio: 1
  D_init_iters: -1

  manual_seed: 10
  val_freq: !!float 5e2

#### logger
logger:
  print_freq: 50
  save_checkpoint_freq: !!float 5e2
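
#### notes (annotation sketch, not options consumed by the training code)
# Launching: option files in this mmsr/BasicSR-style layout are usually passed to the
# codebase's training entry point, e.g. `python train.py -opt train_vix_corrupt.yml`.
# The exact script name, path, and flag depend on the repository, so treat that
# command as an assumption rather than a verified invocation.
# Schedule: with lr_scheme MultiStepLR, lr_gamma 0.5, and lr_steps
# [30000, 60000, 100000, 200000], lr_G decays 5e-5 -> 2.5e-5 -> 1.25e-5 ->
# 6.25e-6 -> 3.125e-6 over the 400k-iteration run; lr_D decays analogously from 1e-4.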