Make RRDB usable in the current iteration

James Betker 2020-05-16 18:36:30 -06:00
parent b95c4087d1
commit 9cde58be80
4 changed files with 18 additions and 12 deletions

View File

@@ -156,8 +156,12 @@ class FixupResNet(nn.Module):
         return nn.Sequential(*layers)
 
     def forward(self, x):
-        # This class expects a medium skip (half-res) and low skip (quarter-res) provided as a tuple in the input.
-        x, med_skip, lo_skip = x
+        if len(x) == 3:
+            # This class can take a medium skip (half-res) and low skip (quarter-res) provided as a tuple in the input.
+            x, med_skip, lo_skip = x
+        else:
+            # Or just a tuple with only the high res input (this assumes number_skips was set right).
+            x = x[0]
         x = self.layer0(x)
         if self.number_skips > 0:
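
For reference, a standalone illustration of the new input contract (this is not code from the repo; the helper and tensors are made up to show the two call shapes):

import torch

def unpack(x):
    # Mirrors the new FixupResNet.forward input handling: a 3-tuple carries
    # (high-res, medium skip, low skip); a 1-tuple carries only the high-res input.
    if len(x) == 3:
        x, med_skip, lo_skip = x
    else:
        x = x[0]
    return x

hi = torch.randn(1, 3, 64, 64)
assert unpack((hi,)) is hi
assert unpack((hi, torch.randn(1), torch.randn(1))) is hi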

View File

@@ -46,10 +46,11 @@ class RRDB(nn.Module):
 
 class RRDBNet(nn.Module):
-    def __init__(self, in_nc, out_nc, nf, nb, gc=32, interpolation_scale_factor=2):
+    def __init__(self, in_nc, out_nc, nf, nb, gc=32, scale=2):
         super(RRDBNet, self).__init__()
         RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)
+        self.scale = scale
         self.conv_first = nn.Conv2d(in_nc, nf, 7, 1, padding=3, bias=True)
         self.RRDB_trunk = arch_util.make_layer(RRDB_block_f, nb)
         self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
@@ -61,15 +62,17 @@ class RRDBNet(nn.Module):
         self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-        self.interpolation_scale_factor = interpolation_scale_factor
 
     def forward(self, x):
         fea = self.conv_first(x)
         trunk = self.trunk_conv(self.RRDB_trunk(fea))
         fea = fea + trunk
-        fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=self.interpolation_scale_factor, mode='nearest')))
-        fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=self.interpolation_scale_factor, mode='nearest')))
+        if self.scale >= 2:
+            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
+            fea = self.lrelu(self.upconv1(fea))
+        if self.scale >= 4:
+            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
+            fea = self.lrelu(self.upconv2(fea))
         out = self.conv_last(self.lrelu(self.HRconv(fea)))
-        return out
+        return (out,)
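
A standalone sketch of the gated two-stage upsampling introduced above (not the repo's code; the upconv layers are omitted so only the shape behavior is shown): each stage that fires doubles the spatial size with nearest-neighbor interpolation, so scale=2 runs one stage and scale=4 runs both.

import torch
import torch.nn.functional as F

fea = torch.randn(1, 64, 32, 32)
scale = 4
if scale >= 2:
    fea = F.interpolate(fea, scale_factor=2, mode='nearest')
if scale >= 4:
    fea = F.interpolate(fea, scale_factor=2, mode='nearest')
print(fea.shape)  # torch.Size([1, 64, 128, 128]); with scale=2 it would be 64x64

Returning (out,) instead of a bare tensor appears to match the tuple convention the FixupResNet path above now expects.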

View File

@@ -25,9 +25,8 @@ def define_G(opt, net_key='network_G'):
                                       nf=opt_net['nf'], nb=opt_net['nb'], upscale=opt_net['scale'])
     elif which_model == 'RRDBNet':
-        # RRDB does scaling in two steps, so take the sqrt of the scale we actually want to achieve and feed it to RRDB.
-        scale_per_step = math.sqrt(scale)
         netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
-                                    nf=opt_net['nf'], nb=opt_net['nb'], interpolation_scale_factor=scale_per_step)
+                                    nf=opt_net['nf'], nb=opt_net['nb'], scale=scale)
     elif which_model == 'RRDBNetXL':
         scale_per_step = math.sqrt(scale)
         netG = RRDBNetXL_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
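
Some illustrative arithmetic on why the sqrt-per-step scheme was presumably dropped (this reasoning is inferred, not stated in the commit):

import math

# With two fixed upsampling steps, scale=4 divides cleanly across them...
print(math.sqrt(4))  # 2.0 -> two clean 2x interpolations
# ...but scale=2 would need a fractional factor per step:
print(math.sqrt(2))  # ~1.414 -> non-integer intermediate feature sizes
# Passing the full scale lets RRDBNet skip its second 2x stage instead.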

View File

@@ -30,7 +30,7 @@ def init_dist(backend='nccl', **kwargs):
 def main():
     #### options
     parser = argparse.ArgumentParser()
-    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_vix_resgenv2.yml')
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_vix_rrdb_v2.yml')
     parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                         help='job launcher')
     parser.add_argument('--local_rank', type=int, default=0)
@@ -147,7 +147,7 @@ def main():
         current_step = resume_state['iter']
         model.resume_training(resume_state)  # handle optimizers and schedulers
     else:
-        current_step = 0
+        current_step = -1
         start_epoch = 0
 
     #### training
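
The shift from 0 to -1 makes sense if the training loop increments current_step before using it (a guess from the surrounding code; loop bounds below are illustrative): starting at -1 makes a fresh run's first iteration step 0 rather than 1.

current_step = -1  # fresh start, as in the hunk above
for epoch in range(1):
    for _batch in range(3):
        current_step += 1
        print(current_step)  # 0, 1, 2 -- the first iteration is step 0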