forked from mrq/DL-Art-School
Add AssistedRRDB and remove RRDBNetXL
parent 445e7e7053
commit 9b44f6f5c0
@@ -1,98 +0,0 @@
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util


class ResidualDenseBlock_5C(nn.Module):
    def __init__(self, nf=64, gc=32, bias=True):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channel, i.e. intermediate channels
        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization
        arch_util.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5],
                                     0.1)

    def forward(self, x):
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        return x5 * 0.2 + x


class RRDB(nn.Module):
    '''Residual in Residual Dense Block'''

    def __init__(self, nf, gc=32):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nf, gc)
        self.RDB2 = ResidualDenseBlock_5C(nf, gc)
        self.RDB3 = ResidualDenseBlock_5C(nf, gc)

    def forward(self, x):
        out = self.RDB1(x)
        out = self.RDB2(out)
        out = self.RDB3(out)
        return out * 0.2 + x


class RRDBNet(nn.Module):
    def __init__(self, in_nc, out_nc, nf, nb_lo, nb_med, nb_hi, gc=32, interpolation_scale_factor=2):
        super(RRDBNet, self).__init__()
        nfmed = int(nf/2)
        nfhi = int(nf/8)
        gcmed = int(gc/2)
        gchi = int(gc/8)
        RRDB_block_f_lo = functools.partial(RRDB, nf=nf, gc=gc)
        RRDB_block_f_lo_med = functools.partial(RRDB, nf=nfmed, gc=gcmed)
        RRDB_block_f_lo_hi = functools.partial(RRDB, nf=nfhi, gc=gchi)

        self.conv_first = nn.Conv2d(in_nc, nf, 7, 1, padding=3, bias=True)
        self.RRDB_trunk_lo = arch_util.make_layer(RRDB_block_f_lo, nb_lo)
        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.lo_skip_conv1 = nn.Conv2d(nf, nf, 3, 1, padding=1, bias=True)
        self.lo_skip_conv2 = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

        #### upsampling
        self.upconv1 = nn.Conv2d(nf, nfmed, 3, 1, padding=1, bias=True)
        self.RRDB_trunk_med = arch_util.make_layer(RRDB_block_f_lo_med, nb_med)
        self.trunk_conv_med = nn.Conv2d(nfmed, nfmed, 3, 1, 1, bias=True)
        self.med_skip_conv1 = nn.Conv2d(nfmed, nfmed, 3, 1, padding=1, bias=True)
        self.med_skip_conv2 = nn.Conv2d(nfmed, out_nc, 3, 1, 1, bias=True)

        self.upconv2 = nn.Conv2d(nfmed, nfhi, 3, 1, padding=1, bias=True)
        self.RRDB_trunk_hi = arch_util.make_layer(RRDB_block_f_lo_hi, nb_hi)
        self.trunk_conv_hi = nn.Conv2d(nfhi, nfhi, 3, 1, 1, bias=True)
        self.HRconv = nn.Conv2d(nfhi, nfhi, 5, 1, padding=2, bias=True)
        self.conv_last = nn.Conv2d(nfhi, out_nc, 3, 1, 1, bias=True)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        self.interpolation_scale_factor = interpolation_scale_factor

    def forward(self, x):
        fea = self.conv_first(x)
        branch = self.trunk_conv(self.RRDB_trunk_lo(fea))
        fea = (fea + branch) / 2
        lo_skip = self.lo_skip_conv2(self.lrelu(self.lo_skip_conv1(fea)))

        fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=self.interpolation_scale_factor, mode='nearest')))
        branch = self.trunk_conv_med(self.RRDB_trunk_med(fea))
        fea = (fea + branch) / 2
        med_skip = self.med_skip_conv2(self.lrelu(self.med_skip_conv1(fea)))

        fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=self.interpolation_scale_factor, mode='nearest')))
        branch = self.trunk_conv_hi(self.RRDB_trunk_hi(fea))
        fea = (fea + branch) / 2
        out = self.conv_last(self.lrelu(self.HRconv(fea)))

        return out, med_skip, lo_skip
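For reference, a minimal sketch of how the deleted generator was driven and what its three-output contract looked like. This is not from the repo: the block counts are illustrative, and it assumes the class above is in scope with `arch_util` importable.

```python
import torch

# RRDBNet here is the class deleted above (the RRDBNetXL variant).
# nf=64 gives nfmed=32 and nfhi=8 internally; block counts are illustrative.
net = RRDBNet(in_nc=3, out_nc=3, nf=64, nb_lo=12, nb_med=6, nb_hi=3)
lr = torch.randn(1, 3, 32, 32)
out, med_skip, lo_skip = net(lr)
# Two nearest-neighbor 2x steps (default interpolation_scale_factor=2):
# out:      (1, 3, 128, 128)  full-resolution result
# med_skip: (1, 3, 64, 64)    intermediate 2x output
# lo_skip:  (1, 3, 32, 32)    output at input resolution
```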
|
@@ -3,6 +3,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util
import torchvision


class ResidualDenseBlock_5C(nn.Module):
@@ -76,3 +77,82 @@ class RRDBNet(nn.Module):
        out = self.conv_last(self.lrelu(self.HRconv(fea)))

        return (out,)

# Variant of RRDBNet that is "assisted" by an external pretrained image classifier whose
# intermediate layers have been splayed out, pixel-shuffled, and fed back in.
class AssistedRRDBNet(nn.Module):
    # in_nc=number of input channels.
    # out_nc=number of output channels.
    # nf=internal filter count.
    # nb=number of additional blocks after the assistance layers.
    # gc=growth channel inside of residual blocks.
    # scale=overall upscaling factor; the image is doubled once for scale>=2 and again for scale>=4.
    def __init__(self, in_nc, out_nc, nf, nb, gc=32, scale=2):
        super(AssistedRRDBNet, self).__init__()
        self.scale = scale
        self.conv_first = nn.Conv2d(in_nc, nf, 7, 1, padding=3, bias=True)

        # Set up the assist-net, which should do feature extraction for us.
        self.assistnet = torchvision.models.wide_resnet50_2(pretrained=True)
        self.set_enable_assistnet_training(False)
        assist_nf = [2, 4, 8, 16]  # Fixed for resnet. Re-evaluate if using other networks.
        self.assist1 = RRDB(nf + assist_nf[0], gc)
        self.assist2 = RRDB(nf + sum(assist_nf[:2]), gc)
        self.assist3 = RRDB(nf + sum(assist_nf[:3]), gc)
        self.assist4 = RRDB(nf + sum(assist_nf), gc)
        nf = nf + sum(assist_nf)

        # After this, it's just a "standard" RRDB net.
        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)
        self.RRDB_trunk = arch_util.make_layer(RRDB_block_f, nb)
        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        #### upsampling
        self.upconv1 = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.upconv2 = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.HRconv = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def set_enable_assistnet_training(self, en):
        for p in self.assistnet.parameters():
            p.requires_grad = en

    def res_extract(self, x):
        x = self.assistnet.conv1(x)
        x = self.assistnet.bn1(x)
        x = self.assistnet.relu(x)
        x = self.assistnet.maxpool(x)

        x = self.assistnet.layer1(x)
        l1 = F.pixel_shuffle(x, 4)
        x = self.assistnet.layer2(x)
        l2 = F.pixel_shuffle(x, 8)
        x = self.assistnet.layer3(x)
        l3 = F.pixel_shuffle(x, 16)
        x = self.assistnet.layer4(x)
        l4 = F.pixel_shuffle(x, 32)
        return l1, l2, l3, l4

    def forward(self, x):
        # Invoke the assistant net first.
        l1, l2, l3, l4 = self.res_extract(x)

        fea = self.conv_first(x)
        fea = self.assist1(torch.cat([fea, l4], dim=1))
        fea = self.assist2(torch.cat([fea, l3], dim=1))
        fea = self.assist3(torch.cat([fea, l2], dim=1))
        fea = self.assist4(torch.cat([fea, l1], dim=1))

        trunk = self.trunk_conv(self.RRDB_trunk(fea))
        fea = fea + trunk

        if self.scale >= 2:
            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
            fea = self.lrelu(self.upconv1(fea))
        if self.scale >= 4:
            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
            fea = self.lrelu(self.upconv2(fea))
        out = self.conv_last(self.lrelu(self.HRconv(fea)))

        return (out,)
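The `assist_nf = [2, 4, 8, 16]` constants follow directly from the pixel-shuffle step: `wide_resnet50_2` emits 256/512/1024/2048 channels at 1/4, 1/8, 1/16 and 1/32 resolution, and `F.pixel_shuffle(x, r)` maps (N, C, H, W) to (N, C/r², H·r, W·r). So all four maps land back at input resolution with 16, 8, 4 and 2 channels, which is also why `l4` (the 2-channel map) is concatenated first. A standalone shape check, not part of the commit (weights are irrelevant here, so the model is built untrained):

```python
import torch
import torch.nn.functional as F
import torchvision

net = torchvision.models.wide_resnet50_2()  # untrained is fine for a shape check
x = torch.randn(1, 3, 64, 64)
with torch.no_grad():
    x = net.maxpool(net.relu(net.bn1(net.conv1(x))))  # resnet stem: 1/4 resolution
    for layer, r in [(net.layer1, 4), (net.layer2, 8), (net.layer3, 16), (net.layer4, 32)]:
        x = layer(x)                       # halves resolution from layer2 onward
        print(F.pixel_shuffle(x, r).shape)  # back to 64x64 every time
# torch.Size([1, 16, 64, 64])  <- l1, assist_nf[3]
# torch.Size([1, 8, 64, 64])   <- l2
# torch.Size([1, 4, 64, 64])   <- l3
# torch.Size([1, 2, 64, 64])   <- l4, assist_nf[0], concatenated first
```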
@@ -5,12 +5,9 @@ import models.archs.DiscriminatorResnet_arch as DiscriminatorResnet_arch
import models.archs.DiscriminatorResnet_arch_passthrough as DiscriminatorResnet_arch_passthrough
import models.archs.FlatProcessorNetNew_arch as FlatProcessorNetNew_arch
import models.archs.RRDBNet_arch as RRDBNet_arch
import models.archs.RRDBNetXL_arch as RRDBNetXL_arch
#import models.archs.EDVR_arch as EDVR_arch
import models.archs.HighToLowResNet as HighToLowResNet
import models.archs.FlatProcessorNet_arch as FlatProcessorNet_arch
import models.archs.arch_util as arch_utils
import models.archs.ResGen_arch as ResGen_arch
import models.archs.biggan_gen_arch as biggan_arch
import math

# Generator

@@ -27,11 +24,9 @@ def define_G(opt, net_key='network_G'):
        # RRDB does scaling in two steps, so take the sqrt of the scale we actually want to achieve and feed it to RRDB.
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'], scale=scale)
    elif which_model == 'RRDBNetXL':
        scale_per_step = math.sqrt(scale)
        netG = RRDBNetXL_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                      nf=opt_net['nf'], nb_lo=opt_net['nblo'], nb_med=opt_net['nbmed'], nb_hi=opt_net['nbhi'],
                                      interpolation_scale_factor=scale_per_step)
    elif which_model == 'AssistedRRDBNet':
        netG = RRDBNet_arch.AssistedRRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                            nf=opt_net['nf'], nb=opt_net['nb'], scale=scale)
    elif which_model == 'ResGen':
        netG = ResGen_arch.fixup_resnet34(nb_denoiser=opt_net['nb_denoiser'], nb_upsampler=opt_net['nb_upsampler'],
                                          upscale_applications=opt_net['upscale_applications'], num_filters=opt_net['nf'])

@@ -39,6 +34,8 @@ def define_G(opt, net_key='network_G'):
        netG = ResGen_arch.fixup_resnet34_v2(nb_denoiser=opt_net['nb_denoiser'], nb_upsampler=opt_net['nb_upsampler'],
                                             upscale_applications=opt_net['upscale_applications'], num_filters=opt_net['nf'],
                                             inject_noise=opt_net['inject_noise'])
    elif which_model == "BigGan":
        netG = biggan_arch.biggan_medium(filters=opt_net['nf'])

    # image corruption
    elif which_model == 'HighToLowResNet':
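Finally, a hedged sketch of selecting the new generator through define_G. The which_model_G selector key and the placement of the scale key are assumptions inferred from the opt_net lookups above, not shown in this commit; the values are illustrative. (For comparison, the removed RRDBNetXL branch split the requested scale across its two interpolation steps, so a requested scale of 4 became interpolation_scale_factor=2.)

```python
# Hypothetical 'network_G' options block; key names mirror the opt_net
# lookups in the AssistedRRDBNet branch above, values are illustrative only.
opt = {'network_G': {
    'which_model_G': 'AssistedRRDBNet',  # assumed selector key
    'in_nc': 3, 'out_nc': 3,  # RGB in and out
    'nf': 64,    # filters before the 30 assist channels (2+4+8+16) are appended
    'nb': 10,    # RRDB blocks in the trunk after the assist stage
    'scale': 2,  # assumed key: overall upscale factor (2 or 4)
}}
netG = define_G(opt)
```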