# DL-Art-School/codes/models/archs/RRDBNet_arch.py

import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util
import torchvision


class ResidualDenseBlock_5C(nn.Module):
    def __init__(self, nf=64, gc=32, bias=True):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channel, i.e. intermediate channels
        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization
        arch_util.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5],
                                     0.1)

    def forward(self, x):
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        return x5 * 0.2 + x
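
# Note (added for clarity): conv5 maps the nf + 4*gc concatenation of the input and the
# four intermediate features back to nf channels, so the 0.2-scaled residual connection
# above is shape-compatible with the block input.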


class RRDB(nn.Module):
    '''Residual in Residual Dense Block'''

    def __init__(self, nf, gc=32):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nf, gc)
        self.RDB2 = ResidualDenseBlock_5C(nf, gc)
        self.RDB3 = ResidualDenseBlock_5C(nf, gc)

    def forward(self, x):
        out = self.RDB1(x)
        out = self.RDB2(out)
        out = self.RDB3(out)
        return out * 0.2 + x


class RRDBNet(nn.Module):
    def __init__(self, in_nc, out_nc, nf, nb, gc=32, scale=2):
        super(RRDBNet, self).__init__()
        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)
        self.scale = scale
        self.conv_first = nn.Conv2d(in_nc, nf, 7, 1, padding=3, bias=True)
        self.RRDB_trunk = arch_util.make_layer(RRDB_block_f, nb)
        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)

        #### upsampling
        self.upconv1 = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.upconv2 = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.HRconv = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        fea = self.conv_first(x)
        trunk = self.trunk_conv(self.RRDB_trunk(fea))
        fea = fea + trunk

        if self.scale >= 2:
            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
            fea = self.lrelu(self.upconv1(fea))
        if self.scale >= 4:
            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
            fea = self.lrelu(self.upconv2(fea))
        out = self.conv_last(self.lrelu(self.HRconv(fea)))
        return (out,)
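
# Usage note (added for clarity): in forward(), scale >= 2 triggers one x2 nearest-neighbor
# upsample and scale >= 4 a second one, so the effective output scale tops out at x4 with
# the two upconv stages above. The network returns a one-element tuple, so callers unpack
# the result, e.g. `sr, = net(lr)`.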


# Variant of RRDBNet that is "assisted" by an external pretrained image classifier whose
# intermediate layers have been splayed out, pixel-shuffled, and fed back in.
class AssistedRRDBNet(nn.Module):
    # in_nc=number of input channels.
    # out_nc=number of output channels.
    # nf=internal filter count
    # nb=number of additional blocks after the assistance layers.
    # gc=growth channel inside of residual blocks
    # scale=the number of times the output is doubled in size.
    def __init__(self, in_nc, out_nc, nf, nb, gc=32, scale=2):
        super(AssistedRRDBNet, self).__init__()
        self.scale = scale
        self.conv_first = nn.Conv2d(in_nc, nf, 7, 1, padding=3, bias=True)

        # Set-up the assist-net, which should do feature extraction for us.
        self.assistnet = torchvision.models.wide_resnet50_2(pretrained=True)
        self.set_enable_assistnet_training(False)
        assist_nf = [2, 4, 8, 16]  # Fixed for resnet. Re-evaluate if using other networks.
        self.assist1 = RRDB(nf + assist_nf[0], gc)
        self.assist2 = RRDB(nf + sum(assist_nf[:2]), gc)
        self.assist3 = RRDB(nf + sum(assist_nf[:3]), gc)
        self.assist4 = RRDB(nf + sum(assist_nf), gc)
        nf = nf + sum(assist_nf)

        # After this, it's just a "standard" RRDB net.
        RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)
        self.RRDB_trunk = arch_util.make_layer(RRDB_block_f, nb)
        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)

        #### upsampling
        self.upconv1 = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.upconv2 = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.HRconv = nn.Conv2d(nf, nf, 5, 1, padding=2, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def set_enable_assistnet_training(self, en):
        for p in self.assistnet.parameters():
            p.requires_grad = en

    def res_extract(self, x):
        x = self.assistnet.conv1(x)
        x = self.assistnet.bn1(x)
        x = self.assistnet.relu(x)
        x = self.assistnet.maxpool(x)
        x = self.assistnet.layer1(x)
        l1 = F.pixel_shuffle(x, 4)
        x = self.assistnet.layer2(x)
        l2 = F.pixel_shuffle(x, 8)
        x = self.assistnet.layer3(x)
        l3 = F.pixel_shuffle(x, 16)
        x = self.assistnet.layer4(x)
        l4 = F.pixel_shuffle(x, 32)
        return l1, l2, l3, l4
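
    # Shape note (added for clarity; the channel counts are the standard torchvision
    # wide_resnet50_2 layer widths, not values stated in this file):
    #   layer1: 256 ch at 1/4 res   -> pixel_shuffle(4)  -> 16 ch at input res
    #   layer2: 512 ch at 1/8 res   -> pixel_shuffle(8)  ->  8 ch at input res
    #   layer3: 1024 ch at 1/16 res -> pixel_shuffle(16) ->  4 ch at input res
    #   layer4: 2048 ch at 1/32 res -> pixel_shuffle(32) ->  2 ch at input res
    # This is why assist_nf = [2, 4, 8, 16] in __init__: l4..l1 add 2, 4, 8 and 16 channels.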

    def forward(self, x):
        # Invoke the assistant net first.
        l1, l2, l3, l4 = self.res_extract(x)

        fea = self.conv_first(x)
        fea = self.assist1(torch.cat([fea, l4], dim=1))
        fea = self.assist2(torch.cat([fea, l3], dim=1))
        fea = self.assist3(torch.cat([fea, l2], dim=1))
        fea = self.assist4(torch.cat([fea, l1], dim=1))

        trunk = self.trunk_conv(self.RRDB_trunk(fea))
        fea = fea + trunk

        if self.scale >= 2:
            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
            fea = self.lrelu(self.upconv1(fea))
        if self.scale >= 4:
            fea = F.interpolate(fea, scale_factor=2, mode='nearest')
            fea = self.lrelu(self.upconv2(fea))

        out = self.conv_last(self.lrelu(self.HRconv(fea)))
        return (out,)
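

# Minimal smoke test (a sketch added for illustration; the argument values below are
# assumptions chosen for a quick shape check, not settings taken from the repo's configs).
if __name__ == '__main__':
    net = RRDBNet(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, scale=4)
    lr = torch.randn(1, 3, 32, 32)
    sr, = net(lr)
    print(sr.shape)  # expected: torch.Size([1, 3, 128, 128])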