Add simplified SPSR architecture
Basically just cleaning up the code, removing some bad conventions, and reducing complexity somewhat so that I can play around with this arch a bit more easily.
parent 47e24039b5
commit 0d070b47a7
@@ -394,9 +394,8 @@ class SRGANModel(BaseModel):
         fea_GenOut = fake_GenOut
         using_gan_img = False
         # Get image gradients for later use.
-        fake_H_grad = self.get_grad(fake_GenOut)
-        var_H_grad = self.get_grad(var_H)
-        var_ref_grad = self.get_grad(var_ref)
+        fake_H_grad = self.get_grad_nopadding(fake_GenOut)
+        var_ref_grad = self.get_grad_nopadding(var_ref)
         var_H_grad_nopadding = self.get_grad_nopadding(var_H)
         self.spsr_grad_GenOut.append(grad_LR)
     else:
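Note: the generator-side gradient maps above now all come from get_grad_nopadding; the padded get_grad variants are dropped. The body of the no-padding gradient operator is not part of this diff, so the following is only an illustrative sketch of what such a module could look like, assuming a fixed Sobel-style filter applied without padding; the class name and kernels are placeholders, not the repo's actual code.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SobelGradientNoPadding(nn.Module):
        # Illustrative stand-in for an ImageGradientNoPadding-style module.
        def __init__(self):
            super().__init__()
            kx = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]])
            # Buffers follow .to(device)/.cuda() but are never trained.
            self.register_buffer('kx', kx.view(1, 1, 3, 3))
            self.register_buffer('ky', kx.t().contiguous().view(1, 1, 3, 3))

        def forward(self, x):
            c = x.shape[1]
            # One grouped conv per direction, applied per-channel with no padding.
            gx = F.conv2d(x, self.kx.repeat(c, 1, 1, 1), groups=c)
            gy = F.conv2d(x, self.ky.repeat(c, 1, 1, 1), groups=c)
            return torch.sqrt(gx ** 2 + gy ** 2 + 1e-6)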
@@ -426,7 +425,7 @@ class SRGANModel(BaseModel):
         l_g_pix_log = l_g_pix / self.l_pix_w
         l_g_total += l_g_pix
     if self.spsr_enabled and self.cri_pix_grad: # gradient pixel loss
-        l_g_pix_grad = self.l_pix_grad_w * self.cri_pix_grad(fake_H_grad, var_H_grad)
+        l_g_pix_grad = self.l_pix_grad_w * self.cri_pix_grad(fake_H_grad, var_H_grad_nopadding)
         l_g_total += l_g_pix_grad
     if self.spsr_enabled and self.cri_pix_branch: # branch pixel loss
         l_g_pix_grad_branch = self.l_pix_branch_w * self.cri_pix_branch(fake_H_branch,
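Note: the gradient pixel loss now targets the no-padding gradient map of the HR ground truth. The terms in this block are plain weighted criteria; a minimal sketch with an assumed L1 criterion and unit weight, stand-ins for the values configured in the training options.

    import torch
    import torch.nn as nn

    cri_pix_grad = nn.L1Loss()   # assumed criterion; the real model configures this from opts
    l_pix_grad_w = 1.0           # assumed weight
    fake_H_grad = torch.rand(1, 3, 126, 126)            # gradients of the generator output
    var_H_grad_nopadding = torch.rand(1, 3, 126, 126)   # no-padding gradients of the HR target
    l_g_pix_grad = l_pix_grad_w * cri_pix_grad(fake_H_grad, var_H_grad_nopadding)
    l_g_total = l_g_pix_grad     # accumulated together with the other pixel/GAN terms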
@@ -660,8 +659,8 @@ class SRGANModel(BaseModel):
     self.optimizer_D_grad.zero_grad()

     for var_ref, fake_H in zip(self.var_ref, self.fake_H):
-        fake_H_grad = self.get_grad(fake_H)
-        var_ref_grad = self.get_grad(var_ref)
+        fake_H_grad = self.get_grad_nopadding(fake_H)
+        var_ref_grad = self.get_grad_nopadding(var_ref)
         pred_d_real_grad = self.netD_grad(var_ref_grad)
         pred_d_fake_grad = self.netD_grad(fake_H_grad.detach()) # detach to avoid BP to G
         if self.opt['train']['gan_type'] == 'gan':
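Note: the gradient discriminator consumes the same no-padding maps. The .detach() on fake_H_grad is what keeps this D update from backpropagating into the generator; a minimal sketch of that pattern with placeholder modules, not the repo's networks.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    netG = nn.Conv2d(3, 3, 3, padding=1)        # placeholder generator
    netD_grad = nn.Conv2d(3, 1, 3, padding=1)   # placeholder gradient discriminator
    opt_d = torch.optim.Adam(netD_grad.parameters(), lr=1e-4)

    real_grad = torch.rand(4, 3, 32, 32)
    fake_grad = netG(torch.rand(4, 3, 32, 32))

    opt_d.zero_grad()
    # Non-saturating GAN discriminator loss; detach() blocks gradient flow into netG.
    l_d = F.softplus(netD_grad(fake_grad.detach())).mean() + F.softplus(-netD_grad(real_grad)).mean()
    l_d.backward()
    opt_d.step()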
@@ -4,6 +4,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 from models.archs import SPSR_util as B
+from .RRDBNet_arch import RRDB
 from models.archs.arch_util import ConvGnLelu, ExpansionBlock, UpconvBlock


 class ImageGradient(nn.Module):
@@ -91,7 +92,7 @@ class SPSRNet(nn.Module):
         LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)

         if upsample_mode == 'upconv':
-            upsample_block = B.upconv_blcok
+            upsample_block = B.upconv_block
         elif upsample_mode == 'pixelshuffle':
             upsample_block = B.pixelshuffle_block
         else:
@@ -129,7 +130,7 @@ class SPSRNet(nn.Module):
         self.b_LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)

         if upsample_mode == 'upconv':
-            upsample_block = B.upconv_blcok
+            upsample_block = B.upconv_block
         elif upsample_mode == 'pixelshuffle':
             upsample_block = B.pixelshuffle_block
         else:
@@ -224,3 +225,114 @@ class SPSRNet(nn.Module):
         #########
         return x_out_branch, x_out, x_grad
+
+
+class SPSRNetSimplified(nn.Module):
+    def __init__(self, in_nc, out_nc, nf, nb, upscale=4):
+        super(SPSRNetSimplified, self).__init__()
+        n_upscale = int(math.log(upscale, 2))
+
+        # Feature branch
+        self.model_fea_conv = ConvGnLelu(in_nc, nf, kernel_size=3, norm=False, activation=False)
+        self.model_shortcut_blk = nn.Sequential(*[RRDB(nf, gc=32) for _ in range(nb)])
+        self.feature_lr_conv = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=False)
+        self.model_upsampler = nn.Sequential(*[UpconvBlock(nf, nf, block=ConvGnLelu, norm=False, activation=False, bias=False) for _ in range(n_upscale)])
+        self.feature_hr_conv1 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True, bias=False)
+        self.feature_hr_conv2 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=False, bias=False)
+
+        # Grad branch
+        self.get_g_nopadding = ImageGradientNoPadding()
+        self.b_fea_conv = ConvGnLelu(in_nc, nf, kernel_size=3, norm=False, activation=False, bias=False)
+        self.b_concat_decimate_1 = ConvGnLelu(2 * nf, nf, kernel_size=1, norm=False, activation=False, bias=False)
+        self.b_proc_block_1 = RRDB(nf, gc=32)
+        self.b_concat_decimate_2 = ConvGnLelu(2 * nf, nf, kernel_size=1, norm=False, activation=False, bias=False)
+        self.b_proc_block_2 = RRDB(nf, gc=32)
+        self.b_concat_decimate_3 = ConvGnLelu(2 * nf, nf, kernel_size=1, norm=False, activation=False, bias=False)
+        self.b_proc_block_3 = RRDB(nf, gc=32)
+        self.b_concat_decimate_4 = ConvGnLelu(2 * nf, nf, kernel_size=1, norm=False, activation=False, bias=False)
+        self.b_proc_block_4 = RRDB(nf, gc=32)
+        # Upsampling
+        self.grad_lr_conv = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True, bias=False)
+        b_upsampler = nn.Sequential(*[UpconvBlock(nf, nf, block=ConvGnLelu, norm=False, activation=False, bias=False) for _ in range(n_upscale)])
+        grad_hr_conv1 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True, bias=False)
+        grad_hr_conv2 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=False, bias=False)
+        self.branch_upsample = B.sequential(*b_upsampler, grad_hr_conv1, grad_hr_conv2)
+        # Conv used to output grad branch shortcut.
+        self.grad_branch_output_conv = ConvGnLelu(nf, out_nc, kernel_size=1, norm=False, activation=False, bias=False)
+
+        # Conjoin branch.
+        # Note: "_branch_pretrain" is a special tag used to denote parameters that get pretrained before the rest.
+        self._branch_pretrain_concat = ConvGnLelu(nf * 2, nf, kernel_size=1, norm=False, activation=False, bias=False)
+        self._branch_pretrain_block = RRDB(nf * 2, gc=32)
+        self._branch_pretrain_HR_conv0 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True, bias=False)
+        self._branch_pretrain_HR_conv1 = ConvGnLelu(nf, out_nc, kernel_size=3, norm=False, activation=False, bias=False)
+
+    def forward(self, x):
+
+        x_grad = self.get_g_nopadding(x)
+        x = self.model_fea_conv(x)
+
+        x_ori = x
+        for i in range(5):
+            x = self.model_shortcut_blk[i](x)
+        x_fea1 = x
+
+        for i in range(5):
+            x = self.model_shortcut_blk[i + 5](x)
+        x_fea2 = x
+
+        for i in range(5):
+            x = self.model_shortcut_blk[i + 10](x)
+        x_fea3 = x
+
+        for i in range(5):
+            x = self.model_shortcut_blk[i + 15](x)
+        x_fea4 = x
+
+        x = self.model_shortcut_blk[20:](x)
+        x = self.feature_lr_conv(x)
+
+        # short cut
+        x = x_ori + x
+        x = self.model_upsampler(x)
+        x = self.feature_hr_conv1(x)
+        x = self.feature_hr_conv2(x)
+
+        x_b_fea = self.b_fea_conv(x_grad)
+        x_cat_1 = torch.cat([x_b_fea, x_fea1], dim=1)
+
+        x_cat_1 = self.b_concat_decimate_1(x_cat_1)
+        x_cat_1 = self.b_proc_block_1(x_cat_1)
+
+        x_cat_2 = torch.cat([x_cat_1, x_fea2], dim=1)
+
+        x_cat_2 = self.b_concat_decimate_2(x_cat_2)
+        x_cat_2 = self.b_proc_block_2(x_cat_2)
+
+        x_cat_3 = torch.cat([x_cat_2, x_fea3], dim=1)
+
+        x_cat_3 = self.b_concat_decimate_3(x_cat_3)
+        x_cat_3 = self.b_proc_block_3(x_cat_3)
+
+        x_cat_4 = torch.cat([x_cat_3, x_fea4], dim=1)
+
+        x_cat_4 = self.b_concat_decimate_4(x_cat_4)
+        x_cat_4 = self.b_proc_block_4(x_cat_4)
+
+        x_cat_4 = self.grad_lr_conv(x_cat_4)
+
+        # short cut
+        x_cat_4 = x_cat_4 + x_b_fea
+        x_branch = self.branch_upsample(x_cat_4)
+        x_out_branch = self.grad_branch_output_conv(x_branch)
+
+        ########
+        x_branch_d = x_branch
+        x__branch_pretrain_cat = torch.cat([x_branch_d, x], dim=1)
+        x__branch_pretrain_cat = self._branch_pretrain_block(x__branch_pretrain_cat)
+        x_out = self._branch_pretrain_concat(x__branch_pretrain_cat)
+        x_out = self._branch_pretrain_HR_conv0(x_out)
+        x_out = self._branch_pretrain_HR_conv1(x_out)
+
+        #########
+        return x_out_branch, x_out, x_grad
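Note: the simplified network keeps the original SPSR output contract — it returns the gradient-branch image, the fused image, and the input's gradient map. A quick smoke test of the constructor added above; the import path follows this repo's layout, and nf=64 / nb=23 are only example values (the forward pass splits the trunk into 5-block chunks plus a [20:] tail, so nb needs to be at least 20).

    import torch
    from models.archs.SPSR_arch import SPSRNetSimplified

    net = SPSRNetSimplified(in_nc=3, out_nc=3, nf=64, nb=23, upscale=4)
    lr = torch.rand(1, 3, 32, 32)
    with torch.no_grad():
        x_out_branch, x_out, x_grad = net(lr)
    # x_out and x_out_branch are upscaled by `upscale`; x_grad is the gradient map of the input.
    print(x_out_branch.shape, x_out.shape, x_grad.shape)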
@@ -153,8 +153,8 @@ def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1,
     return sequential(conv, pixel_shuffle, n, a)


-def upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \
-                 pad_type='zero', norm_type=None, act_type='relu', mode='nearest'):
+def upconv_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \
+                 pad_type='zero', norm_type=None, act_type='relu', mode='nearest'):
     # Up conv
     # described in https://distill.pub/2016/deconv-checkerboard/
     upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode)
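Note: this is the same helper as before; only the misspelled name is fixed, and the two SPSRNet call sites above switch to B.upconv_block. A minimal call, with illustrative values:

    from models.archs.SPSR_util import upconv_block   # imported as `B` elsewhere in this repo

    # Nearest-neighbor upsample followed by a conv, per the checkerboard-artifact note above.
    up = upconv_block(in_nc=64, out_nc=64, upscale_factor=2, act_type='leakyrelu')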
@@ -388,4 +388,33 @@ class ExpansionBlock(nn.Module):
         x = self.decimate(x)
         p = self.process_passthrough(passthrough)
         x = self.conjoin(torch.cat([x, p], dim=1))
         return self.process(x)
+
+
+# Similar to ExpansionBlock but does not upsample.
+class ConjoinBlock(nn.Module):
+    def __init__(self, filters_in, filters_out=None, block=ConvGnSilu, norm=True):
+        super(ConjoinBlock, self).__init__()
+        if filters_out is None:
+            filters_out = filters_in
+        self.decimate = block(filters_in*2, filters_out, kernel_size=1, bias=False, activation=False, norm=norm)
+        self.process = block(filters_out, filters_out, kernel_size=3, bias=False, activation=True, norm=norm)
+
+    # input is the feature signal with shape (b, f, w, h)
+    # passthrough is the structure signal with shape (b, f/2, w*2, h*2)
+    # output is conjoined upsample with shape (b, f/2, w*2, h*2)
+    def forward(self, input, passthrough):
+        x = torch.cat([input, passthrough], dim=1)
+        x = self.decimate(x)
+        return self.process(x)
+
+
+# Basic convolutional upsampling block that uses interpolate.
+class UpconvBlock(nn.Module):
+    def __init__(self, filters_in, filters_out=None, block=ConvGnSilu, norm=True, activation=True, bias=False):
+        super(UpconvBlock, self).__init__()
+        self.process = block(filters_out, filters_out, kernel_size=3, bias=bias, activation=activation, norm=norm)
+
+    def forward(self, x):
+        x = F.interpolate(x, scale_factor=2, mode="nearest")
+        return self.process(x)
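Note: ConjoinBlock keeps the input resolution while folding a concatenated skip back down to filters_out channels; UpconvBlock doubles the spatial size with nearest-neighbor interpolation before its conv. UpconvBlock uses filters_out directly, so callers are expected to pass it explicitly, as SPSRNetSimplified does. A small sketch, assuming the ConvGnLelu blocks preserve spatial size as the same-resolution conv blocks in this repo do:

    import torch
    from models.archs.arch_util import ConjoinBlock, ConvGnLelu, UpconvBlock

    feat = torch.rand(1, 64, 24, 24)
    skip = torch.rand(1, 64, 24, 24)

    conjoin = ConjoinBlock(64, block=ConvGnLelu, norm=False)
    up = UpconvBlock(64, 64, block=ConvGnLelu, norm=False, activation=True, bias=False)

    y = conjoin(feat, skip)   # concat to 128 channels, 1x1 decimate, 3x3 process -> (1, 64, 24, 24)
    z = up(y)                 # nearest 2x interpolate, then 3x3 conv -> (1, 64, 48, 48)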
@@ -105,6 +105,9 @@ def define_G(opt, net_key='network_G'):
                               act_type='leakyrelu', mode=opt_net['mode'], upsample_mode='upconv')
         if opt['is_train']:
             arch_util.initialize_weights(netG, scale=.1)
+    elif which_model == 'spsr_net_improved':
+        netG = spsr.SPSRNetSimplified(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'],
+                                      nb=opt_net['nb'], upscale=opt_net['scale'])

     # image corruption
     elif which_model == 'HighToLowResNet':
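Note: with this, the simplified generator is picked up from the training options like any other arch. A minimal network_G option block in dict form; the which_model_G key name and top-level layout are assumptions based on how define_G is usually configured in this codebase, and the values are only examples.

    # Dict equivalent of the YAML options consumed by define_G for the new generator.
    opt = {
        'is_train': True,
        'network_G': {
            'which_model_G': 'spsr_net_improved',   # assumed key; routes to SPSRNetSimplified
            'in_nc': 3,
            'out_nc': 3,
            'nf': 64,
            'nb': 23,
            'scale': 4,
        },
    }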