Back to pyramids, no rrdb

parent 42a97de756
commit b4136d766a
@@ -6,7 +6,7 @@ from models.archs.arch_util import ConvBnLelu, ConvGnLelu, ExpansionBlock, ConvG
 import torch.nn.functional as F
 from models.archs.SwitchedResidualGenerator_arch import gather_2d
 from models.archs.pyramid_arch import Pyramid
-from utils.util import checkpoint, sequential_checkpoint
+from utils.util import checkpoint


 class Discriminator_VGG_128(nn.Module):
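The only import change is dropping sequential_checkpoint, which is no longer needed once the forward pass below checkpoints each sub-module as a whole. As a rough illustration of the distinction, here is a minimal sketch of wrappers with these call signatures built on torch.utils.checkpoint; the repo's actual utils.util implementations are not part of this commit and may differ.

# Minimal sketch only: the real utils.util.checkpoint/sequential_checkpoint are
# not shown in this diff and may add extra logic (e.g. a global on/off switch).
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp

def checkpoint(module, *args):
    # Recompute `module` during backward instead of storing its activations.
    return cp.checkpoint(module, *args)

def sequential_checkpoint(module, partitions, x):
    # Split a Sequential/ModuleList into `partitions` segments and checkpoint
    # each one, mirroring torch.utils.checkpoint.checkpoint_sequential.
    return cp.checkpoint_sequential(module, partitions, x)

if __name__ == '__main__':
    trunk = nn.Sequential(nn.Conv2d(8, 8, 3, padding=1), nn.ReLU(),
                          nn.Conv2d(8, 8, 3, padding=1), nn.ReLU())
    t = torch.randn(1, 8, 16, 16, requires_grad=True)
    assert checkpoint(trunk, t).shape == sequential_checkpoint(trunk, 2, t).shape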
@@ -662,24 +662,22 @@ class SingleImageQualityEstimator(nn.Module):
         return fea


-class RRDBDiscriminator(nn.Module):
+class PyramidDiscriminator(nn.Module):
     def __init__(self, in_nc, nf, block=ConvGnLelu):
-        super(RRDBDiscriminator, self).__init__()
+        super(PyramidDiscriminator, self).__init__()
         self.initial_conv = block(in_nc, nf, kernel_size=3, stride=2, bias=True, norm=False, activation=True)
-        self.trunk = nn.ModuleList(*[RRDBWithBypass(nf),
-                                     RRDBWithBypass(nf),
-                                     RRDBWithBypass(nf),
-                                     RRDBWithBypass(nf),
-                                     RRDBWithBypass(nf)])
-
-        self.tail = nn.Sequential(*[
-            ConvGnLelu(nf, nf // 2, kernel_size=1, activation=True, norm=True, bias=True),
-            ConvGnLelu(nf // 2, nf // 4, kernel_size=1, activation=True, norm=True, bias=True),
-            ConvGnLelu(nf // 4, 1, activation=False, norm=False, bias=True)])
+        self.top_proc = nn.Sequential(*[ConvGnLelu(nf, nf, kernel_size=3, stride=2, bias=False, norm=True, activation=True)])
+        self.pyramid = Pyramid(nf, depth=3, processing_convs_per_layer=2, processing_at_point=2,
+                               scale_per_level=1.5, norm=True, return_outlevels=False)
+        self.bottom_proc = nn.Sequential(*[
+            ConvGnLelu(nf, nf // 2, kernel_size=1, activation=True, norm=True, bias=True),
+            ConvGnLelu(nf // 2, nf // 4, kernel_size=1, activation=True, norm=True, bias=True),
+            ConvGnLelu(nf // 4, 1, activation=False, norm=False, bias=True)])

     def forward(self, x):
         fea = self.initial_conv(x)
-        fea = sequential_checkpoint(self.top_proc, 2, fea)
-        fea = checkpoint(self.tail, fea)
+        fea = checkpoint(self.top_proc, fea)
+        fea = checkpoint(self.pyramid, fea)
+        fea = checkpoint(self.bottom_proc, fea)
         return torch.mean(fea, dim=[1,2,3])

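The RRDB trunk is replaced by a strided conv stage feeding a Pyramid block (depth 3, 1.5x scale per level, single fused output) followed by 1x1 reduction convs, with each of the three stages checkpointed in the forward pass. Below is a hypothetical smoke test of the shape contract; the module path, nf value, and 128px input are assumptions, and the (4,) output follows from the mean over dims [1, 2, 3].

# Hypothetical usage sketch; import path and input size are assumptions.
import torch
import models.archs.SRGAN_arch as SRGAN_arch

disc = SRGAN_arch.PyramidDiscriminator(in_nc=3, nf=64)
x = torch.randn(4, 3, 128, 128)   # batch of four RGB crops
scores = disc(x)                  # one scalar per image after the channel/spatial mean
print(scores.shape)               # expected: torch.Size([4])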
@@ -187,8 +187,8 @@ def define_D_net(opt_net, img_sz=None, wrap=False):
         netD = SRGAN_arch.RefDiscriminatorVgg128(in_nc=opt_net['in_nc'], nf=opt_net['nf'], input_img_factor=img_sz / 128)
     elif which_model == "psnr_approximator":
         netD = SRGAN_arch.PsnrApproximator(nf=opt_net['nf'], input_img_factor=img_sz / 128)
-    elif which_model == "rrdb_disc":
-        netD = SRGAN_arch.RRDBDiscriminator(in_nc=3, nf=opt_net['nf'])
+    elif which_model == "pyramid_disc":
+        netD = SRGAN_arch.PyramidDiscriminator(in_nc=3, nf=opt_net['nf'])
     else:
         raise NotImplementedError('Discriminator model [{:s}] not recognized'.format(which_model))
     return netD
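Config-wise, the rename only touches the discriminator selector. A hedged sketch of the opt_net fields this branch reads follows; the selector key name (e.g. which_model_D) is assumed from the surrounding codebase and is not shown in this hunk.

# Assumed options fragment for the new branch; only 'nf' is actually consumed
# by the pyramid_disc case, since in_nc is hard-coded to 3 in the call above.
opt_net = {
    'which_model_D': 'pyramid_disc',   # previously 'rrdb_disc'
    'in_nc': 3,
    'nf': 64,
}

Existing configs that still request rrdb_disc will now fall through to the NotImplementedError branch.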