From 9fee1cec713b0392ca567dc9565ce15ca7064205 Mon Sep 17 00:00:00 2001
From: James Betker
Date: Tue, 29 Sep 2020 17:19:59 -0600
Subject: [PATCH] spsr7 adjustments

---
 codes/models/archs/SPSR_arch.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/codes/models/archs/SPSR_arch.py b/codes/models/archs/SPSR_arch.py
index 3b8f2d67..02a2415f 100644
--- a/codes/models/archs/SPSR_arch.py
+++ b/codes/models/archs/SPSR_arch.py
@@ -528,16 +528,16 @@ class Spsr7(nn.Module):
                                               transform_count=self.transformation_counts, init_temp=init_temperature,
                                               add_scalable_noise_to_transforms=False, feed_transforms_into_multiplexer=True)
         self.sw1_out = nn.Sequential(ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True),
-                                     ConvGnLelu(nf, 3, kernel_size=1, norm=False, activation=False, bias=True))
+                                     ConvGnLelu(nf, 3, kernel_size=1, norm=False, activation=False))
         self.sw2 = ConfigurableSwitchComputer(transformation_filters, multiplx_fn,
                                               pre_transform_block=None, transform_block=transform_fn,
                                               attention_norm=True,
                                               transform_count=self.transformation_counts, init_temp=init_temperature,
                                               add_scalable_noise_to_transforms=False, feed_transforms_into_multiplexer=True)
-        self.feature_lr_conv = ConvGnLelu(nf, nf, kernel_size=3, norm=True, activation=False)
+        self.feature_lr_conv = ConvGnLelu(nf, nf, kernel_size=3, norm=True, activation=False, bias=False)
         self.feature_lr_conv2 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=False, bias=False)
         self.sw2_out = nn.Sequential(ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True),
-                                     ConvGnLelu(nf, 3, kernel_size=1, norm=False, activation=False, bias=True))
+                                     ConvGnLelu(nf, 3, kernel_size=1, norm=False, activation=False))
 
         # Grad branch. Note - groupnorm on this branch is REALLY bad. Avoid it like the plague.
         self.get_g_nopadding = ImageGradientNoPadding()
@@ -587,6 +587,7 @@ class Spsr7(nn.Module):
 
         x2 = x1
         x2, a2 = self.sw2(x2, True, identity=x1, att_in=(x2, ref_embedding))
+        x2 = self.feature_lr_conv2(self.feature_lr_conv(x2))
         s2out = self.sw2_out(x2)
 
         x_grad = self.grad_conv(x_grad)
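
Note: the sketch below is an illustrative, self-contained approximation of the post-patch wiring, not the repository's code. ConvGnLelu is not reproduced here; conv_gn_lelu, nf=64, and the random input tensor are stand-ins chosen for this example. It only shows the net effect of the hunks: the 1x1 output convolutions now rely on the block's default bias handling instead of an explicit bias=True, feature_lr_conv is built with bias=False, and x2 is routed through feature_lr_conv and feature_lr_conv2 before sw2_out.

    import torch
    import torch.nn as nn

    nf = 64  # assumed filter count for the sketch

    def conv_gn_lelu(in_c, out_c, kernel_size=3, norm=False, activation=False, bias=True):
        # Hypothetical stand-in for ConvGnLelu: Conv2d + optional GroupNorm + optional LeakyReLU.
        layers = [nn.Conv2d(in_c, out_c, kernel_size, padding=kernel_size // 2, bias=bias)]
        if norm:
            layers.append(nn.GroupNorm(8, out_c))
        if activation:
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        return nn.Sequential(*layers)

    # Block definitions mirroring the '+' lines of the diff.
    feature_lr_conv = conv_gn_lelu(nf, nf, kernel_size=3, norm=True, activation=False, bias=False)
    feature_lr_conv2 = conv_gn_lelu(nf, nf, kernel_size=3, norm=False, activation=False, bias=False)
    sw2_out = nn.Sequential(conv_gn_lelu(nf, nf, kernel_size=3, norm=False, activation=True),
                            conv_gn_lelu(nf, 3, kernel_size=1, norm=False, activation=False))

    # Post-patch forward path for the x2 branch (the sw2 switch itself is omitted).
    x2 = torch.randn(1, nf, 32, 32)             # stands in for the output of self.sw2
    x2 = feature_lr_conv2(feature_lr_conv(x2))  # new step inserted by this patch
    s2out = sw2_out(x2)                         # 3-channel RGB-like output
    print(s2out.shape)                          # torch.Size([1, 3, 32, 32])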