From db52bec4ab51b7109ce7255f9f8e849ba61f79b3 Mon Sep 17 00:00:00 2001 From: James Betker Date: Mon, 28 Sep 2020 22:08:31 -0600 Subject: [PATCH] spsr6 This is meant to be a variant of SPSR5 that harkens back to the simpler earlier architectures that do not have embeddings or ref_ inputs, but do have deep multiplexers. It does, however, use some of the new conjoin mechanisms. --- codes/models/archs/SPSR_arch.py | 126 +++++++++++++++++- .../archs/SwitchedResidualGenerator_arch.py | 42 ++++++ codes/models/networks.py | 5 + 3 files changed, 172 insertions(+), 1 deletion(-) diff --git a/codes/models/archs/SPSR_arch.py b/codes/models/archs/SPSR_arch.py index ba628274..1c8e7d7f 100644 --- a/codes/models/archs/SPSR_arch.py +++ b/codes/models/archs/SPSR_arch.py @@ -5,9 +5,10 @@ import torch.nn.functional as F from models.archs import SPSR_util as B from .RRDBNet_arch import RRDB from models.archs.arch_util import ConvGnLelu, UpconvBlock, ConjoinBlock, ConvGnSilu, MultiConvBlock, ReferenceJoinBlock -from models.archs.SwitchedResidualGenerator_arch import ConvBasisMultiplexer, ConfigurableSwitchComputer, ReferencingConvMultiplexer, ReferenceImageBranch, AdaInConvBlock, ProcessingBranchWithStochasticity, EmbeddingMultiplexer, QueryKeyMultiplexer +from models.archs.SwitchedResidualGenerator_arch import ConvBasisMultiplexer, ConfigurableSwitchComputer, ReferencingConvMultiplexer, ReferenceImageBranch, AdaInConvBlock, ProcessingBranchWithStochasticity, EmbeddingMultiplexer, QueryKeyMultiplexer, QueryKeyPyramidMultiplexer from switched_conv_util import save_attention_to_image_rgb from switched_conv import compute_attention_specificity +from torch.utils.checkpoint import checkpoint import functools import os import torchvision @@ -812,3 +813,126 @@ class Spsr5(nn.Module): return val +class Spsr6(nn.Module): + def __init__(self, in_nc, out_nc, nf, xforms=8, upscale=4, multiplexer_reductions=3, init_temperature=10): + super(Spsr6, self).__init__() + n_upscale = int(math.log(upscale, 2)) + + # switch options + transformation_filters = nf + self.transformation_counts = xforms + multiplx_fn = functools.partial(QueryKeyPyramidMultiplexer, transformation_filters, reductions=multiplexer_reductions) + transform_fn = functools.partial(MultiConvBlock, transformation_filters, int(transformation_filters * 1.5), + transformation_filters, kernel_size=3, depth=3, + weight_init_factor=.1) + + # Feature branch + self.model_fea_conv = ConvGnLelu(in_nc, nf, kernel_size=7, norm=False, activation=False) + self.sw1 = ConfigurableSwitchComputer(transformation_filters, multiplx_fn, + pre_transform_block=None, transform_block=transform_fn, + attention_norm=True, + transform_count=self.transformation_counts, init_temp=init_temperature, + add_scalable_noise_to_transforms=False, feed_transforms_into_multiplexer=True) + self.sw2 = ConfigurableSwitchComputer(transformation_filters, multiplx_fn, + pre_transform_block=None, transform_block=transform_fn, + attention_norm=True, + transform_count=self.transformation_counts, init_temp=init_temperature, + add_scalable_noise_to_transforms=False, feed_transforms_into_multiplexer=True) + self.feature_lr_conv = ConvGnLelu(nf, nf, kernel_size=3, norm=True, activation=False) + self.feature_lr_conv2 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=False, bias=False) + + # Grad branch. Note - groupnorm on this branch is REALLY bad. Avoid it like the plague. 
+        self.get_g_nopadding = ImageGradientNoPadding()
+        self.grad_conv = ConvGnLelu(in_nc, nf, kernel_size=3, norm=False, activation=False, bias=False)
+        self.grad_ref_join = ReferenceJoinBlock(nf, residual_weight_init_factor=.3, final_norm=False)
+        self.sw_grad = ConfigurableSwitchComputer(transformation_filters, multiplx_fn,
+                                                  pre_transform_block=None, transform_block=transform_fn,
+                                                  attention_norm=True,
+                                                  transform_count=self.transformation_counts // 2, init_temp=init_temperature,
+                                                  add_scalable_noise_to_transforms=False, feed_transforms_into_multiplexer=True)
+        self.grad_lr_conv = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True, bias=True)
+        self.grad_lr_conv2 = ConvGnLelu(nf, nf, kernel_size=1, norm=False, activation=True, bias=True)
+        self.upsample_grad = nn.Sequential(*[UpconvBlock(nf, nf, block=ConvGnLelu, norm=False, activation=True, bias=False) for _ in range(n_upscale)])
+        self.grad_branch_output_conv = ConvGnLelu(nf, out_nc, kernel_size=1, norm=False, activation=False, bias=True)
+
+        # Join branch (grad+fea)
+        self.noise_ref_join_conjoin = ReferenceJoinBlock(nf, residual_weight_init_factor=.1)
+        self.conjoin_ref_join = ReferenceJoinBlock(nf, residual_weight_init_factor=.3)
+        self.conjoin_sw = ConfigurableSwitchComputer(transformation_filters, multiplx_fn,
+                                                     pre_transform_block=None, transform_block=transform_fn,
+                                                     attention_norm=True,
+                                                     transform_count=self.transformation_counts, init_temp=init_temperature,
+                                                     add_scalable_noise_to_transforms=False, feed_transforms_into_multiplexer=True)
+        self.final_lr_conv = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=True, bias=True)
+        self.upsample = nn.Sequential(*[UpconvBlock(nf, nf, block=ConvGnLelu, norm=False, activation=True, bias=True) for _ in range(n_upscale)])
+        self.final_hr_conv1 = ConvGnLelu(nf, nf, kernel_size=3, norm=False, activation=False, bias=True)
+        self.final_hr_conv2 = ConvGnLelu(nf, out_nc, kernel_size=1, norm=False, activation=False, bias=False)
+        self.switches = [self.sw1, self.sw2, self.sw_grad, self.conjoin_sw]
+        self.attentions = None
+        self.init_temperature = init_temperature
+        self.final_temperature_step = 10000
+        self.lr = None
+
+    def forward(self, x):
+        # The attention_maps debugger outputs <x>. Save that here.
+        self.lr = x.detach().cpu()
+
+        x_grad = self.get_g_nopadding(x)
+
+        x = self.model_fea_conv(x)
+        x1 = x
+        x1, a1 = self.sw1(x1, True, identity=x)
+
+        x2 = x1
+        x2, a2 = self.sw2(x2, True, identity=x1)
+
+        x_grad = self.grad_conv(x_grad)
+        x_grad_identity = x_grad
+        x_grad, grad_fea_std = self.grad_ref_join(x_grad, x1)
+        x_grad, a3 = self.sw_grad(x_grad, True, identity=x_grad_identity)
+        x_grad = self.grad_lr_conv(x_grad)
+        x_grad = self.grad_lr_conv2(x_grad)
+        x_grad_out = self.upsample_grad(x_grad)
+        x_grad_out = self.grad_branch_output_conv(x_grad_out)
+
+        x_out = x2
+        x_out, fea_grad_std = self.conjoin_ref_join(x_out, x_grad)
+        x_out, a4 = self.conjoin_sw(x_out, True, identity=x2)
+        x_out = self.final_lr_conv(x_out)
+        x_out = checkpoint(self.upsample, x_out)
+        x_out = checkpoint(self.final_hr_conv1, x_out)
+        x_out = self.final_hr_conv2(x_out)
+
+        self.attentions = [a1, a2, a3, a4]
+        self.grad_fea_std = grad_fea_std.detach().cpu()
+        self.fea_grad_std = fea_grad_std.detach().cpu()
+        return x_grad_out, x_out, x_grad
+
+    def set_temperature(self, temp):
+        [sw.set_temperature(temp) for sw in self.switches]
+
+    def update_for_step(self, step, experiments_path='.'):
+        if self.attentions:
+            temp = max(1, 1 + self.init_temperature *
+                       (self.final_temperature_step - step) / self.final_temperature_step)
+            self.set_temperature(temp)
+            if step % 500 == 0:
+                output_path = os.path.join(experiments_path, "attention_maps")
+                prefix = "amap_%i_a%i_%%i.png"
+                [save_attention_to_image_rgb(output_path, self.attentions[i], self.transformation_counts, prefix % (step, i), step, output_mag=False) for i in range(len(self.attentions))]
+                torchvision.utils.save_image(self.lr, os.path.join(experiments_path, "attention_maps", "amap_%i_base_image.png" % (step,)))
+
+    def get_debug_values(self, step, net_name):
+        temp = self.switches[0].switch.temperature
+        mean_hists = [compute_attention_specificity(att, 2) for att in self.attentions]
+        means = [i[0] for i in mean_hists]
+        hists = [i[1].clone().detach().cpu().flatten() for i in mean_hists]
+        val = {"switch_temperature": temp,
+               "grad_branch_feat_intg_std_dev": self.grad_fea_std,
+               "conjoin_branch_grad_intg_std_dev": self.fea_grad_std}
+        for i in range(len(means)):
+            val["switch_%i_specificity" % (i,)] = means[i]
+            val["switch_%i_histogram" % (i,)] = hists[i]
+        return val
+
+
diff --git a/codes/models/archs/SwitchedResidualGenerator_arch.py b/codes/models/archs/SwitchedResidualGenerator_arch.py
index f9bf0799..40ad3930 100644
--- a/codes/models/archs/SwitchedResidualGenerator_arch.py
+++ b/codes/models/archs/SwitchedResidualGenerator_arch.py
@@ -571,7 +571,49 @@ class QueryKeyMultiplexer(nn.Module):
         v = self.cbl2(v)
         return v.view(b, t, h, w)
+
+class QueryKeyPyramidMultiplexer(nn.Module):
+    def __init__(self, nf, multiplexer_channels, reductions=3):
+        super(QueryKeyPyramidMultiplexer, self).__init__()
+
+        # Blocks used to create the query
+        self.input_process = ConvGnSilu(nf, nf, activation=True, norm=False, bias=True)
+        self.reduction_blocks = nn.ModuleList([HalvingProcessingBlock(nf * 2 ** i) for i in range(reductions)])
+        reduction_filters = nf * 2 ** reductions
+        self.processing_blocks = nn.Sequential(OrderedDict([('block%i' % (i,), ConvGnSilu(reduction_filters, reduction_filters, kernel_size=1, norm=True, bias=False)) for i in range(3)]))
+        self.expansion_blocks = nn.ModuleList([ExpansionBlock2(reduction_filters // (2 ** i)) for i in range(reductions)])
+
+        # Blocks used to create the key
+        self.key_process = ConvGnSilu(nf, nf, kernel_size=1, activation=True, norm=False, bias=True)
+
+        # Postprocessing blocks.
+        self.query_key_combine = ConvGnSilu(nf*2, nf, kernel_size=3, activation=True, norm=False, bias=False)
+        self.cbl0 = ConvGnSilu(nf, nf, kernel_size=3, activation=True, norm=True, bias=False)
+        self.cbl1 = ConvGnSilu(nf, nf // 2, kernel_size=1, norm=True, bias=False, num_groups=4)
+        self.cbl2 = ConvGnSilu(nf // 2, 1, kernel_size=1, norm=False, bias=False)
+
+    def forward(self, x, transformations):
+        q = self.input_process(x)
+        reduction_identities = []
+        for b in self.reduction_blocks:
+            reduction_identities.append(q)
+            q = b(q)
+        q = self.processing_blocks(q)
+        for i, b in enumerate(self.expansion_blocks):
+            q = b(q, reduction_identities[-i - 1])
+
+        b, t, f, h, w = transformations.shape
+        k = transformations.view(b * t, f, h, w)
+        k = self.key_process(k)
+
+        q = q.view(b, 1, f, h, w).repeat(1, t, 1, 1, 1).view(b * t, f, h, w)
+        v = self.query_key_combine(torch.cat([q, k], dim=1))
+        v = self.cbl0(v)
+        v = self.cbl1(v)
+        v = self.cbl2(v)
+
+        return v.view(b, t, h, w)
 
 if __name__ == '__main__':
     bb = BackboneEncoder(64)
diff --git a/codes/models/networks.py b/codes/models/networks.py
index f06c87c0..1dfb8600 100644
--- a/codes/models/networks.py
+++ b/codes/models/networks.py
@@ -73,6 +73,11 @@ def define_G(opt, net_key='network_G', scale=None):
         netG = spsr.Spsr5(in_nc=3, out_nc=3, nf=opt_net['nf'], xforms=xforms, upscale=opt_net['scale'],
                           multiplexer_reductions=opt_net['multiplexer_reductions'] if 'multiplexer_reductions' in opt_net.keys() else 2,
                           init_temperature=opt_net['temperature'] if 'temperature' in opt_net.keys() else 10)
+    elif which_model == "spsr6":
+        xforms = opt_net['num_transforms'] if 'num_transforms' in opt_net.keys() else 8
+        netG = spsr.Spsr6(in_nc=3, out_nc=3, nf=opt_net['nf'], xforms=xforms, upscale=opt_net['scale'],
+                          multiplexer_reductions=opt_net['multiplexer_reductions'] if 'multiplexer_reductions' in opt_net.keys() else 3,
+                          init_temperature=opt_net['temperature'] if 'temperature' in opt_net.keys() else 10)
     elif which_model == "ssgr1":
         xforms = opt_net['num_transforms'] if 'num_transforms' in opt_net.keys() else 8
         netG = ssg.SSGr1(in_nc=3, out_nc=3, nf=opt_net['nf'], xforms=xforms, upscale=opt_net['scale'],
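
For reference, the "spsr6" branch added to define_G above is reached through the options file. Below is a sketch of the corresponding network_G fragment, written as the Python dict define_G receives. This is an illustration only, not part of the patch: 'which_model_G' is assumed to be the key that populates which_model, and nf=64 / scale=4 are example values.

    # Hypothetical options fragment (sketch only).
    opt_net = {
        'which_model_G': 'spsr6',      # assumed key read into `which_model` by define_G
        'nf': 64,                      # required: base filter count passed to Spsr6
        'scale': 4,                    # required: upscale factor forwarded as `upscale`
        'num_transforms': 8,           # optional: falls back to 8 when absent
        'multiplexer_reductions': 3,   # optional: falls back to 3 when absent
        'temperature': 10,             # optional: initial switch temperature, falls back to 10
    }

Only nf and scale are strictly required by the branch shown above; the other keys use the listed fallbacks.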
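A minimal usage sketch of the generator itself follows. It assumes the repo's codes/ directory is on the import path and its dependencies (including the external switched_conv package) are installed; nf=64 and the 32x32 input are arbitrary example values, and the remaining arguments mirror the fallbacks above.

    # Hypothetical usage sketch (not part of the patch).
    import torch
    from models.archs.SPSR_arch import Spsr6

    netG = Spsr6(in_nc=3, out_nc=3, nf=64, xforms=8, upscale=4,
                 multiplexer_reductions=3, init_temperature=10)

    lr = torch.randn(1, 3, 32, 32)          # example low-resolution batch
    grad_img, sr_img, grad_feat = netG(lr)  # forward() returns three tensors:
    # grad_img:  upsampled gradient-branch image, (1, 3, 128, 128) here
    # sr_img:    the super-resolved output,       (1, 3, 128, 128) here
    # grad_feat: LR-resolution gradient features, (1, 64, 32, 32) here

    # Training hooks added by the class: update_for_step() anneals the shared
    # switch temperature (and dumps attention maps under experiments_path when
    # step is a multiple of 500); get_debug_values() returns the temperature,
    # the two join std-devs and per-switch specificity/histogram stats.
    netG.update_for_step(step=1250, experiments_path='/tmp/experiments')
    debug = netG.get_debug_values(step=1250, net_name='generator')

With the defaults shown, the temperature anneals linearly from init_temperature + 1 at step 0 down to 1 at final_temperature_step (10,000); at step 5,000 it is 6.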