From 8a4eb8241db5746a47f2d660e1e5e2fb63dc1387 Mon Sep 17 00:00:00 2001 From: James Betker Date: Tue, 7 Jul 2020 13:46:40 -0600 Subject: [PATCH] SRG3 work Operates on top of a pre-trained SpineNET backbone (trained on CoCo 2017 with RetinaNet) This variant is extremely shallow. --- .../archs/SwitchedResidualGenerator_arch.py | 112 ++++++++++-------- codes/models/archs/spinenet_arch.py | 61 +++++++++- codes/models/networks.py | 7 +- codes/train.py | 2 +- codes/utils/numeric_stability.py | 6 + 5 files changed, 127 insertions(+), 61 deletions(-) diff --git a/codes/models/archs/SwitchedResidualGenerator_arch.py b/codes/models/archs/SwitchedResidualGenerator_arch.py index c9bb9ea2..3ebb6ea5 100644 --- a/codes/models/archs/SwitchedResidualGenerator_arch.py +++ b/codes/models/archs/SwitchedResidualGenerator_arch.py @@ -78,20 +78,36 @@ class ConvBasisMultiplexer(nn.Module): return x -class SpineNetMultiplexer(nn.Module): - def __init__(self, input_channels, transform_count): - super(SpineNetMultiplexer, self).__init__() - self.backbone = SpineNet('49', in_channels=input_channels) - self.rdc1 = ConvBnSilu(256, 128, kernel_size=3, bias=False) - self.rdc2 = ConvBnSilu(128, 64, kernel_size=3, bias=False) - self.rdc3 = ConvBnSilu(64, transform_count, bias=False, bn=False, relu=False) +class CachedBackboneWrapper: + def __init__(self, backbone: nn.Module): + self.backbone = backbone + + def __call__(self, *args): + self.cache = self.backbone(*args) + return self.cache + + def get_forward_result(self): + return self.cache + + +class BackboneMultiplexer(nn.Module): + def __init__(self, backbone: CachedBackboneWrapper, transform_count): + super(BackboneMultiplexer, self).__init__() + self.backbone = backbone + self.proc = nn.Sequential(ConvBnSilu(256, 256, kernel_size=3, bias=True), + ConvBnSilu(256, 256, kernel_size=3, bias=False)) + self.up1 = nn.Sequential(ConvBnSilu(256, 128, kernel_size=3, bias=False, bn=False, silu=False), + ConvBnSilu(128, 128, kernel_size=3, bias=False)) + self.up2 = nn.Sequential(ConvBnSilu(128, 64, kernel_size=3, bias=False, bn=False, silu=False), + ConvBnSilu(64, 64, kernel_size=3, bias=False)) + self.final = ConvBnSilu(64, transform_count, bias=False, bn=False, silu=False) def forward(self, x): - spine = self.backbone(x) - feat = self.rdc1(spine[0]) - feat = self.rdc2(feat) - feat = self.rdc3(feat) - return feat + spine = self.backbone.get_forward_result() + feat = self.proc(spine[0]) + feat = self.up1(F.interpolate(feat, scale_factor=2, mode="nearest")) + feat = self.up2(F.interpolate(feat, scale_factor=2, mode="nearest")) + return self.final(feat) class ConfigurableSwitchComputer(nn.Module): @@ -233,55 +249,56 @@ class Interpolate(nn.Module): class ConfigurableSwitchedResidualGenerator3(nn.Module): - def __init__(self, trans_counts, - trans_kernel_sizes, - trans_layers, transformation_filters, initial_temp=20, final_temperature_step=50000, + def __init__(self, base_filters, trans_count, initial_temp=20, final_temperature_step=50000, heightened_temp_min=1, - heightened_final_step=50000, upsample_factor=1, enable_negative_transforms=False, - add_scalable_noise_to_transforms=False): + heightened_final_step=50000, upsample_factor=4): super(ConfigurableSwitchedResidualGenerator3, self).__init__() - switches = [] - for trans_count, kernel, layers in zip(trans_counts, trans_kernel_sizes, trans_layers): - multiplx_fn = functools.partial(SpineNetMultiplexer, 3) - switches.append(ConfigurableSwitchComputer(base_filters=3, multiplexer_net=multiplx_fn, - 
pre_transform_block=functools.partial(nn.Sequential, - ConvBnLelu(3, transformation_filters, kernel_size=1, stride=4, bn=False, lelu=False, bias=False), - ResidualDenseBlock_5C( - transformation_filters), - ResidualDenseBlock_5C( - transformation_filters)), - transform_block=functools.partial(nn.Sequential, - ResidualDenseBlock_5C(transformation_filters), - Interpolate(4), - ConvBnLelu(transformation_filters, transformation_filters // 2, kernel_size=3, bias=False, bn=False), - ConvBnLelu(transformation_filters // 2, 3, kernel_size=1, bias=False, bn=False, lelu=False)), - transform_count=trans_count, init_temp=initial_temp, - enable_negative_transforms=enable_negative_transforms, - add_scalable_noise_to_transforms=add_scalable_noise_to_transforms, - init_scalar=.01)) + self.initial_conv = ConvBnLelu(3, base_filters, bn=False, lelu=False, bias=True) + self.sw_conv = ConvBnLelu(base_filters, base_filters, lelu=False, bias=True) + self.upconv1 = ConvBnLelu(base_filters, base_filters, bn=False, bias=True) + self.upconv2 = ConvBnLelu(base_filters, base_filters, bn=False, bias=True) + self.hr_conv = ConvBnLelu(base_filters, base_filters, bn=False, bias=True) + self.final_conv = ConvBnLelu(base_filters, 3, bn=False, lelu=False, bias=True) - self.switches = nn.ModuleList(switches) - self.transformation_counts = trans_counts + self.backbone = SpineNet('49', in_channels=3, use_input_norm=True) + for p in self.backbone.parameters(recurse=True): + p.requires_grad = False + self.backbone_wrapper = CachedBackboneWrapper(self.backbone) + multiplx_fn = functools.partial(BackboneMultiplexer, self.backbone_wrapper) + pretransform_fn = functools.partial(nn.Sequential, ConvBnLelu(base_filters, base_filters, kernel_size=3, bn=False, lelu=False, bias=False)) + transform_fn = functools.partial(MultiConvBlock, base_filters, int(base_filters * 1.5), base_filters, kernel_size=3, depth=4) + self.switch = ConfigurableSwitchComputer(base_filters, multiplx_fn, pretransform_fn, transform_fn, trans_count, init_temp=initial_temp, + enable_negative_transforms=False, add_scalable_noise_to_transforms=True, init_scalar=.1) + + self.transformation_counts = trans_count self.init_temperature = initial_temp self.final_temperature_step = final_temperature_step self.heightened_temp_min = heightened_temp_min self.heightened_final_step = heightened_final_step self.attentions = None self.upsample_factor = upsample_factor + self.backbone_forward = None + + def get_forward_results(self): + return self.backbone_forward def forward(self, x): - if self.upsample_factor > 1: - x = F.interpolate(x, scale_factor=self.upsample_factor, mode="nearest") + self.backbone_forward = self.backbone_wrapper(F.interpolate(x, scale_factor=2, mode="nearest")) + + x = self.initial_conv(x) self.attentions = [] - for i, sw in enumerate(self.switches): - x, att = sw.forward(x, True) - self.attentions.append(att) + x, att = self.switch(x, output_attention_weights=True) + self.attentions.append(att) - return x, + x = self.upconv1(F.interpolate(x, scale_factor=2, mode="nearest")) + if self.upsample_factor > 2: + x = F.interpolate(x, scale_factor=2, mode="nearest") + x = self.upconv2(x) + return self.final_conv(self.hr_conv(x)), def set_temperature(self, temp): - [sw.set_temperature(temp) for sw in self.switches] + self.switch.set_temperature(temp) def update_for_step(self, step, experiments_path='.'): if self.attentions: @@ -299,11 +316,10 @@ class ConfigurableSwitchedResidualGenerator3(nn.Module): temp = 1 / temp self.set_temperature(temp) if step % 50 == 0: - 
[save_attention_to_image(experiments_path, self.attentions[i], self.transformation_counts[i], step, - "a%i" % (i + 1,)) for i in range(len(self.switches))] + save_attention_to_image(experiments_path, self.attentions[0], self.transformation_counts, step, "a%i" % (1,), l_mult=10) def get_debug_values(self, step): - temp = self.switches[0].switch.temperature + temp = self.switch.switch.temperature mean_hists = [compute_attention_specificity(att, 2) for att in self.attentions] means = [i[0] for i in mean_hists] hists = [i[1].clone().detach().cpu().flatten() for i in mean_hists] diff --git a/codes/models/archs/spinenet_arch.py b/codes/models/archs/spinenet_arch.py index 7fcd9b17..2e30b82b 100644 --- a/codes/models/archs/spinenet_arch.py +++ b/codes/models/archs/spinenet_arch.py @@ -1,12 +1,48 @@ # Taken and modified from https://github.com/lucifer443/SpineNet-Pytorch/blob/master/mmdet/models/backbones/spinenet.py +import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.init import kaiming_normal from torchvision.models.resnet import BasicBlock, Bottleneck from torch.nn.modules.batchnorm import _BatchNorm -from models.archs.arch_util import ConvBnRelu + + +''' Convenience class with Conv->BN->ReLU. Includes weight initialization and auto-padding for standard + kernel sizes. ''' +class ConvBnRelu(nn.Module): + def __init__(self, filters_in, filters_out, kernel_size=3, stride=1, relu=True, bn=True, bias=True): + super(ConvBnRelu, self).__init__() + padding_map = {1: 0, 3: 1, 5: 2, 7: 3} + assert kernel_size in padding_map.keys() + self.conv = nn.Conv2d(filters_in, filters_out, kernel_size, stride, padding_map[kernel_size], bias=bias) + if bn: + self.bn = nn.BatchNorm2d(filters_out) + else: + self.bn = None + if relu: + self.relu = nn.ReLU() + else: + self.relu = None + + # Init params. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu' if self.relu else 'linear') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x = self.conv(x) + if self.bn: + x = self.bn(x) + if self.relu: + return self.relu(x) + else: + return x + def constant_init(module, val, bias=0): if hasattr(module, 'weight') and module.weight is not None: @@ -213,7 +249,11 @@ class SpineNet(nn.Module): arch, in_channels=3, output_level=[3, 4, 5, 6, 7], - zero_init_residual=True): + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + zero_init_residual=True, + activation='relu', + use_input_norm=False): super(SpineNet, self).__init__() self._block_specs = build_block_specs()[2:] self._endpoints_num_filters = SCALING_MAP[arch]['endpoints_num_filters'] @@ -225,6 +265,7 @@ class SpineNet(nn.Module): self.zero_init_residual = zero_init_residual assert min(output_level) > 2 and max(output_level) < 8, "Output level out of range" self.output_level = output_level + self.use_input_norm = use_input_norm self._make_stem_layer(in_channels) self._make_scale_permuted_network() @@ -237,7 +278,8 @@ class SpineNet(nn.Module): in_channels, 64, kernel_size=7, - stride=2) # Original paper had stride=2 and a maxpool after. + stride=2) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) # Build the initial level 2 blocks. 
self.init_block1 = make_res_layer( @@ -286,12 +328,19 @@ class SpineNet(nn.Module): if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): - constant_init(m.norm3, 0) + constant_init(m.bn3, 0) elif isinstance(m, BasicBlock): - constant_init(m.norm2, 0) + constant_init(m.bn2, 0) def forward(self, input): - feat = self.conv1(input) + # Spinenet is pretrained on the standard pytorch input norm. The image will need to + # be normalized before feeding it through. + if self.use_input_norm: + mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(input.device) + std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(input.device) + input = (input - mean) / std + + feat = self.maxpool(self.conv1(input)) feat1 = self.init_block1(feat) feat2 = self.init_block2(feat1) block_feats = [feat1, feat2] diff --git a/codes/models/networks.py b/codes/models/networks.py index ecb61a83..23270e72 100644 --- a/codes/models/networks.py +++ b/codes/models/networks.py @@ -68,12 +68,7 @@ def define_G(opt, net_key='network_G'): heightened_temp_min=opt_net['heightened_temp_min'], heightened_final_step=opt_net['heightened_final_step'], upsample_factor=scale, add_scalable_noise_to_transforms=opt_net['add_noise']) elif which_model == "ConfigurableSwitchedResidualGenerator3": - netG = SwitchedGen_arch.ConfigurableSwitchedResidualGenerator3(trans_counts=opt_net['trans_counts'], - trans_kernel_sizes=opt_net['trans_kernel_sizes'], trans_layers=opt_net['trans_layers'], - transformation_filters=opt_net['transformation_filters'], - initial_temp=opt_net['temperature'], final_temperature_step=opt_net['temperature_final_step'], - heightened_temp_min=opt_net['heightened_temp_min'], heightened_final_step=opt_net['heightened_final_step'], - upsample_factor=scale, add_scalable_noise_to_transforms=opt_net['add_noise']) + netG = SwitchedGen_arch.ConfigurableSwitchedResidualGenerator3(base_filters=opt_net['base_filters'], trans_count=opt_net['trans_count']) elif which_model == "NestedSwitchGenerator": netG = ng.NestedSwitchedGenerator(switch_filters=opt_net['switch_filters'], switch_reductions=opt_net['switch_reductions'], diff --git a/codes/train.py b/codes/train.py index 15c525f5..e429b815 100644 --- a/codes/train.py +++ b/codes/train.py @@ -33,7 +33,7 @@ def init_dist(backend='nccl', **kwargs): def main(): #### options parser = argparse.ArgumentParser() - parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_div2k_rrdb.yml') + parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_div2k_srg3.yml') parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) diff --git a/codes/utils/numeric_stability.py b/codes/utils/numeric_stability.py index 6bcd4a3c..28b3c0d6 100644 --- a/codes/utils/numeric_stability.py +++ b/codes/utils/numeric_stability.py @@ -97,6 +97,7 @@ if __name__ == "__main__": torch.randn(1, 3, 64, 64), device='cuda') ''' + ''' test_stability(functools.partial(srg.ConfigurableSwitchedResidualGenerator2, switch_filters=[32,32,32,32], switch_growths=[16,16,16,16], @@ -110,6 +111,7 @@ if __name__ == "__main__": torch.randn(1, 3, 64, 64), device='cuda') ''' + ''' test_stability(functools.partial(srg1.ConfigurableSwitchedResidualGenerator, switch_filters=[32,32,32,32], switch_growths=[16,16,16,16], @@ -123,3 +125,7 @@ if __name__ == "__main__": torch.randn(1, 3, 64, 64), device='cuda') ''' + 
test_stability(functools.partial(srg.ConfigurableSwitchedResidualGenerator3, + 64, 16), + torch.randn(1, 3, 64, 64), + device='cuda') \ No newline at end of file
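
Note: for reference, a minimal usage sketch of the new SRG3 generator, mirroring the numeric_stability harness above. The base_filters=64 / trans_count=16 values and the 4x output size are taken from the defaults used in this patch and are illustrative, not required; a pretrained SpineNet checkpoint would normally be loaded separately before real use.

    # Assumes execution from the codes/ directory, as with train.py and numeric_stability.py.
    import torch
    import models.archs.SwitchedResidualGenerator_arch as srg

    netG = srg.ConfigurableSwitchedResidualGenerator3(base_filters=64, trans_count=16)
    netG.eval()
    with torch.no_grad():
        lr = torch.randn(1, 3, 64, 64)   # LR input; the frozen SpineNet backbone sees a 2x-upsampled copy internally
        sr, = netG(lr)                   # forward() returns a 1-tuple
    print(sr.shape)                      # expected: torch.Size([1, 3, 256, 256]) with the default upsample_factor=4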