SRG3 work
Operates on top of a pre-trained SpineNet backbone (trained on COCO 2017 with RetinaNet). This variant is extremely shallow.
parent 0acad81035
commit 8a4eb8241d
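The core of this change is a cache-and-reuse pattern around a frozen backbone: the generator runs SpineNet once per forward pass through a small wrapper, and the switch multiplexer reads the cached features instead of invoking the backbone again. Below is a minimal, runnable sketch of that pattern; TinyBackbone and DummyMultiplexer are stand-ins invented for this illustration only (the commit itself uses a frozen SpineNet-49 with BackboneMultiplexer, as shown in the diff that follows).

# Illustrative sketch only (not part of the commit): shows the cache-and-reuse pattern.
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyBackbone(nn.Module):
    # Stand-in for the frozen SpineNet: returns a list of feature maps.
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)

    def forward(self, x):
        return [self.conv(x)]


class CachedBackboneWrapper:
    # Runs the backbone once and keeps the result so other modules can reuse it.
    def __init__(self, backbone: nn.Module):
        self.backbone = backbone
        self.cache = None

    def __call__(self, *args):
        self.cache = self.backbone(*args)
        return self.cache

    def get_forward_result(self):
        return self.cache


class DummyMultiplexer(nn.Module):
    # Reads the cached backbone features instead of re-running the backbone.
    def __init__(self, backbone: CachedBackboneWrapper):
        super().__init__()
        self.backbone = backbone
        self.head = nn.Conv2d(8, 4, kernel_size=1)

    def forward(self, x):
        feat = self.backbone.get_forward_result()[0]
        return self.head(feat)


backbone = TinyBackbone()
for p in backbone.parameters():
    p.requires_grad = False  # the commit freezes the backbone the same way
wrapper = CachedBackboneWrapper(backbone)
mux = DummyMultiplexer(wrapper)

x = torch.randn(1, 3, 64, 64)
wrapper(F.interpolate(x, scale_factor=2, mode="nearest"))  # one backbone pass, cached
attention_logits = mux(x)                                  # reuses the cached features
print(attention_logits.shape)  # torch.Size([1, 4, 128, 128])

The wrapper keeps the expensive backbone pass to a single call per generator forward, while the multiplexer that consumes its features stays trainable.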
@@ -78,20 +78,36 @@ class ConvBasisMultiplexer(nn.Module):
         return x


-class SpineNetMultiplexer(nn.Module):
-    def __init__(self, input_channels, transform_count):
-        super(SpineNetMultiplexer, self).__init__()
-        self.backbone = SpineNet('49', in_channels=input_channels)
-        self.rdc1 = ConvBnSilu(256, 128, kernel_size=3, bias=False)
-        self.rdc2 = ConvBnSilu(128, 64, kernel_size=3, bias=False)
-        self.rdc3 = ConvBnSilu(64, transform_count, bias=False, bn=False, relu=False)
+class CachedBackboneWrapper:
+    def __init__(self, backbone: nn.Module):
+        self.backbone = backbone
+
+    def __call__(self, *args):
+        self.cache = self.backbone(*args)
+        return self.cache
+
+    def get_forward_result(self):
+        return self.cache
+
+
+class BackboneMultiplexer(nn.Module):
+    def __init__(self, backbone: CachedBackboneWrapper, transform_count):
+        super(BackboneMultiplexer, self).__init__()
+        self.backbone = backbone
+        self.proc = nn.Sequential(ConvBnSilu(256, 256, kernel_size=3, bias=True),
+                                  ConvBnSilu(256, 256, kernel_size=3, bias=False))
+        self.up1 = nn.Sequential(ConvBnSilu(256, 128, kernel_size=3, bias=False, bn=False, silu=False),
+                                 ConvBnSilu(128, 128, kernel_size=3, bias=False))
+        self.up2 = nn.Sequential(ConvBnSilu(128, 64, kernel_size=3, bias=False, bn=False, silu=False),
+                                 ConvBnSilu(64, 64, kernel_size=3, bias=False))
+        self.final = ConvBnSilu(64, transform_count, bias=False, bn=False, silu=False)

     def forward(self, x):
-        spine = self.backbone(x)
-        feat = self.rdc1(spine[0])
-        feat = self.rdc2(feat)
-        feat = self.rdc3(feat)
-        return feat
+        spine = self.backbone.get_forward_result()
+        feat = self.proc(spine[0])
+        feat = self.up1(F.interpolate(feat, scale_factor=2, mode="nearest"))
+        feat = self.up2(F.interpolate(feat, scale_factor=2, mode="nearest"))
+        return self.final(feat)


 class ConfigurableSwitchComputer(nn.Module):
@@ -233,55 +249,56 @@ class Interpolate(nn.Module):


 class ConfigurableSwitchedResidualGenerator3(nn.Module):
-    def __init__(self, trans_counts,
-                 trans_kernel_sizes,
-                 trans_layers, transformation_filters, initial_temp=20, final_temperature_step=50000,
+    def __init__(self, base_filters, trans_count, initial_temp=20, final_temperature_step=50000,
                  heightened_temp_min=1,
-                 heightened_final_step=50000, upsample_factor=1, enable_negative_transforms=False,
-                 add_scalable_noise_to_transforms=False):
+                 heightened_final_step=50000, upsample_factor=4):
         super(ConfigurableSwitchedResidualGenerator3, self).__init__()
-        switches = []
-        for trans_count, kernel, layers in zip(trans_counts, trans_kernel_sizes, trans_layers):
-            multiplx_fn = functools.partial(SpineNetMultiplexer, 3)
-            switches.append(ConfigurableSwitchComputer(base_filters=3, multiplexer_net=multiplx_fn,
-                                pre_transform_block=functools.partial(nn.Sequential,
-                                    ConvBnLelu(3, transformation_filters, kernel_size=1, stride=4, bn=False, lelu=False, bias=False),
-                                    ResidualDenseBlock_5C(
-                                        transformation_filters),
-                                    ResidualDenseBlock_5C(
-                                        transformation_filters)),
-                                transform_block=functools.partial(nn.Sequential,
-                                    ResidualDenseBlock_5C(transformation_filters),
-                                    Interpolate(4),
-                                    ConvBnLelu(transformation_filters, transformation_filters // 2, kernel_size=3, bias=False, bn=False),
-                                    ConvBnLelu(transformation_filters // 2, 3, kernel_size=1, bias=False, bn=False, lelu=False)),
-                                transform_count=trans_count, init_temp=initial_temp,
-                                enable_negative_transforms=enable_negative_transforms,
-                                add_scalable_noise_to_transforms=add_scalable_noise_to_transforms,
-                                init_scalar=.01))
-
-        self.switches = nn.ModuleList(switches)
-        self.transformation_counts = trans_counts
+        self.initial_conv = ConvBnLelu(3, base_filters, bn=False, lelu=False, bias=True)
+        self.sw_conv = ConvBnLelu(base_filters, base_filters, lelu=False, bias=True)
+        self.upconv1 = ConvBnLelu(base_filters, base_filters, bn=False, bias=True)
+        self.upconv2 = ConvBnLelu(base_filters, base_filters, bn=False, bias=True)
+        self.hr_conv = ConvBnLelu(base_filters, base_filters, bn=False, bias=True)
+        self.final_conv = ConvBnLelu(base_filters, 3, bn=False, lelu=False, bias=True)
+        self.backbone = SpineNet('49', in_channels=3, use_input_norm=True)
+        for p in self.backbone.parameters(recurse=True):
+            p.requires_grad = False
+        self.backbone_wrapper = CachedBackboneWrapper(self.backbone)
+        multiplx_fn = functools.partial(BackboneMultiplexer, self.backbone_wrapper)
+        pretransform_fn = functools.partial(nn.Sequential, ConvBnLelu(base_filters, base_filters, kernel_size=3, bn=False, lelu=False, bias=False))
+        transform_fn = functools.partial(MultiConvBlock, base_filters, int(base_filters * 1.5), base_filters, kernel_size=3, depth=4)
+        self.switch = ConfigurableSwitchComputer(base_filters, multiplx_fn, pretransform_fn, transform_fn, trans_count, init_temp=initial_temp,
+                                                 enable_negative_transforms=False, add_scalable_noise_to_transforms=True, init_scalar=.1)
+
+        self.transformation_counts = trans_count
         self.init_temperature = initial_temp
         self.final_temperature_step = final_temperature_step
         self.heightened_temp_min = heightened_temp_min
         self.heightened_final_step = heightened_final_step
         self.attentions = None
         self.upsample_factor = upsample_factor
+        self.backbone_forward = None
+
+    def get_forward_results(self):
+        return self.backbone_forward

     def forward(self, x):
-        if self.upsample_factor > 1:
-            x = F.interpolate(x, scale_factor=self.upsample_factor, mode="nearest")
+        self.backbone_forward = self.backbone_wrapper(F.interpolate(x, scale_factor=2, mode="nearest"))
+        x = self.initial_conv(x)

         self.attentions = []
-        for i, sw in enumerate(self.switches):
-            x, att = sw.forward(x, True)
-            self.attentions.append(att)
+        x, att = self.switch(x, output_attention_weights=True)
+        self.attentions.append(att)

-        return x,
+        x = self.upconv1(F.interpolate(x, scale_factor=2, mode="nearest"))
+        if self.upsample_factor > 2:
+            x = F.interpolate(x, scale_factor=2, mode="nearest")
+        x = self.upconv2(x)
+        return self.final_conv(self.hr_conv(x)),

     def set_temperature(self, temp):
-        [sw.set_temperature(temp) for sw in self.switches]
+        self.switch.set_temperature(temp)

     def update_for_step(self, step, experiments_path='.'):
         if self.attentions:
@@ -299,11 +316,10 @@ class ConfigurableSwitchedResidualGenerator3(nn.Module):
                 temp = 1 / temp
             self.set_temperature(temp)
             if step % 50 == 0:
-                [save_attention_to_image(experiments_path, self.attentions[i], self.transformation_counts[i], step,
-                                         "a%i" % (i + 1,)) for i in range(len(self.switches))]
+                save_attention_to_image(experiments_path, self.attentions[0], self.transformation_counts, step, "a%i" % (1,), l_mult=10)

     def get_debug_values(self, step):
-        temp = self.switches[0].switch.temperature
+        temp = self.switch.switch.temperature
         mean_hists = [compute_attention_specificity(att, 2) for att in self.attentions]
         means = [i[0] for i in mean_hists]
         hists = [i[1].clone().detach().cpu().flatten() for i in mean_hists]
@@ -1,12 +1,48 @@
 # Taken and modified from https://github.com/lucifer443/SpineNet-Pytorch/blob/master/mmdet/models/backbones/spinenet.py

+import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.nn.init import kaiming_normal

 from torchvision.models.resnet import BasicBlock, Bottleneck
 from torch.nn.modules.batchnorm import _BatchNorm
-from models.archs.arch_util import ConvBnRelu
+
+
+''' Convenience class with Conv->BN->ReLU. Includes weight initialization and auto-padding for standard
+    kernel sizes. '''
+class ConvBnRelu(nn.Module):
+    def __init__(self, filters_in, filters_out, kernel_size=3, stride=1, relu=True, bn=True, bias=True):
+        super(ConvBnRelu, self).__init__()
+        padding_map = {1: 0, 3: 1, 5: 2, 7: 3}
+        assert kernel_size in padding_map.keys()
+        self.conv = nn.Conv2d(filters_in, filters_out, kernel_size, stride, padding_map[kernel_size], bias=bias)
+        if bn:
+            self.bn = nn.BatchNorm2d(filters_out)
+        else:
+            self.bn = None
+        if relu:
+            self.relu = nn.ReLU()
+        else:
+            self.relu = None
+
+        # Init params.
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu' if self.relu else 'linear')
+            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+    def forward(self, x):
+        x = self.conv(x)
+        if self.bn:
+            x = self.bn(x)
+        if self.relu:
+            return self.relu(x)
+        else:
+            return x


 def constant_init(module, val, bias=0):
     if hasattr(module, 'weight') and module.weight is not None:
@@ -213,7 +249,11 @@ class SpineNet(nn.Module):
                  arch,
                  in_channels=3,
                  output_level=[3, 4, 5, 6, 7],
-                 zero_init_residual=True):
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN', requires_grad=True),
+                 zero_init_residual=True,
+                 activation='relu',
+                 use_input_norm=False):
         super(SpineNet, self).__init__()
         self._block_specs = build_block_specs()[2:]
         self._endpoints_num_filters = SCALING_MAP[arch]['endpoints_num_filters']
@@ -225,6 +265,7 @@ class SpineNet(nn.Module):
         self.zero_init_residual = zero_init_residual
         assert min(output_level) > 2 and max(output_level) < 8, "Output level out of range"
         self.output_level = output_level
+        self.use_input_norm = use_input_norm

         self._make_stem_layer(in_channels)
         self._make_scale_permuted_network()
@@ -237,7 +278,8 @@ class SpineNet(nn.Module):
             in_channels,
             64,
             kernel_size=7,
-            stride=2) # Original paper had stride=2 and a maxpool after.
+            stride=2)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

         # Build the initial level 2 blocks.
         self.init_block1 = make_res_layer(
@@ -286,12 +328,19 @@ class SpineNet(nn.Module):
         if self.zero_init_residual:
             for m in self.modules():
                 if isinstance(m, Bottleneck):
-                    constant_init(m.norm3, 0)
+                    constant_init(m.bn3, 0)
                 elif isinstance(m, BasicBlock):
-                    constant_init(m.norm2, 0)
+                    constant_init(m.bn2, 0)

     def forward(self, input):
-        feat = self.conv1(input)
+        # Spinenet is pretrained on the standard pytorch input norm. The image will need to
+        # be normalized before feeding it through.
+        if self.use_input_norm:
+            mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(input.device)
+            std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(input.device)
+            input = (input - mean) / std
+
+        feat = self.maxpool(self.conv1(input))
         feat1 = self.init_block1(feat)
         feat2 = self.init_block2(feat1)
         block_feats = [feat1, feat2]
@@ -68,12 +68,7 @@ def define_G(opt, net_key='network_G'):
                                                                        heightened_temp_min=opt_net['heightened_temp_min'], heightened_final_step=opt_net['heightened_final_step'],
                                                                        upsample_factor=scale, add_scalable_noise_to_transforms=opt_net['add_noise'])
     elif which_model == "ConfigurableSwitchedResidualGenerator3":
-        netG = SwitchedGen_arch.ConfigurableSwitchedResidualGenerator3(trans_counts=opt_net['trans_counts'],
-                                                                       trans_kernel_sizes=opt_net['trans_kernel_sizes'], trans_layers=opt_net['trans_layers'],
-                                                                       transformation_filters=opt_net['transformation_filters'],
-                                                                       initial_temp=opt_net['temperature'], final_temperature_step=opt_net['temperature_final_step'],
-                                                                       heightened_temp_min=opt_net['heightened_temp_min'], heightened_final_step=opt_net['heightened_final_step'],
-                                                                       upsample_factor=scale, add_scalable_noise_to_transforms=opt_net['add_noise'])
+        netG = SwitchedGen_arch.ConfigurableSwitchedResidualGenerator3(base_filters=opt_net['base_filters'], trans_count=opt_net['trans_count'])
     elif which_model == "NestedSwitchGenerator":
         netG = ng.NestedSwitchedGenerator(switch_filters=opt_net['switch_filters'],
                                           switch_reductions=opt_net['switch_reductions'],
@@ -33,7 +33,7 @@ def init_dist(backend='nccl', **kwargs):
 def main():
     #### options
     parser = argparse.ArgumentParser()
-    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_div2k_rrdb.yml')
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_div2k_srg3.yml')
     parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',
                         help='job launcher')
     parser.add_argument('--local_rank', type=int, default=0)
@@ -97,6 +97,7 @@ if __name__ == "__main__":
                    torch.randn(1, 3, 64, 64),
                    device='cuda')
     '''
+    '''
     test_stability(functools.partial(srg.ConfigurableSwitchedResidualGenerator2,
                                      switch_filters=[32,32,32,32],
                                      switch_growths=[16,16,16,16],
@@ -110,6 +111,7 @@ if __name__ == "__main__":
                    torch.randn(1, 3, 64, 64),
                    device='cuda')
     '''
+    '''
     test_stability(functools.partial(srg1.ConfigurableSwitchedResidualGenerator,
                                      switch_filters=[32,32,32,32],
                                      switch_growths=[16,16,16,16],
@@ -123,3 +125,7 @@ if __name__ == "__main__":
                    torch.randn(1, 3, 64, 64),
                    device='cuda')
     '''
+    test_stability(functools.partial(srg.ConfigurableSwitchedResidualGenerator3,
+                                     64, 16),
+                   torch.randn(1, 3, 64, 64),
+                   device='cuda')