forked from mrq/DL-Art-School

SRG v2 - Move to Relu, rely on Module-based initialization

parent ee6443ad7d
commit b945021c90
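The diff below swaps several ConvBnLelu blocks for a ConvBnRelu block and drops the generator's explicit initialize_weights calls, so initialization is expected to live inside the conv modules themselves. The ConvBnRelu definition is not part of this diff; the following is only a sketch of what such a module could look like, assuming a Conv2d -> optional BatchNorm2d -> ReLU layout with Kaiming initialization applied in __init__.

    import torch.nn as nn

    class ConvBnRelu(nn.Module):
        """Sketch of a conv block that owns its own initialization.

        Illustration of 'module-based initialization' only; this is not the
        repository's actual ConvBnRelu implementation.
        """
        def __init__(self, filters_in, filters_out, kernel_size=3, stride=1,
                     bn=True, relu=True, bias=True):
            super().__init__()
            self.conv = nn.Conv2d(filters_in, filters_out, kernel_size,
                                  stride=stride, padding=kernel_size // 2, bias=bias)
            self.bn = nn.BatchNorm2d(filters_out) if bn else None
            self.relu = nn.ReLU(inplace=True) if relu else None

            # Module-based initialization: the block initializes itself, so the
            # surrounding network no longer needs explicit initialize_weights() calls.
            nn.init.kaiming_normal_(self.conv.weight, nonlinearity='relu')
            if bias:
                nn.init.zeros_(self.conv.bias)

        def forward(self, x):
            x = self.conv(x)
            if self.bn is not None:
                x = self.bn(x)
            if self.relu is not None:
                x = self.relu(x)
            return x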
@@ -102,8 +102,8 @@ class MultiConvBlock(nn.Module):
 class HalvingProcessingBlock(nn.Module):
     def __init__(self, filters):
         super(HalvingProcessingBlock, self).__init__()
-        self.bnconv1 = ConvBnLelu(filters, filters * 2, stride=2, bn=False)
-        self.bnconv2 = ConvBnLelu(filters * 2, filters * 2, bn=True)
+        self.bnconv1 = ConvBnLelu(filters, filters * 2, stride=2, bn=False, bias=False)
+        self.bnconv2 = ConvBnLelu(filters * 2, filters * 2, bn=True, bias=False)

     def forward(self, x):
         x = self.bnconv1(x)
@@ -116,7 +116,7 @@ def create_sequential_growing_processing_block(filters_init, filter_growth, num_
     convs = []
     current_filters = filters_init
     for i in range(num_convs):
-        convs.append(ConvBnLelu(current_filters, current_filters + filter_growth, bn=True))
+        convs.append(ConvBnRelu(current_filters, current_filters + filter_growth, bn=True, bias=False))
         current_filters += filter_growth
     return nn.Sequential(*convs), current_filters
@@ -222,15 +222,15 @@ class ConfigurableSwitchComputer(nn.Module):
 class ConvBasisMultiplexer(nn.Module):
     def __init__(self, input_channels, base_filters, growth, reductions, processing_depth, multiplexer_channels, use_bn=True):
         super(ConvBasisMultiplexer, self).__init__()
-        self.filter_conv = ConvBnLelu(input_channels, base_filters)
+        self.filter_conv = ConvBnRelu(input_channels, base_filters, bias=True)
         self.reduction_blocks = nn.Sequential(OrderedDict([('block%i:' % (i,), HalvingProcessingBlock(base_filters * 2 ** i)) for i in range(reductions)]))
         reduction_filters = base_filters * 2 ** reductions
         self.processing_blocks, self.output_filter_count = create_sequential_growing_processing_block(reduction_filters, growth, processing_depth)

         gap = self.output_filter_count - multiplexer_channels
-        self.cbl1 = ConvBnLelu(self.output_filter_count, self.output_filter_count - (gap // 4), bn=use_bn)
-        self.cbl2 = ConvBnLelu(self.output_filter_count - (gap // 4), self.output_filter_count - (gap // 2), bn=use_bn)
-        self.cbl3 = ConvBnLelu(self.output_filter_count - (gap // 2), multiplexer_channels)
+        self.cbl1 = ConvBnRelu(self.output_filter_count, self.output_filter_count - (gap // 2), bn=use_bn, bias=False)
+        self.cbl2 = ConvBnRelu(self.output_filter_count - (gap // 2), self.output_filter_count - (3 * gap // 4), bn=use_bn, bias=False)
+        self.cbl3 = ConvBnRelu(self.output_filter_count - (3 * gap // 4), multiplexer_channels, bias=True)

     def forward(self, x):
         x = self.filter_conv(x)
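For concreteness, a small sketch of how the revised ConvBasisMultiplexer taper behaves, using made-up filter counts (the real values depend on the network config): the new cbl1/cbl2/cbl3 chain closes the gap to multiplexer_channels in half / three-quarter steps instead of the old quarter / half steps, so channels are reduced more aggressively up front.

    # Illustrative only: output_filter_count and multiplexer_channels are made up.
    output_filter_count, multiplexer_channels = 128, 8
    gap = output_filter_count - multiplexer_channels  # 120

    old_widths = [output_filter_count,
                  output_filter_count - (gap // 4),   # 98
                  output_filter_count - (gap // 2),   # 68
                  multiplexer_channels]               # 8

    new_widths = [output_filter_count,
                  output_filter_count - (gap // 2),       # 68
                  output_filter_count - (3 * gap // 4),   # 38
                  multiplexer_channels]                   # 8

    print(old_widths)  # [128, 98, 68, 8]
    print(new_widths)  # [128, 68, 38, 8]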
@@ -351,17 +351,14 @@ class ConfigurableSwitchedResidualGenerator2(nn.Module):
         switches = []
         post_switch_proc = []
         self.initial_conv = ConvBnLelu(3, transformation_filters, bn=False)
-        self.proc_conv = ConvBnLelu(transformation_filters, transformation_filters, bn=False)
+        self.proc_conv = ConvBnLelu(transformation_filters, transformation_filters, bn=False, bias=False)
         self.final_conv = ConvBnLelu(transformation_filters, 3, bn=False, lelu=False)
         for filters, growth, sw_reduce, sw_proc, trans_count, kernel, layers in zip(switch_filters, switch_growths, switch_reductions, switch_processing_layers, trans_counts, trans_kernel_sizes, trans_layers):
             multiplx_fn = functools.partial(ConvBasisMultiplexer, transformation_filters, filters, growth, sw_reduce, sw_proc, trans_count)
             switches.append(ConfigurableSwitchComputer(multiplx_fn, functools.partial(MultiConvBlock, transformation_filters, transformation_filters, transformation_filters, kernel_size=kernel, depth=layers), trans_count, initial_temp, enable_negative_transforms=enable_negative_transforms, add_scalable_noise_to_transforms=add_scalable_noise_to_transforms))
             post_switch_proc.append(ConvBnLelu(transformation_filters, transformation_filters, bn=False))
-        initialize_weights(switches, 1)
-        # Initialize the transforms with a lesser weight, since they are repeatedly added on to the resultant image.
-        initialize_weights([s.transforms for s in switches], .2 / len(switches))
         self.switches = nn.ModuleList(switches)
-        initialize_weights([p for p in post_switch_proc], .01)
         self.post_switch_convs = nn.ModuleList(post_switch_proc)
         self.transformation_counts = trans_counts
         self.init_temperature = initial_temp
@@ -382,14 +379,12 @@ class ConfigurableSwitchedResidualGenerator2(nn.Module):
             x = x + sw_out
             x = x + conv(x)

-        # This network is entirely a "repair" network and operates on full-resolution images. Upsample first if that
-        # is called for, then repair.
         if self.upsample_factor > 1:
             x = F.interpolate(x, scale_factor=self.upsample_factor, mode="nearest")

         x = self.proc_conv(x)
         x = self.final_conv(x)
-        return x,
+        return x / 13,

     def set_temperature(self, temp):
         [sw.set_temperature(temp) for sw in self.switches]
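The diff keeps the set_temperature hook on the generator; a typical way to drive it is to anneal the switch temperature over training. The helper below is a hypothetical sketch (the anneal_switch_temperature name and the schedule values are not from the repo); the only assumption taken from the diff is that the generator exposes set_temperature(temp).

    def anneal_switch_temperature(net, step, initial_temp=10.0, final_temp=1.0, anneal_steps=50000):
        # Linearly decay the switch softmax temperature and push it into the generator.
        frac = min(step / anneal_steps, 1.0)
        temp = initial_temp + (final_temp - initial_temp) * frac
        net.set_temperature(temp)
        return temp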