From 42a10b34cea704e9931f228b0f26fa24f8a59d91 Mon Sep 17 00:00:00 2001
From: James Betker <jbetker@gmail.com>
Date: Wed, 24 Jun 2020 21:15:17 -0600
Subject: [PATCH] Re-enable batch norm on switch processing blocks

Found out that disabling batch norm causes the switches to init really poorly -
not using a significant number of transforms. Might be a great time to
reconsider using the attention norm, but for now just re-enable batch norm.
---
 codes/models/archs/SwitchedResidualGenerator_arch.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
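
Note: ConvBnLelu is defined elsewhere in this repo and is not shown in this
diff. A minimal sketch of what the bn flag presumably toggles, for context
only (kernel size, padding and LeakyReLU slope are assumptions, not taken
from the actual implementation):

    import torch.nn as nn

    class ConvBnLelu(nn.Module):
        # Assumed structure: Conv2d -> optional BatchNorm2d -> LeakyReLU.
        def __init__(self, filters_in, filters_out, kernel_size=3, stride=1, bn=True):
            super(ConvBnLelu, self).__init__()
            self.conv = nn.Conv2d(filters_in, filters_out, kernel_size,
                                  stride=stride, padding=kernel_size // 2)
            self.bn = nn.BatchNorm2d(filters_out) if bn else None
            self.lelu = nn.LeakyReLU(negative_slope=0.1)

        def forward(self, x):
            x = self.conv(x)
            if self.bn is not None:
                x = self.bn(x)
            return self.lelu(x)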

diff --git a/codes/models/archs/SwitchedResidualGenerator_arch.py b/codes/models/archs/SwitchedResidualGenerator_arch.py
index 5377553a..cf8780a1 100644
--- a/codes/models/archs/SwitchedResidualGenerator_arch.py
+++ b/codes/models/archs/SwitchedResidualGenerator_arch.py
@@ -59,7 +59,7 @@ class HalvingProcessingBlock(nn.Module):
     def __init__(self, filters):
         super(HalvingProcessingBlock, self).__init__()
         self.bnconv1 = ConvBnLelu(filters, filters * 2, stride=2, bn=False)
-        self.bnconv2 = ConvBnLelu(filters * 2, filters * 2, bn=False)
+        self.bnconv2 = ConvBnLelu(filters * 2, filters * 2, bn=True)
 
     def forward(self, x):
         x = self.bnconv1(x)
@@ -72,7 +72,7 @@ def create_sequential_growing_processing_block(filters_init, filter_growth, num_
     convs = []
     current_filters = filters_init
     for i in range(num_convs):
-        convs.append(ConvBnLelu(current_filters, current_filters + filter_growth, bn=False))
+        convs.append(ConvBnLelu(current_filters, current_filters + filter_growth, bn=True))
         current_filters += filter_growth
     return nn.Sequential(*convs), current_filters
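
Rough usage check for the patched blocks, assuming the ConvBnLelu sketch above
(shapes and filter counts are illustrative):

    import torch

    # HalvingProcessingBlock doubles the channels and halves the spatial size
    # via the stride-2 first conv; the second conv now applies batch norm.
    block = HalvingProcessingBlock(64)
    y = block(torch.randn(1, 64, 32, 32))   # expected shape: (1, 128, 16, 16)

    # The growing block adds filter_growth channels per conv, each now with batch norm.
    seq, out_filters = create_sequential_growing_processing_block(32, 16, 3)
    z = seq(torch.randn(1, 32, 24, 24))     # out_filters == 32 + 3 * 16 == 80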