diff --git a/codes/models/archs/discriminator_vgg_arch.py b/codes/models/archs/discriminator_vgg_arch.py
index 27dd6a1f..006974ec 100644
--- a/codes/models/archs/discriminator_vgg_arch.py
+++ b/codes/models/archs/discriminator_vgg_arch.py
@@ -4,7 +4,8 @@ import torchvision
 
 
 class Discriminator_VGG_128(nn.Module):
-    def __init__(self, in_nc, nf):
+    # input_img_factor = input image size / 128. Supports images over 128x128; only certain factors are supported.
+    def __init__(self, in_nc, nf, input_img_factor=1):
         super(Discriminator_VGG_128, self).__init__()
         # [64, 128, 128]
         self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
@@ -31,7 +32,7 @@ class Discriminator_VGG_128(nn.Module):
         self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)
         self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)
 
-        self.linear1 = nn.Linear(512 * 4 * 4, 100)
+        self.linear1 = nn.Linear(int(512 * 4 * input_img_factor * 4 * input_img_factor), 100)  # int(): factor may be a float
         self.linear2 = nn.Linear(100, 1)
 
         # activation function
diff --git a/codes/models/networks.py b/codes/models/networks.py
index 2a679134..ddd666a4 100644
--- a/codes/models/networks.py
+++ b/codes/models/networks.py
@@ -32,11 +32,12 @@ def define_G(opt):
 
 # Discriminator
 def define_D(opt):
+    img_sz = opt['datasets']['train']['GT_size']
     opt_net = opt['network_D']
     which_model = opt_net['which_model_D']
 
     if which_model == 'discriminator_vgg_128':
-        netD = SRGAN_arch.Discriminator_VGG_128(in_nc=opt_net['in_nc'], nf=opt_net['nf'])
+        netD = SRGAN_arch.Discriminator_VGG_128(in_nc=opt_net['in_nc'], nf=opt_net['nf'], input_img_factor=img_sz / 128)
     else:
         raise NotImplementedError('Discriminator model [{:s}] not recognized'.format(which_model))
     return netD
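
For reference, a minimal sketch (not part of the patch; the helper name is illustrative) of how input_img_factor scales the linear1 input size, assuming the five stride-2 convolutions in Discriminator_VGG_128 that reduce a 128x128 input to a 4x4 feature map:

    # Sketch only: 'linear1_in_features' is an illustrative helper, not part of the repo.
    def linear1_in_features(gt_size, nf=64):
        input_img_factor = gt_size / 128   # as computed in define_D
        side = 4 * input_img_factor        # final feature-map side after five stride-2 convs
        return int(nf * 8 * side * side)   # nf * 8 == 512 channels when nf == 64

    print(linear1_in_features(128))  # 8192  == 512 * 4 * 4 (original behaviour)
    print(linear1_in_features(256))  # 32768 == 512 * 8 * 8

The int() cast mirrors the one in discriminator_vgg_arch.py: GT_size / 128 is a float in Python 3, and nn.Linear requires an integer in_features.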