import random

import torch
import torch.nn
import torchaudio
import torchaudio.functional
from kornia.augmentation import RandomResizedCrop
from torch.cuda.amp import autocast

from data.audio.unsupervised_audio_dataset import load_audio
from trainer.inject import Injector, create_injector
from trainer.losses import extract_params_from_state
from utils.audio import plot_spectrogram
from utils.util import opt_get
from utils.weight_scheduler import get_scheduler_for_opt


# Transfers the state in the input key to the output key.
class DirectInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)

    def forward(self, state):
        return {self.output: state[self.input]}
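# A minimal usage sketch (key names are illustrative; real wiring normally comes from
# the trainer configuration):
#   inj = DirectInjector({'in': 'gen_out', 'out': 'fake'}, env)
#   state.update(inj(state))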


# Allows multiple injectors to be used on sequential steps.
class StepInterleaveInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        for inj in opt['subinjectors'].keys():
            o = opt.copy()
            o['type'] = opt['subtype']  # create_injector() dispatches on 'type'.
            o['in'] = '_in'
            o['out'] = '_out'
            # Note: as written, only the injector created on the final loop iteration is retained.
            self.injector = create_injector(o, self.env)
        self.aslist = opt['aslist'] if 'aslist' in opt.keys() else False

    def forward(self, state):
        injs = []
        st = state.copy()
        inputs = state[self.opt['in']]
        for i in range(inputs.shape[1]):
            st['_in'] = inputs[:, i]
            injs.append(self.injector(st)['_out'])
        if self.aslist:
            return {self.output: injs}
        else:
            return {self.output: torch.stack(injs, dim=1)}


# Pads the last dimension of [in] up to the next multiple of opt['multiple'].
class PadInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.multiple = opt['multiple']

    def forward(self, state):
        t = state[self.input]
        ldim = t.shape[-1]
        # Amount of padding needed; 0 when the last dim is already aligned.
        mod = (self.multiple - ldim % self.multiple) % self.multiple
        if mod != 0:
            t = torch.nn.functional.pad(t, (0, mod))
        return {self.output: t}
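# Example (shapes are illustrative): with multiple=8, a (B, C, 13) input is padded to
# (B, C, 16); an input whose last dimension is already a multiple of 8 passes through
# unchanged.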


class SqueezeInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.dim = opt['dim']

    def forward(self, state):
        return {self.output: state[self.input].squeeze(dim=self.dim)}


# Uses a generator to synthesize an image from [in] and injects the results into [out].
# Note that results are *not* detached.
class GeneratorInjector(Injector):
    def __init__(self, opt, env):
        super(GeneratorInjector, self).__init__(opt, env)
        self.grad = opt['grad'] if 'grad' in opt.keys() else True
        self.method = opt_get(opt, ['method'], None)  # If specified, this method is called instead of __call__()
        self.args = opt_get(opt, ['args'], {})
        self.fp16_override = opt_get(opt, ['fp16'], True)

    def forward(self, state):
        gen = self.env['generators'][self.opt['generator']]
        if self.method is not None and hasattr(gen, 'module'):
            gen = gen.module  # Dereference DDP wrapper.
        method = gen if self.method is None else getattr(gen, self.method)

        with autocast(enabled=self.env['opt']['fp16'] and self.fp16_override):
            if isinstance(self.input, list):
                params = extract_params_from_state(self.input, state)
            else:
                params = [state[self.input]]
            if self.grad:
                results = method(*params, **self.args)
            else:
                with torch.no_grad():
                    results = method(*params, **self.args)

        new_state = {}
        if isinstance(self.output, list):
            # Only dereference tuples or lists, not tensors. If this assert trips,
            # remove the braces around your outputs in the YAML config.
            assert isinstance(results, (list, tuple))
            for i, k in enumerate(self.output):
                new_state[k] = results[i]
        else:
            new_state[self.output] = results

        return new_state
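# Sketch of a config for a generator with two outputs (the type string and key names
# are illustrative):
#   {'type': 'generator', 'generator': 'gen', 'in': ['lq'], 'out': ['fake', 'latent']}
# When 'out' is a list, the generator must return a tuple or list of the same length.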


# Injects a result from a discriminator network into the state.
class DiscriminatorInjector(Injector):
    def __init__(self, opt, env):
        super(DiscriminatorInjector, self).__init__(opt, env)

    def forward(self, state):
        with autocast(enabled=self.env['opt']['fp16']):
            d = self.env['discriminators'][self.opt['discriminator']]
            if isinstance(self.input, list):
                params = [state[i] for i in self.input]
                results = d(*params)
            else:
                results = d(state[self.input])
        new_state = {}
        if isinstance(self.output, list):
            # Only dereference tuples or lists, not tensors.
            assert isinstance(results, (list, tuple))
            for i, k in enumerate(self.output):
                new_state[k] = results[i]
        else:
            new_state[self.output] = results

        return new_state


# Injects a scalar that is modulated with a specified schedule. Useful for increasing
# or decreasing the influence of something over time.
class ScheduledScalarInjector(Injector):
    def __init__(self, opt, env):
        super(ScheduledScalarInjector, self).__init__(opt, env)
        self.scheduler = get_scheduler_for_opt(opt['scheduler'])

    def forward(self, state):
        return {self.opt['out']: self.scheduler.get_weight_for_step(self.env['step'])}


# Adds noise, scaled by [scale], to [in] and injects the result into [out].
class AddNoiseInjector(Injector):
    def __init__(self, opt, env):
        super(AddNoiseInjector, self).__init__(opt, env)
        self.mode = opt['mode'] if 'mode' in opt.keys() else 'normal'

    def forward(self, state):
        # Scale can be a fixed float, or a state key (e.g. from ScheduledScalarInjector).
        if isinstance(self.opt['scale'], str):
            scale = state[self.opt['scale']]
        else:
            scale = self.opt['scale']
        if scale is None:
            scale = 1

        ref = state[self.opt['in']]
        if self.mode == 'normal':
            noise = torch.randn_like(ref) * scale
        elif self.mode == 'uniform':
            noise = torch.FloatTensor(ref.shape).uniform_(0.0, scale).to(ref.device)
        else:
            raise NotImplementedError
        return {self.opt['out']: state[self.opt['in']] + noise}
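# 'scale' may be a fixed float or the state key of another injector's output, such as
# a ScheduledScalarInjector, so the noise strength can be annealed over training. A
# sketch (type string and key names are illustrative):
#   {'type': 'add_noise', 'in': 'hq', 'out': 'hq_noisy', 'scale': 'noise_sched', 'mode': 'normal'}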


# Averages the channel dimension (1) of [in] and saves to [out]. Dimensions are
# kept the same; the average is simply repeated.
class GreyInjector(Injector):
    def __init__(self, opt, env):
        super(GreyInjector, self).__init__(opt, env)

    def forward(self, state):
        mean = torch.mean(state[self.opt['in']], dim=1, keepdim=True)
        mean = mean.repeat(1, 3, 1, 1)
        return {self.opt['out']: mean}


class InterpolateInjector(Injector):
    def __init__(self, opt, env):
        super(InterpolateInjector, self).__init__(opt, env)
        if 'scale_factor' in opt.keys():
            self.scale_factor = opt['scale_factor']
            self.size = None
        else:
            self.scale_factor = None
            self.size = (opt['size'], opt['size'])

    def forward(self, state):
        scaled = torch.nn.functional.interpolate(state[self.opt['in']], scale_factor=self.scale_factor,
                                                 size=self.size, mode=self.opt['mode'])
        return {self.opt['out']: scaled}


# Extracts four patches from the input image, each a square of 'patch_size'. The patches are taken from
# the four corners of the image. The intent is that each patch shares some part of the input, which can
# then be used in the translation invariance loss.
#
# This injector is unique in that it does not only produce the specified output label into state. Instead
# it produces five outputs for the specified label: one for each corner of the input, plus the specified
# output itself, which is the top-left corner. See the code below to find out how this works.
#
# Another note: this injector operates differently in eval mode (e.g. when env['training']=False). In that
# case, it simply sets all the output state variables to the input. This is so that you can feed the output
# of this injector directly into your generator in training without affecting test performance.
class ImagePatchInjector(Injector):
    def __init__(self, opt, env):
        super(ImagePatchInjector, self).__init__(opt, env)
        self.patch_size = opt['patch_size']
        # If specified, the output is resized to a square with this size after patch extraction.
        self.resize = opt['resize'] if 'resize' in opt.keys() else None

    def forward(self, state):
        im = state[self.opt['in']]
        if self.env['training']:
            res = {self.opt['out']: im[:, :3, :self.patch_size, :self.patch_size],
                   '%s_top_left' % (self.opt['out'],): im[:, :, :self.patch_size, :self.patch_size],
                   '%s_top_right' % (self.opt['out'],): im[:, :, :self.patch_size, -self.patch_size:],
                   '%s_bottom_left' % (self.opt['out'],): im[:, :, -self.patch_size:, :self.patch_size],
                   '%s_bottom_right' % (self.opt['out'],): im[:, :, -self.patch_size:, -self.patch_size:]}
        else:
            res = {self.opt['out']: im,
                   '%s_top_left' % (self.opt['out'],): im,
                   '%s_top_right' % (self.opt['out'],): im,
                   '%s_bottom_left' % (self.opt['out'],): im,
                   '%s_bottom_right' % (self.opt['out'],): im}
        if self.resize is not None:
            res2 = {}
            for k, v in res.items():
                res2[k] = torch.nn.functional.interpolate(v, size=(self.resize, self.resize), mode="nearest")
            res = res2
        return res
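# Worked example (numbers are illustrative): with patch_size=16 on a (B, 3, 24, 24)
# training batch, each corner patch is (B, 3, 16, 16) and all four share the central
# 8x8 region of the image, which is what the translation invariance loss exploits.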


# Concatenates a list of tensors on the specified dimension.
class ConcatenateInjector(Injector):
    def __init__(self, opt, env):
        super(ConcatenateInjector, self).__init__(opt, env)
        self.dim = opt['dim']

    def forward(self, state):
        inputs = [state[i] for i in self.input]
        return {self.opt['out']: torch.cat(inputs, dim=self.dim)}


# Removes margins from an image.
class MarginRemoval(Injector):
    def __init__(self, opt, env):
        super(MarginRemoval, self).__init__(opt, env)
        self.margin = opt['margin']
        self.random_shift_max = opt['random_shift_max'] if 'random_shift_max' in opt.keys() else 0

    def forward(self, state):
        input = state[self.input]
        if self.random_shift_max > 0:
            output = []
            # This per-batch-element loop is inefficient; if it proves useful, consider a
            # vectorized alternative such as Resample2d.
            for b in range(input.shape[0]):
                shiftleft = random.randint(-self.random_shift_max, self.random_shift_max)
                shifttop = random.randint(-self.random_shift_max, self.random_shift_max)
                output.append(input[b, :, self.margin + shiftleft:-(self.margin - shiftleft),
                                    self.margin + shifttop:-(self.margin - shifttop)])
            output = torch.stack(output, dim=0)
        else:
            output = input[:, :, self.margin:-self.margin, self.margin:-self.margin]

        return {self.opt['out']: output}


# Produces an injection which is composed of applying a single injector multiple times across a single dimension.
class ForEachInjector(Injector):
    def __init__(self, opt, env):
        super(ForEachInjector, self).__init__(opt, env)
        o = opt.copy()
        o['type'] = opt['subtype']
        o['in'] = '_in'
        o['out'] = '_out'
        self.injector = create_injector(o, self.env)
        self.aslist = opt['aslist'] if 'aslist' in opt.keys() else False

    def forward(self, state):
        injs = []
        st = state.copy()
        inputs = state[self.opt['in']]
        for i in range(inputs.shape[1]):
            st['_in'] = inputs[:, i]
            injs.append(self.injector(st)['_out'])
        if self.aslist:
            return {self.output: injs}
        else:
            return {self.output: torch.stack(injs, dim=1)}


class ConstantInjector(Injector):
    def __init__(self, opt, env):
        super(ConstantInjector, self).__init__(opt, env)
        self.constant_type = opt['constant_type']
        self.like = opt['like']  # This injector uses this tensor to determine what batch size and device to use.

    def forward(self, state):
        like = state[self.like]
        if self.constant_type == 'zeroes':
            out = torch.zeros_like(like)
        else:
            raise NotImplementedError
        return {self.opt['out']: out}


class IndicesExtractor(Injector):
    def __init__(self, opt, env):
        super(IndicesExtractor, self).__init__(opt, env)
        self.dim = opt['dim']
        assert self.dim == 1  # Not sure how to support an abstract dim here, so just add yours when needed.

    def forward(self, state):
        results = {}
        for i, o in enumerate(self.output):
            if self.dim == 1:
                results[o] = state[self.input][:, i]
        return results


class RandomShiftInjector(Injector):
    def __init__(self, opt, env):
        super(RandomShiftInjector, self).__init__(opt, env)

    def forward(self, state):
        # Currently a passthrough; the input is injected into the output key unchanged.
        img = state[self.input]
        return {self.output: img}


# Rotates the batch dimension by one position, pairing each element with a neighbor.
class BatchRotateInjector(Injector):
    def __init__(self, opt, env):
        super(BatchRotateInjector, self).__init__(opt, env)

    def forward(self, state):
        img = state[self.input]
        return {self.output: torch.roll(img, 1, 0)}


# Injector used to work with image deltas used in diff-SR.
class SrDiffsInjector(Injector):
    def __init__(self, opt, env):
        super(SrDiffsInjector, self).__init__(opt, env)
        self.mode = opt['mode']
        assert self.mode in ['recombine', 'produce_diff']
        self.lq = opt['lq']
        self.hq = opt['hq']
        if self.mode == 'produce_diff':
            self.diff_key = opt['diff']
            self.include_combined = opt['include_combined']

    def forward(self, state):
        resampled_lq = state[self.lq]
        hq = state[self.hq]
        if self.mode == 'produce_diff':
            diff = hq - resampled_lq
            if self.include_combined:
                res = torch.cat([resampled_lq, diff, hq], dim=1)
            else:
                res = torch.cat([resampled_lq, diff], dim=1)
            return {self.output: res,
                    self.diff_key: diff}
        elif self.mode == 'recombine':
            combined = resampled_lq + hq
            return {self.output: combined}


class MultiFrameCombiner(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.mode = opt['mode']
        self.dim = opt['dim'] if 'dim' in opt.keys() else None
        self.flow = opt['flow']
        self.in_lq_key = opt['in']
        self.in_hq_key = opt['in_hq']
        self.out_lq_key = opt['out']
        self.out_hq_key = opt['out_hq']
        from models.flownet2.networks import Resample2d
        self.resampler = Resample2d()

    def combine(self, state):
        flow = self.env['generators'][self.flow]
        lq = state[self.in_lq_key]
        hq = state[self.in_hq_key]
        b, f, c, h, w = lq.shape
        center = f // 2
        center_img = lq[:, center, :, :, :]
        imgs = [center_img]
        with torch.no_grad():
            for i in range(f):
                if i == center:
                    continue
                nimg = lq[:, i, :, :, :]
                # Warp each neighboring frame onto the center frame using the flow network.
                flowfield = flow(torch.stack([center_img, nimg], dim=2).float())
                nimg = self.resampler(nimg, flowfield)
                imgs.append(nimg)
        hq_out = hq[:, center, :, :, :]
        return {self.out_lq_key: torch.cat(imgs, dim=1),
                self.out_hq_key: hq_out,
                self.out_lq_key + "_flow_sample": torch.cat(imgs, dim=0)}

    def synthesize(self, state):
        lq = state[self.in_lq_key]
        return {
            self.out_lq_key: lq.repeat(1, self.dim, 1, 1)
        }

    def forward(self, state):
        if self.mode == "synthesize":
            return self.synthesize(state)
        elif self.mode == "combine":
            return self.combine(state)
        else:
            raise NotImplementedError


# Combines data from multiple different sources and mixes them along the batch dimension. Labels are then
# emitted according to how the mixing was performed.
class MixAndLabelInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.out_labels = opt['out_labels']

    def forward(self, state):
        input_tensors = [state[i] for i in self.input]
        num_inputs = len(input_tensors)
        bs = input_tensors[0].shape[0]
        labels = torch.randint(0, num_inputs, (bs,), device=input_tensors[0].device)
        # Still don't know of a good way to vectorize this in torch. TODO: make it better.
        res = []
        for b in range(bs):
            res.append(input_tensors[labels[b]][b, :, :, :])
        output = torch.stack(res, dim=0)
        return {self.out_labels: labels, self.output: output}


# Randomly performs a uniform resize & crop from a base image.
# Never resizes below input resolution or messes with the aspect ratio.
class RandomCropInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        dim_in = opt['dim_in']
        dim_out = opt['dim_out']
        scale = dim_out / dim_in
        self.operator = RandomResizedCrop(size=(dim_out, dim_out), scale=(scale, 1),
                                          # An aspect ratio range is required, but (.99, 1) is effectively "none".
                                          ratio=(.99, 1),
                                          resample='NEAREST')

    def forward(self, state):
        return {self.output: self.operator(state[self.input])}


class Stylegan2NoiseInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.mix_prob = opt_get(opt, ['mix_probability'], .9)
        self.latent_dim = opt_get(opt, ['latent_dim'], 512)

    def make_noise(self, batch, latent_dim, n_noise, device):
        return torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)

    def forward(self, state):
        i = state[self.input]
        if self.mix_prob > 0 and random.random() < self.mix_prob:
            return {self.output: self.make_noise(i.shape[0], self.latent_dim, 2, i.device)}
        else:
            return {self.output: self.make_noise(i.shape[0], self.latent_dim, 1, i.device)}
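# With the default mix_probability of .9, most forward passes emit two
# (batch, latent_dim) latents for style mixing; the rest emit a single latent.
# make_noise() returns a tuple of tensors in both cases.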


class NoiseInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.shape = tuple(opt['shape'])

    def forward(self, state):
        shape = (state[self.input].shape[0],) + self.shape
        return {self.output: torch.randn(shape, device=state[self.input].device)}


# Incorporates the specified dimension into the batch dimension.
class DecomposeDimensionInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.dim = opt['dim']
        self.cutoff_dim = opt_get(opt, ['cutoff_dim'], -1)
        assert self.dim != 0  # Cannot decompose the batch dimension.

    def forward(self, state):
        inp = state[self.input]
        dims = list(range(len(inp.shape)))  # Looks like [0,1,2,3]
        shape = list(inp.shape)
        del dims[self.dim]
        del shape[self.dim]

        # Compute the reverse permutation and shape arguments needed to undo this operation.
        rev_shape = [inp.shape[self.dim]] + shape.copy()
        rev_permute = list(range(len(inp.shape)))[1:]  # Looks like [1,2,3]
        rev_permute = rev_permute[:self.dim] + [0] + (rev_permute[self.dim:] if self.dim < len(rev_permute) else [])

        out = inp.permute([self.dim] + dims).reshape((-1,) + tuple(shape[1:]))
        if self.cutoff_dim > -1:
            out = out[:self.cutoff_dim]

        return {self.output: out,
                f'{self.output}_reverse_shape': rev_shape,
                f'{self.output}_reverse_permute': rev_permute}
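# Round-trip sketch (shapes are illustrative): decomposing dim=1 of a (B, F, C, H, W)
# tensor yields a (F*B, C, H, W) output along with '<out>_reverse_shape' and
# '<out>_reverse_permute' state keys, which RecomposeDimensionInjector below consumes
# to restore the original (B, F, C, H, W) layout.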


# Undoes a decompose.
class RecomposeDimensionInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.rev_shape_key = opt['reverse_shape']
        self.rev_permute_key = opt['reverse_permute']

    def forward(self, state):
        inp = state[self.input]
        rev_shape = state[self.rev_shape_key]
        rev_permute = state[self.rev_permute_key]
        out = inp.reshape(rev_shape)
        out = out.permute(rev_permute).contiguous()
        return {self.output: out}


# Performs normalization across fixed constants.
class NormalizeInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.shift = opt['shift']
        self.scale = opt['scale']

    def forward(self, state):
        inp = state[self.input]
        out = (inp - self.shift) / self.scale
        return {self.output: out}


# Performs frequency-bin normalization for spectrograms.
class FrequencyBinNormalizeInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.shift, self.scale = torch.load(opt['stats_file'])
        self.shift = self.shift.view(1, -1, 1)
        self.scale = self.scale.view(1, -1, 1)

    def forward(self, state):
        inp = state[self.input]
        self.shift = self.shift.to(inp.device)
        self.scale = self.scale.to(inp.device)
        out = (inp - self.shift) / self.scale
        return {self.output: out}


# Reverses the fixed-constant normalization performed by NormalizeInjector.
class DenormalizeInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.shift = opt['shift']
        self.scale = opt['scale']

    def forward(self, state):
        inp = state[self.input]
        out = inp * self.scale + self.shift
        return {self.output: out}
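# Example (constants are illustrative): NormalizeInjector with shift=.5, scale=.5 maps
# images from [0, 1] to [-1, 1]; DenormalizeInjector with the same constants inverts it.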


class MelSpectrogramInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        from models.tacotron2.layers import TacotronSTFT
        # These are the default tacotron values for the MEL spectrogram.
        filter_length = opt_get(opt, ['filter_length'], 1024)
        hop_length = opt_get(opt, ['hop_length'], 256)
        win_length = opt_get(opt, ['win_length'], 1024)
        n_mel_channels = opt_get(opt, ['n_mel_channels'], 80)
        mel_fmin = opt_get(opt, ['mel_fmin'], 0)
        mel_fmax = opt_get(opt, ['mel_fmax'], 8000)
        sampling_rate = opt_get(opt, ['sampling_rate'], 22050)
        self.stft = TacotronSTFT(filter_length, hop_length, win_length, n_mel_channels, sampling_rate, mel_fmin,
                                 mel_fmax)

    def forward(self, state):
        inp = state[self.input]
        if len(inp.shape) == 3:  # Automatically squeeze out the channels dimension if it is present (assuming mono-audio)
            inp = inp.squeeze(1)
        assert len(inp.shape) == 2
        self.stft = self.stft.to(inp.device)
        return {self.output: self.stft.mel_spectrogram(inp)}


class TorchMelSpectrogramInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        # These are the default tacotron values for the MEL spectrogram.
        self.filter_length = opt_get(opt, ['filter_length'], 1024)
        self.hop_length = opt_get(opt, ['hop_length'], 256)
        self.win_length = opt_get(opt, ['win_length'], 1024)
        self.n_mel_channels = opt_get(opt, ['n_mel_channels'], 80)
        self.mel_fmin = opt_get(opt, ['mel_fmin'], 0)
        self.mel_fmax = opt_get(opt, ['mel_fmax'], 8000)
        self.sampling_rate = opt_get(opt, ['sampling_rate'], 22050)
        norm = opt_get(opt, ['normalize'], False)
        self.mel_stft = torchaudio.transforms.MelSpectrogram(n_fft=self.filter_length, hop_length=self.hop_length,
                                                             win_length=self.win_length, power=2, normalized=norm,
                                                             sample_rate=self.sampling_rate, f_min=self.mel_fmin,
                                                             f_max=self.mel_fmax, n_mels=self.n_mel_channels,
                                                             norm="slaney")
        self.mel_norm_file = opt_get(opt, ['mel_norm_file'], None)
        if self.mel_norm_file is not None:
            self.mel_norms = torch.load(self.mel_norm_file)
        else:
            self.mel_norms = None

    def forward(self, state):
        inp = state[self.input]
        if len(inp.shape) == 3:  # Automatically squeeze out the channels dimension if it is present (assuming mono-audio)
            inp = inp.squeeze(1)
        assert len(inp.shape) == 2
        self.mel_stft = self.mel_stft.to(inp.device)
        mel = self.mel_stft(inp)
        # Perform dynamic range compression.
        mel = torch.log(torch.clamp(mel, min=1e-5))
        if self.mel_norms is not None:
            self.mel_norms = self.mel_norms.to(mel.device)
            mel = mel / self.mel_norms.unsqueeze(0).unsqueeze(-1)
        return {self.output: mel}


def test_torch_mel_injector():
    a = load_audio('D:\\data\\audio\\libritts\\train-clean-100\\19\\198\\19_198_000000_000000.wav', 22050)
    inj = TorchMelSpectrogramInjector({'in': 'in', 'out': 'out',
                                       'mel_norm_file': '../experiments/clips_mel_norms.pth'}, {})
    f = inj({'in': a.unsqueeze(0)})['out']
    plot_spectrogram(f[0])
    inj = MelSpectrogramInjector({'in': 'in', 'out': 'out'}, {})
    t = inj({'in': a.unsqueeze(0)})['out']
    plot_spectrogram(t[0])
    print('Pause')


class RandomAudioCropInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.crop_sz = opt['crop_size']

    def forward(self, state):
        inp = state[self.input]
        length = inp.shape[-1]
        margin = length - self.crop_sz
        start = random.randint(0, margin)
        return {self.output: inp[:, :, start:start + self.crop_sz]}


class AudioResampleInjector(Injector):
    def __init__(self, opt, env):
        super().__init__(opt, env)
        self.input_sr = opt['input_sample_rate']
        self.output_sr = opt['output_sample_rate']

    def forward(self, state):
        inp = state[self.input]
        return {self.output: torchaudio.functional.resample(inp, self.input_sr, self.output_sr)}


def test_audio_resample_injector():
    # The integer target rate below is illustrative; sample rates must be numeric.
    inj = AudioResampleInjector({'in': 'x', 'out': 'y', 'input_sample_rate': 22050, 'output_sample_rate': 11025},
                                None)
    print(inj({'x': torch.rand(10, 1, 40800)})['y'].shape)


if __name__ == '__main__':
    test_torch_mel_injector()