stopgap cringe to get this training session working (it does not seem fruitful)
This commit is contained in:
parent c0b46b82eb
commit d4a6709fb4
@@ -879,6 +879,7 @@ class Dataset(_Dataset):
        self.duration_map = _get_duration_map( self.dataset_type )

        # cull speakers if they do not have enough utterances (or cull speakers with too many utterances)
        """
        if cfg.dataset.min_utterances > 0 or cfg.dataset.max_utterances > 0:
            keys = list(self.paths_by_spkr_name.keys())
            for key in keys:
@@ -889,7 +890,7 @@ class Dataset(_Dataset):
                # slice away extraneous utterances
                if cfg.dataset.max_utterances:
                    self.paths_by_spkr_name[key] = self.paths_by_spkr_name[key][:cfg.dataset.max_utterances]

        """
        # flatten paths
        self.paths = list(itertools.chain.from_iterable(self.paths_by_spkr_name.values()))
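For reference, the block being commented out above amounts to filtering the speaker map by utterance count. A standalone sketch of that logic, assuming a plain dict of speaker name to paths (hypothetical helper, not part of the repository):

```python
def cull_speakers(paths_by_spkr: dict[str, list], min_utts: int = 0, max_utts: int = 0) -> dict[str, list]:
    """Drop speakers with too few utterances and cap speakers with too many.
    Sketch of the culling the commit disables above."""
    kept = {}
    for speaker, paths in paths_by_spkr.items():
        if min_utts and len(paths) < min_utts:
            continue
        kept[speaker] = paths[:max_utts] if max_utts else paths
    return kept
```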
@@ -1191,10 +1192,17 @@ class Dataset(_Dataset):
        if len(reference_metadata["similar"]) >= offset:
            offset = 0

        # cringe stopgap
        offset_end = offset + cfg.dataset.prompt_similar_top_k
        if offset >= len( reference_metadata["similar"] ):
            return None
        if offset_end >= len( reference_metadata["similar"] ):
            return None

        metadata_keys = list(metadata.keys())

        if cfg.dataset.prompt_similar_top_k > 1:
            indices = reference_metadata["similar"][offset:offset+cfg.dataset.prompt_similar_top_k]
            indices = reference_metadata["similar"][offset:offset_end]
            index = random.choice( indices )
        else:
            index = reference_metadata["similar"][offset]
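The "stopgap" bounds checks added above effectively bail out when the requested window falls past the end of the precomputed similarity list. A minimal standalone sketch of the same idea, slicing first and treating an empty window as the out-of-range case (hypothetical helper, not the repository's code):

```python
import random

def pick_similar_index(similar: list[int], offset: int, top_k: int) -> int | None:
    """Pick an index from a precomputed similarity list, or None if the
    requested [offset, offset + top_k) window is out of range."""
    window = similar[offset:offset + top_k]
    if not window:
        return None
    return random.choice(window) if top_k > 1 else window[0]
```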
@@ -1246,7 +1254,10 @@ class Dataset(_Dataset):
            # yuck
            path = None
            if random.random() < cfg.dataset.prompt_similar_p:
                try:
                    path = self.get_similar_utterance( reference, offset = len(prom_list) )
                except Exception as e:
                    path = None
                if not path:
                    path = random.choice(choices)
            else:
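The change above wraps the similar-utterance lookup so a failure degrades to a random prompt instead of aborting the sample. A sketch of the same fallback pattern as a standalone function (names are illustrative):

```python
import random

def choose_prompt_path(choices: list, get_similar, similar_p: float):
    """With probability similar_p, try the similarity lookup; on any failure,
    or if it returns nothing, fall back to a random choice."""
    path = None
    if random.random() < similar_p:
        try:
            path = get_similar()
        except Exception:
            path = None
    if not path:
        path = random.choice(choices)
    return path
```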
@@ -1310,7 +1321,13 @@ class Dataset(_Dataset):
        key = _get_hdf5_path(path)

        if key not in cfg.hdf5:
            raise RuntimeError(f'Key of Path ({path}) not in HDF5: {key}')
            _logger.warning(f'Key of Path ({path}) not in HDF5: {key}')
            return dict(path=None)

        # cringe stopgap
        if "text" not in cfg.hdf5[key] or "audio" not in cfg.hdf5[key]:
            _logger.warning(f"text/audio not in entry: {key}")
            return dict(path=None)

        # I need to do some weird coersion to a normal dict because it'll bitch about Hdf5 objects not being pickleable in worker processes
        metadata = { f'{k}': f'{v}' for k, v in cfg.hdf5[key].attrs.items() }
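The pattern above downgrades a hard failure to a warning plus a `path=None` sentinel that the collate function can filter later. A sketch of that defensive loader in isolation, assuming `hdf5` is an open `h5py.File` (the helper itself is hypothetical):

```python
import logging

_log = logging.getLogger(__name__)

def load_entry(hdf5, key):
    """Skip malformed HDF5 entries instead of raising."""
    if key not in hdf5:
        _log.warning("missing key: %s", key)
        return dict(path=None)
    group = hdf5[key]
    if "text" not in group or "audio" not in group:
        _log.warning("text/audio not in entry: %s", key)
        return dict(path=None)
    # copy attrs into a plain dict so the sample stays picklable in DataLoader workers
    metadata = {str(k): str(v) for k, v in group.attrs.items()}
    return dict(path=key, metadata=metadata)
```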
@@ -1342,6 +1359,9 @@ class Dataset(_Dataset):
        if not tone:
            tone = "neutral"

        if lang == "auto":
            lang = "en"

        lang = torch.tensor([self.lang_symmap[lang]]).to(torch.uint8)
        tone = torch.tensor([self.tone_symmap[tone]]).to(torch.uint8)
@@ -1601,6 +1621,7 @@ class Dataset(_Dataset):


def collate_fn(samples: list[dict]):
    samples = [ s for s in samples if s["path"] is not None ]
    batch: dict[str, Any] = {k: [s[k] for s in samples] for k in samples[0]}
    return batch
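The added filter pairs with the `return dict(path=None)` stopgaps above: samples that failed to load are dropped at collate time. A sketch of the same idea with one extra guard for the case where every sample in a batch was skipped, which the version above would turn into an IndexError on `samples[0]` (hypothetical helper):

```python
from typing import Any

def collate_skip_failed(samples: list[dict]) -> dict[str, Any]:
    """Drop samples whose __getitem__ bailed out with path=None, then collate
    the rest into lists keyed by field name."""
    samples = [s for s in samples if s.get("path") is not None]
    if not samples:
        return {}
    return {k: [s[k] for s in samples] for k in samples[0]}
```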
@@ -1,459 +0,0 @@
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# ResBlock1, ResBlock2, Generator, DiscriminatorP, DiscriminatorS, MultiScaleDiscriminator,
# MultiPeriodDiscriminator, init_weights, get_padding

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm

from nemo.core.classes.common import typecheck
from nemo.core.classes.module import NeuralModule
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType, VoidType
from nemo.core.neural_types.neural_type import NeuralType

LRELU_SLOPE = 0.1


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


class ResBlock1(torch.nn.Module):
    __constants__ = ['lrelu_slope']

    def __init__(self, channels, kernel_size, dilation):
        super().__init__()
        self.lrelu_slope = LRELU_SLOPE
        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[2],
                        padding=get_padding(kernel_size, dilation[2]),
                    )
                ),
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))
                ),
                weight_norm(
                    Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))
                ),
                weight_norm(
                    Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1))
                ),
            ]
        )
        self.convs2.apply(init_weights)

    def forward(self, x):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, self.lrelu_slope)
            xt = c1(xt)
            xt = F.leaky_relu(xt, self.lrelu_slope)
            xt = c2(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)

class ResBlock2(torch.nn.Module):
    __constants__ = ['lrelu_slope']

    def __init__(self, channels, kernel_size, dilation):
        super().__init__()
        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[0],
                        padding=get_padding(kernel_size, dilation[0]),
                    )
                ),
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation[1],
                        padding=get_padding(kernel_size, dilation[1]),
                    )
                ),
            ]
        )
        self.convs.apply(init_weights)
        self.lrelu_slope = LRELU_SLOPE

    def forward(self, x):
        for c in self.convs:
            xt = F.leaky_relu(x, self.lrelu_slope)
            xt = c(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Generator(NeuralModule):
    __constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples']

    def __init__(
        self,
        resblock,
        upsample_rates,
        upsample_kernel_sizes,
        upsample_initial_channel,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        initial_input_size=80,
        apply_weight_init_conv_pre=False,
    ):
        super().__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = weight_norm(Conv1d(initial_input_size, upsample_initial_channel, 7, 1, padding=3))
        self.lrelu_slope = LRELU_SLOPE
        resblock = ResBlock1 if resblock == 1 else ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2 ** i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            resblock_list = nn.ModuleList()
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                resblock_list.append(resblock(ch, k, d))
            self.resblocks.append(resblock_list)

        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        if apply_weight_init_conv_pre:
            self.conv_pre.apply(init_weights)

    @property
    def input_types(self):
        return {
            "x": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
        }

    @property
    def output_types(self):
        return {
            "audio": NeuralType(('B', 'S', 'T'), AudioSignal()),
        }

    @typecheck()
    def forward(self, x):
        x = self.conv_pre(x)
        for upsample_layer, resblock_group in zip(self.ups, self.resblocks):
            x = F.leaky_relu(x, self.lrelu_slope)
            x = upsample_layer(x)
            xs = torch.zeros(x.shape, dtype=x.dtype, device=x.device)
            for resblock in resblock_group:
                xs += resblock(x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for group in self.resblocks:
            for block in group:
                block.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)

class DiscriminatorP(NeuralModule):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, debug=False):
        super().__init__()
        self.period = period
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        conv_ch = [32, 128, 512, 1024] if not debug else [8, 12, 32, 64]
        self.convs = nn.ModuleList(
            [
                norm_f(Conv2d(1, conv_ch[0], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(conv_ch[0], conv_ch[1], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(conv_ch[1], conv_ch[2], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(conv_ch[2], conv_ch[3], (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(conv_ch[3], conv_ch[3], (kernel_size, 1), 1, padding=(2, 0))),
            ]
        )
        self.conv_post = norm_f(Conv2d(conv_ch[3], 1, (3, 1), 1, padding=(1, 0)))

    @property
    def input_types(self):
        return {
            "x": NeuralType(('B', 'S', 'T'), AudioSignal()),
        }

    @property
    def output_types(self):
        return {
            "decision": NeuralType(('B', 'T'), VoidType()),
            "feature_maps": [NeuralType(("B", "C", "H", "W"), VoidType())],
        }

    @typecheck()
    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(NeuralModule):
    def __init__(self, debug=False):
        super().__init__()
        self.discriminators = nn.ModuleList(
            [
                DiscriminatorP(2, debug=debug),
                DiscriminatorP(3, debug=debug),
                DiscriminatorP(5, debug=debug),
                DiscriminatorP(7, debug=debug),
                DiscriminatorP(11, debug=debug),
            ]
        )

    @property
    def input_types(self):
        return {
            "y": NeuralType(('B', 'S', 'T'), AudioSignal()),
            "y_hat": NeuralType(('B', 'S', 'T'), AudioSignal()),
        }

    @property
    def output_types(self):
        return {
            "real_scores": [NeuralType(('B', 'T'), VoidType())],
            "fake_scores": [NeuralType(('B', 'T'), VoidType())],
            "real_feature_maps": [[NeuralType(("B", "C", "H", "W"), VoidType())]],
            "fake_feature_maps": [[NeuralType(("B", "C", "H", "W"), VoidType())]],
        }

    @typecheck()
    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(x=y)
            y_d_g, fmap_g = d(x=y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorS(NeuralModule):
    def __init__(self, use_spectral_norm=False, debug=False):
        super().__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        conv_ch = [128, 256, 512, 1024] if not debug else [16, 32, 32, 64]
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, conv_ch[0], 15, 1, padding=7)),
                norm_f(Conv1d(conv_ch[0], conv_ch[0], 41, 2, groups=4, padding=20)),
                norm_f(Conv1d(conv_ch[0], conv_ch[1], 41, 2, groups=16, padding=20)),
                norm_f(Conv1d(conv_ch[1], conv_ch[2], 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(conv_ch[2], conv_ch[3], 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(conv_ch[3], conv_ch[3], 41, 1, groups=16, padding=20)),
                norm_f(Conv1d(conv_ch[3], conv_ch[3], 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(conv_ch[3], 1, 3, 1, padding=1))

    @property
    def input_types(self):
        return {
            "x": NeuralType(('B', 'S', 'T'), AudioSignal()),
        }

    @property
    def output_types(self):
        return {
            "decision": NeuralType(('B', 'T'), VoidType()),
            "feature_maps": [NeuralType(("B", "C", "T"), VoidType())],
        }

    @typecheck()
    def forward(self, x):
        fmap = []
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiScaleDiscriminator(NeuralModule):
    def __init__(self, debug=False):
        super().__init__()
        self.discriminators = nn.ModuleList(
            [
                DiscriminatorS(use_spectral_norm=True, debug=debug),
                DiscriminatorS(debug=debug),
                DiscriminatorS(debug=debug),
            ]
        )
        self.meanpools = nn.ModuleList([AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)])

    @property
    def input_types(self):
        return {
            "y": NeuralType(('B', 'S', 'T'), AudioSignal()),
            "y_hat": NeuralType(('B', 'S', 'T'), AudioSignal()),
        }

    @property
    def output_types(self):
        return {
            "real_scores": [NeuralType(('B', 'T'), VoidType())],
            "fake_scores": [NeuralType(('B', 'T'), VoidType())],
            "real_feature_maps": [[NeuralType(("B", "C", "T"), VoidType())]],
            "fake_feature_maps": [[NeuralType(("B", "C", "T"), VoidType())]],
        }

    @typecheck()
    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            if i != 0:
                y = self.meanpools[i - 1](y)
                y_hat = self.meanpools[i - 1](y_hat)
            y_d_r, fmap_r = d(x=y)
            y_d_g, fmap_g = d(x=y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
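The deleted module above is NeMo's vendored HiFi-GAN implementation. For orientation, a hedged usage sketch of its `Generator`: the hyperparameters below are the commonly cited HiFi-GAN V1 values, not values taken from this repository, and the snippet assumes NeMo is installed so the class above imports cleanly.

```python
import torch

# Illustrative HiFi-GAN V1-style configuration (not this repo's config).
generator = Generator(
    resblock=1,
    upsample_rates=[8, 8, 2, 2],
    upsample_kernel_sizes=[16, 16, 4, 4],
    upsample_initial_channel=512,
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    initial_input_size=80,
)

mel = torch.randn(1, 80, 100)   # (batch, n_mels, frames)
audio = generator(x=mel)        # (batch, 1, frames * prod(upsample_rates))
```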
@@ -164,10 +164,14 @@ def batch_similar_utterances(
    """

    # compute features (embeddings if quantized already, MFCC features if raw audio)
    dim_shape = 1024
    for filename in tqdm(os.listdir(f'./{speaker_path}/'), desc=f"Encoding '{speaker_path.name}'", disable=not verbose):
        extension = filename.split(".")[-1]
        filename = filename.replace(f".{extension}", "")

        if filename not in features:
            continue

        if similarity_backend == "text":
            if extension not in artifact_extension:
                raise Exception("!")
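The added guard skips utterances that never had a feature computed, instead of crashing on the lookup. A standalone sketch of that directory walk, pairing each file stem with its precomputed feature (hypothetical helper, names are illustrative):

```python
import os

def iter_speaker_files(speaker_path, features: dict):
    """Yield (stem, extension, feature) for each file in a speaker folder,
    skipping files with no precomputed feature."""
    for filename in os.listdir(speaker_path):
        stem, _, extension = filename.rpartition(".")
        if stem not in features:
            continue
        yield stem, extension, features[stem]
```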
@@ -274,6 +278,9 @@ def batch_similar_utterances(
    if top_k == 0:
        top_k = len(keys)

    if len(keys) == 0:
        return None

    # fill any missing keys with a null embedding to keep the order the same
    null_embedding = torch.zeros( (dim_shape,), device=tts.device, dtype=tts.dtype )
    embeddings = torch.stack( [ feature if feature is not None else null_embedding for feature in features.values() ] )
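The null-embedding trick keeps the stacked tensor index-aligned with the key order even when some utterances failed feature extraction. A minimal sketch of the same pattern, with device/dtype handling omitted (hypothetical helper):

```python
import torch

def stack_with_null(features: dict[str, torch.Tensor | None], dim: int = 1024) -> torch.Tensor:
    """Stack per-utterance embeddings, substituting a zero vector for missing
    entries so indices keep lining up with the keys."""
    null = torch.zeros(dim)
    return torch.stack([f if f is not None else null for f in features.values()])
```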
@@ -443,6 +450,7 @@ def main():
        if args.skip_existing and metadata_keys and "similar" in metadata[metadata_keys[-1]]:
            return

        try:
            similarities = batch_similar_utterances(
                speaker_path=cfg.data_dir / speaker_name,
                yaml=args.yaml,
@@ -463,6 +471,8 @@ def main():

                verbose=True,
            )
        except Exception as e:
            similarities = None

        if not similarities:
            return
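Together, the two hunks above wrap the per-speaker similarity computation in a try/except so one bad speaker folder no longer aborts the whole run. A sketch of that wrapper in isolation (hypothetical helper, not the script's actual structure):

```python
def process_speaker(speaker_name: str, compute_similarities):
    """Swallow per-speaker failures and return None instead of raising."""
    try:
        similarities = compute_similarities(speaker_name)
    except Exception as e:
        print(f"skipping {speaker_name}: {e}")
        similarities = None
    return similarities or None
```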
@@ -109,9 +109,15 @@ def _make_infinite_epochs(dl):
        if dl.dataset.index() == 0:
            _logger.info("New epoch starts.")

        with tqdm(dl, "Epoch progress", dynamic_ncols=True, disable=not is_global_leader()) as pbar:
            yield from pbar

        """
        # this breaks the bar on a new epoch...
        total = dl.dataset.batches() - dl.dataset.index()
        with tqdm(dl, "Epoch progress", dynamic_ncols=True, disable=not is_global_leader(), total=total) as pbar:
            yield from pbar
        """

@local_leader_only(default=None)
def logger(data):
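The change swaps the resumable-progress `total=` variant (now commented out because it breaks the bar across epochs) for a plain per-epoch bar. The core pattern, stripped of the distributed-leader and dataset-index bookkeeping, is just an infinite generator over epochs; a minimal sketch:

```python
from tqdm import tqdm

def make_infinite_epochs(dl):
    """Yield batches forever, restarting the DataLoader (and its progress bar)
    at each epoch boundary."""
    while True:
        with tqdm(dl, "Epoch progress", dynamic_ncols=True) as pbar:
            yield from pbar
```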