Fix typos

Found via `codespell -S *.json -L splitted,nd,ser,broadcat`
Kian-Meng Ang 2023-01-05 19:49:55 +08:00
parent 2c0d8d71e0
commit 551fe655ff
5 changed files with 5 additions and 5 deletions


@@ -120,7 +120,7 @@ For the those in the ML space: this is created by projecting a random vector ont
 This repo comes with several pre-packaged voices. Voices prepended with "train_" came from the training set and perform
 far better than the others. If your goal is high quality speech, I recommend you pick one of them. If you want to see
-what Tortoise can do for zero-shot mimicing, take a look at the others.
+what Tortoise can do for zero-shot mimicking, take a look at the others.
 ### Adding a new voice


@@ -110,7 +110,7 @@ tuning_group.add_argument(
 tuning_group.add_argument(
 '--cvvp-amount', type=float, default=None,
 help='How much the CVVP model should influence the output.'
-'Increasing this can in some cases reduce the likelyhood of multiple speakers.')
+'Increasing this can in some cases reduce the likelihood of multiple speakers.')
 tuning_group.add_argument(
 '--diffusion-iterations', type=int, default=None,
 help='Number of diffusion steps to perform. More steps means the network has more chances to iteratively'


@@ -20,7 +20,7 @@ if __name__ == '__main__':
 parser.add_argument('--seed', type=int, help='Random seed which can be used to reproduce results.', default=None)
 parser.add_argument('--produce_debug_state', type=bool, help='Whether or not to produce debug_state.pth, which can aid in reproducing problems. Defaults to true.', default=True)
 parser.add_argument('--cvvp_amount', type=float, help='How much the CVVP model should influence the output.'
-'Increasing this can in some cases reduce the likelyhood of multiple speakers. Defaults to 0 (disabled)', default=.0)
+'Increasing this can in some cases reduce the likelihood of multiple speakers. Defaults to 0 (disabled)', default=.0)
 args = parser.parse_args()
 os.makedirs(args.output_path, exist_ok=True)

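Both CVVP hunks above build their help text from adjacent string literals, which Python concatenates into a single string; that is why the corrected word sits on a continuation line. A minimal standalone sketch of the pattern (the parser and values here are illustrative, not the repository's exact code):

    import argparse

    parser = argparse.ArgumentParser()
    # Adjacent string literals are joined at compile time, so the help text
    # below is one string even though it spans two source lines.
    parser.add_argument('--cvvp_amount', type=float, default=0.0,
                        help='How much the CVVP model should influence the output. '
                             'Increasing this can in some cases reduce the likelihood of multiple speakers.')

    args = parser.parse_args(['--cvvp_amount', '0.5'])
    print(args.cvvp_amount)  # prints 0.5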

@@ -43,7 +43,7 @@ def normalization(channels):
 class QKVAttentionLegacy(nn.Module):
 """
-A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
+A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
 """
 def __init__(self, n_heads):
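
For context on the docstring fixed above: the class attends over a fused query/key/value tensor and folds the attention heads in and out around the attention product ("heads shaping"). A rough sketch of what a legacy-style QKV attention forward pass generally looks like, with the class name, tensor layout, and scaling assumed from common implementations rather than copied from this repository:

    import math
    import torch
    from torch import nn

    class QKVAttentionSketch(nn.Module):
        """Toy QKV attention over a fused (batch, heads * 3 * ch, length) tensor."""
        def __init__(self, n_heads):
            super().__init__()
            self.n_heads = n_heads

        def forward(self, qkv):
            bs, width, length = qkv.shape
            ch = width // (3 * self.n_heads)
            # Fold the heads into the batch dimension, then split into q, k, v.
            q, k, v = qkv.reshape(bs * self.n_heads, 3 * ch, length).split(ch, dim=1)
            weight = torch.softmax(torch.einsum('bct,bcs->bts', q / math.sqrt(ch), k), dim=-1)
            out = torch.einsum('bts,bcs->bct', weight, v)
            # Un-fold the heads: back to (batch, heads * ch, length).
            return out.reshape(bs, -1, length)

For example, QKVAttentionSketch(n_heads=4)(torch.randn(2, 4 * 3 * 16, 50)) returns a tensor of shape (2, 64, 50).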


@@ -216,4 +216,4 @@ class Transformer(nn.Module):
 self.layers = execute_type(layers, args_route = attn_route_map)
 def forward(self, x, **kwargs):
-return self.layers(x, **kwargs)
\ No newline at end of file
+return self.layers(x, **kwargs)