forked from mrq/tortoise-tts
how did I botch this? I don't think it affects anything, since it never threw an error
This commit is contained in:
parent
00be48670b
commit
0514f011ff
|
@ -4,10 +4,10 @@ transformers==4.19
|
||||||
tokenizers
|
tokenizers
|
||||||
inflect
|
inflect
|
||||||
progressbar
|
progressbar
|
||||||
einops
|
einops==0.6.0
|
||||||
unidecode
|
unidecode
|
||||||
scipy
|
scipy
|
||||||
librosa
|
librosa==0.8.0
|
||||||
torchaudio
|
torchaudio
|
||||||
threadpoolctl
|
threadpoolctl
|
||||||
appdirs
|
appdirs
|
||||||
|
|
|
@ -485,7 +485,7 @@ class UnifiedVoice(nn.Module):
|
||||||
max_generate_length=None, typical_sampling=False, typical_mass=.9, **hf_generate_kwargs):
|
max_generate_length=None, typical_sampling=False, typical_mass=.9, **hf_generate_kwargs):
|
||||||
seq_length = self.max_mel_tokens + self.max_text_tokens + 2
|
seq_length = self.max_mel_tokens + self.max_text_tokens + 2
|
||||||
if not hasattr(self, 'inference_model'):
|
if not hasattr(self, 'inference_model'):
|
||||||
self.post_init_gpt2_config(kv_cache=self.kv_cachepost_init_gpt2_config)
|
self.post_init_gpt2_config(kv_cache=self.kv_cache)
|
||||||
|
|
||||||
|
|
||||||
text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
|
text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)
|
||||||
|
|
Loading…
Reference in New Issue
Block a user