import torch
import torchaudio
import soundfile

import time
import logging

_logger = logging.getLogger(__name__)

from torch import Tensor
from einops import rearrange
from pathlib import Path

from .emb import g2p, qnt
from .emb.qnt import trim, trim_random, unload_model
from .utils import to_device, set_seed, wrapper as ml

from .config import cfg, Config
from .models import get_models
from .models.lora import enable_lora
from .engines import load_engines, deepspeed_available
from .data import get_phone_symmap, get_lang_symmap, _load_quants, _cleanup_phones, tokenize

if deepspeed_available:
    import deepspeed

class TTS():
    def __init__( self, config=None, device=None, amp=None, dtype=None, attention=None ):
        self.loading = True

        # yes I can just grab **kwargs and forward them here
        self.load_config( config=config, device=device, amp=amp, dtype=dtype, attention=attention )
        self.load_model()

        self.loading = False

    def load_config( self, config=None, device=None, amp=None, dtype=None, attention=None ):
        if config:
            _logger.info(f"Loading YAML: {config}")
            cfg.load_yaml( config )

        try:
            cfg.format( training=False )
            cfg.dataset.use_hdf5 = False # could use cfg.load_hdf5(), but why would it ever need to be loaded for inferencing
        except Exception as e:
            raise e # throw an error because I'm tired of silent errors messing things up for me

        if amp is None:
            amp = cfg.inference.amp
        if dtype is None or dtype == "auto":
            dtype = cfg.inference.weight_dtype
        if device is None:
            device = cfg.device

        cfg.device = device
        cfg.mode = "inferencing"
        cfg.trainer.backend = cfg.inference.backend
        cfg.trainer.weight_dtype = dtype
        cfg.inference.weight_dtype = dtype

        self.device = device
        self.dtype = cfg.inference.dtype
        self.amp = amp

        self.model_kwargs = {}
        if attention:
            self.model_kwargs["attention"] = attention

    def load_model( self ):
        load_engines.cache_clear()
        unload_model()

        self.engines = load_engines(training=False, **self.model_kwargs)
        for name, engine in self.engines.items():
            if self.dtype != torch.int8:
                engine.to(self.device, dtype=self.dtype if not self.amp else torch.float32)

        self.engines.eval()
        self.symmap = get_phone_symmap()
        _logger.info("Loaded model")

    def enable_lora( self, enabled=True ):
        for name, engine in self.engines.items():
            enable_lora( engine.module, mode = enabled )

    def disable_lora( self ):
        return self.enable_lora( enabled=False )

    def encode_text( self, text, language="en" ):
        # already a tensor, return it
        if isinstance( text, Tensor ):
            return text

        content = g2p.encode(text, language=language)
        tokens = tokenize( content )

        return torch.tensor( tokens )

    def encode_lang( self, language ):
        symmap = get_lang_symmap()
        lang_id = symmap.get( language, 0 ) # fall back to 0 for unknown languages
        return torch.tensor([ lang_id ])

    # to-do: trim before quantizing, instead of after
    def encode_audio( self, paths, trim_length=0.0 ):
        # already a tensor, return it
        if isinstance( paths, Tensor ):
            return paths

        # split a semicolon-delimited string into individual paths
        if isinstance( paths, str ):
            paths = [ Path(p) for p in paths.split(";") ]

        # encode each clip and merge them into one prompt
        proms = []
        for path in paths:
            prom = qnt.encode_from_file(path)
            if hasattr( prom, "codes" ):
                prom = prom.codes
            prom = prom[0].t().to(torch.int16)

            proms.append( prom )

        res = torch.cat(proms)

        if trim_length:
            res = trim( res, int( cfg.dataset.frames_per_second * trim_length ) )

        return res

    @torch.inference_mode()
    def text_embedding( self, input, prom=False ):
        model = None

        # grab the first loaded model
        for name, engine in self.engines.items():
            model = engine.module
            break

        if isinstance( input, str ):
            input = cfg.tokenizer.encode(input)

        if isinstance( input, list ):
            input = torch.tensor( input, dtype=torch.uint8, device=self.device )

        return model.text_emb( input )

    @torch.inference_mode()
    def audio_embedding( self, input, prom=False ):
        model = None

        # grab the first loaded model
        for name, engine in self.engines.items():
            model = engine.module
            break

        # I'm really not sure which way is better, since proms_emb and resps_emb have different properties
        if prom:
            return model.proms_emb(
                input,
                quant_level=input.shape[-1] - 1,
                offset=0,
                sums=True,
            )
        return sum([ model.resps_emb(
            input[:, :l+1],
            offset = 0 if l == 0 else 1, # or maybe set to 1
            quant_level = l,
            sums = False
        ) for l in range( input.shape[-1] - 1 ) ])

    @torch.inference_mode()
    def inference(
        self,
        text,
        references,
        language="en",
        task="tts",
        #
        max_ar_steps=6 * cfg.dataset.frames_per_second,
        max_nar_levels=7,
        #
        input_prompt_length=0.0,
        input_prompt_prefix=False,
        #
        ar_temp=0.95,
        nar_temp=0.5,
        #
        min_ar_temp=0.95,
        min_nar_temp=0.5,
        #
        top_p=1.0,
        top_k=0,
        min_p=0.0,
        #
        repetition_penalty=1.0,
        repetition_penalty_decay=0.0,
        length_penalty=0.0,
        #
        beam_width=0,
        #
        mirostat_tau=0,
        mirostat_eta=0.1,
        #
        dry_multiplier=0.0,
        dry_base=1.75,
        dry_allowed_length=2,
        #
        entropix_sampling=False,
        #
        seed=None,
        #
        out_path=None,
        #
        tqdm=True,
        use_lora=None,
    ):
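        """
        Synthesize speech for `text` conditioned on the `references` audio prompt
        (task="tts"), or transcribe `references` (task="stt").

        For TTS, `text` is split on newlines and each line is synthesized separately,
        through either an AR+NAR model pair or a len+NAR pair; the per-line waveforms
        are concatenated and returned as a (waveform, sample_rate) tuple. For STT,
        the transcription string is returned. If `out_path` is None, output is
        written under ./data/results/.
        """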
        lines = text.split("\n")

        wavs = []
        sr = None

        # pick out the loaded models by their advertised capabilities
        model_ar = None
        model_len = None
        model_nar = None

        for name, engine in self.engines.items():
            if "ar" in engine.hyper_config.capabilities:
                model_ar = engine.module
            if "len" in engine.hyper_config.capabilities:
                model_len = engine.module
            if "nar" in engine.hyper_config.capabilities:
                model_nar = engine.module

        set_seed(seed)

        # speech-to-text: transcribe the reference audio instead of synthesizing
        if task == "stt":
            resp = self.encode_audio( references )
            lang = self.encode_lang( language )

            resp = to_device(resp, device=self.device, dtype=torch.int16)
            lang = to_device(lang, device=self.device, dtype=torch.uint8)

            with torch.autocast("cuda", dtype=self.dtype, enabled=self.amp):
                if model_ar is not None:
                    text_list = model_ar(
                        text_list=None, proms_list=[resp], lang_list=[lang], resps_list=[resp], max_steps=max_ar_steps,
                        sampling_temperature=ar_temp,
                        sampling_min_temperature=min_ar_temp,
                        sampling_top_p=top_p, sampling_top_k=top_k, sampling_min_p=min_p,
                        sampling_repetition_penalty=repetition_penalty, sampling_repetition_penalty_decay=repetition_penalty_decay,
                        sampling_length_penalty=length_penalty,
                        sampling_beam_width=beam_width,
                        sampling_mirostat_tau=mirostat_tau,
                        sampling_mirostat_eta=mirostat_eta,
                        sampling_dry_multiplier=dry_multiplier,
                        sampling_dry_base=dry_base,
                        sampling_dry_allowed_length=dry_allowed_length,
                        sampling_entropix=entropix_sampling,

                        disable_tqdm=not tqdm,
                        use_lora=use_lora,
                    )
                else:
                    raise Exception('STT requires a model with the "ar" capability')

            # decoded tokens are space-separated; wider gaps mark word boundaries
            text_list = [ cfg.tokenizer.decode( text ).replace("  ", "_").replace(" ", "").replace("_", " ") for text in text_list ]

            return text_list[0]

        # text-to-speech: synthesize each line separately
        for line in lines:
            if out_path is None:
                output_dir = Path("./data/results/")
                if not output_dir.exists():
                    output_dir.mkdir(parents=True, exist_ok=True)
                out_path = output_dir / f"{time.time()}.wav"

            prom = self.encode_audio( references, trim_length=input_prompt_length ) if references else None
            phns = self.encode_text( line, language=language )
            lang = self.encode_lang( language )

            prom = to_device(prom, device=self.device, dtype=torch.int16)
            phns = to_device(phns, device=self.device, dtype=torch.uint8 if len(self.symmap) < 256 else torch.int16)
            lang = to_device(lang, device=self.device, dtype=torch.uint8)

            # to-do: add in case for experimental.hf model
            with torch.autocast("cuda", dtype=self.dtype, enabled=self.amp):
                if model_ar is not None:
                    # AR pass: sample the first quantizer level autoregressively
                    resps_list = model_ar(
                        text_list=[phns], proms_list=[prom], lang_list=[lang], max_steps=max_ar_steps,
                        input_prompt_prefix=input_prompt_prefix,
                        sampling_temperature=ar_temp,
                        sampling_min_temperature=min_ar_temp,
                        sampling_top_p=top_p, sampling_top_k=top_k, sampling_min_p=min_p,
                        sampling_repetition_penalty=repetition_penalty, sampling_repetition_penalty_decay=repetition_penalty_decay,
                        sampling_length_penalty=length_penalty,
                        sampling_beam_width=beam_width,
                        sampling_mirostat_tau=mirostat_tau,
                        sampling_mirostat_eta=mirostat_eta,
                        sampling_dry_multiplier=dry_multiplier,
                        sampling_dry_base=dry_base,
                        sampling_dry_allowed_length=dry_allowed_length,
                        sampling_entropix=entropix_sampling,

                        disable_tqdm=not tqdm,
                        use_lora=use_lora,
                    )
                    # NAR pass: fill in the remaining quantizer levels
                    resps_list = model_nar(
                        text_list=[phns], proms_list=[prom], lang_list=[lang], resps_list=resps_list,
                        input_prompt_prefix=input_prompt_prefix,
                        max_levels=max_nar_levels,
                        sampling_temperature=nar_temp,
                        sampling_min_temperature=min_nar_temp,
                        sampling_top_p=top_p, sampling_top_k=top_k, sampling_min_p=min_p,
                        sampling_repetition_penalty=repetition_penalty, sampling_repetition_penalty_decay=repetition_penalty_decay,

                        disable_tqdm=not tqdm,
                        use_lora=use_lora,
                    )
                elif model_len is not None:
                    # len model: predict a duration first, then decode every level with the NAR
                    len_list = model_len( text_list=[phns], proms_list=[prom], max_steps=10, disable_tqdm=not tqdm ) # don't need more steps than that
                    resps_list = model_nar( text_list=[phns], proms_list=[prom], len_list=len_list,
                        max_levels=max_nar_levels,
                        sampling_temperature=nar_temp,
                        sampling_min_temperature=min_nar_temp,
                        sampling_top_p=top_p, sampling_top_k=top_k, sampling_min_p=min_p,
                        sampling_repetition_penalty=repetition_penalty, sampling_repetition_penalty_decay=repetition_penalty_decay,

                        disable_tqdm=not tqdm,
                        use_lora=use_lora,
                    )
                else:
                    raise Exception('TTS requires a model with the "ar" or "len" capability')

            wav, sr = qnt.decode_to_file(resps_list[0], out_path, device=self.device)
            wavs.append(wav)

        return (torch.concat(wavs, dim=-1), sr)
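
# A minimal usage sketch, not part of the module proper; the config YAML and the
# reference clip paths below are hypothetical placeholders.
if __name__ == "__main__":
    tts = TTS( config="./training/config.yaml" )  # hypothetical YAML path
    wav, sr = tts.inference(
        text="Hello world.",
        references="./prompt.wav",  # hypothetical reference audio; join multiple clips with ";"
        language="en",
        out_path="./output.wav",  # qnt.decode_to_file writes the waveform here
    )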