# DL-Art-School/codes/models/gpt_voice/gpt_tts_hf.py
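# GptTtsHf wires a HuggingFace GPT-2 into an autoregressive TTS pipeline: text symbol tokens and one
# or more conditioning MEL clips (encoded by AudioMiniEncoder) are embedded, concatenated, and fed
# through GPT2Model via inputs_embeds; the model is trained to predict discrete MEL codes alongside
# the text tokens from that combined sequence.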

from time import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2Model, GPT2Config, GPT2LMHeadModel, GPT2PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from transformers.utils.model_parallel_utils import get_device_map, assert_device_map
from models.gpt_voice.gpt_asr_hf import GPT2InferenceModel
from models.gpt_voice.mini_encoder import AudioMiniEncoder
from models.tacotron2.text import symbols
from trainer.networks import register_model
from utils.util import opt_get


class GptTtsHf(nn.Module):
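    # Token-space conventions, inferred from the constants below: text tokens reuse the tacotron2
    # symbol set, with an extra id (len(symbols)) serving as the start-of-text token and id 0 doubling
    # as the stop token; MEL codes span a presumed 8192-entry codebook, with ids 8192 and 8193
    # reserved for the start and stop tokens (hence NUMBER_MEL_CODES = 8194).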
    NUMBER_TEXT_TOKENS = len(symbols)+1
    START_TEXT_TOKEN = len(symbols)
    STOP_TEXT_TOKEN = 0
    NUMBER_MEL_CODES = 8194
    START_MEL_TOKEN = 8192
    STOP_MEL_TOKEN = 8193

    def __init__(self, layers=8, model_dim=512, heads=8, max_symbols_per_phrase=200, max_mel_tokens=250, max_conditioning_inputs=3,
                 checkpointing=True, mel_length_compression=256):
        super().__init__()
        self.max_mel_tokens = max_mel_tokens
        self.max_symbols_per_phrase = max_symbols_per_phrase
        self.model_dim = model_dim
        self.max_conditioning_inputs = max_conditioning_inputs
        self.mel_length_compression = mel_length_compression

        self.conditioning_encoder = AudioMiniEncoder(80, model_dim)
        self.text_embedding = nn.Embedding(self.NUMBER_TEXT_TOKENS, model_dim)
        self.text_pos_embedding = nn.Embedding(self.max_symbols_per_phrase + 1, model_dim)
        self.conditioning_embedding = nn.Parameter(torch.randn(1, model_dim), requires_grad=True)
        self.mel_pos_embedding = nn.Embedding(self.max_mel_tokens + 1, model_dim)
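        # The GPT context must hold <start_text> plus up to max_symbols_per_phrase text tokens, the
        # conditioning embeddings, and <start_mel> plus up to max_mel_tokens MEL codes, hence the "2+".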
        seq_length = 2 + self.max_symbols_per_phrase + self.max_conditioning_inputs + self.max_mel_tokens
        self.gpt_config = GPT2Config(vocab_size=self.NUMBER_MEL_CODES,
                                     n_positions=seq_length,
                                     n_ctx=seq_length,
                                     n_embd=model_dim,
                                     n_layer=layers,
                                     n_head=heads,
                                     gradient_checkpointing=checkpointing,
                                     use_cache=not checkpointing)
        self.gpt = GPT2Model(self.gpt_config)
        self.final_norm = nn.LayerNorm(model_dim)
        self.text_head = nn.Linear(model_dim, self.NUMBER_TEXT_TOKENS)
        self.mel_head = nn.Linear(model_dim, self.NUMBER_MEL_CODES)
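
    # The training sequence is assembled as [<start_text> + text | conditioning vectors | <start_mel> + mel]
    # and pushed through GPT-2 purely via inputs_embeds: text tokens use a dedicated embedding table,
    # the conditioning clips are encoded to single vectors, and MEL codes reuse the GPT's own word
    # embeddings (whose vocabulary is NUMBER_MEL_CODES). Logits for the text and MEL segments are then
    # read off the corresponding slices of the hidden states.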
    def get_logits(self, text_inputs, cond_inputs, mel_targets, get_attns=False):
        assert text_inputs.shape[1] <= self.max_symbols_per_phrase
        assert cond_inputs.shape[1] <= self.max_conditioning_inputs
        assert mel_targets.shape[1] <= self.max_mel_tokens

        text_targets = F.pad(text_inputs, (1,0), value=self.START_TEXT_TOKEN)
        text_emb = self.text_embedding(text_targets)
        text_emb = text_emb + self.text_pos_embedding(torch.arange(text_emb.shape[1], device=text_targets.device))

        conds = []
        for k in range(cond_inputs.shape[1]):
            conds.append(self.conditioning_encoder(cond_inputs[:, k]))
        while len(conds) < self.max_conditioning_inputs:
            conds.append(conds[-1])
        conds = torch.stack(conds, dim=1)
        conds = conds + self.conditioning_embedding

        mel_targets = F.pad(mel_targets, (1,0), value=self.START_MEL_TOKEN)
        mel_emb = self.gpt.get_input_embeddings()(mel_targets)
        mel_emb = mel_emb + self.mel_pos_embedding(torch.arange(mel_emb.shape[1], device=mel_targets.device))

        emb = torch.cat([text_emb, conds, mel_emb], dim=1)
        gpt_out = self.gpt(inputs_embeds=emb, return_dict=True, output_attentions=get_attns)
        if get_attns:
            return gpt_out.attentions

        enc = gpt_out.last_hidden_state
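        # Note: the slices below take the first max_symbols_per_phrase+1 and last max_mel_tokens+1
        # positions, which lines up with the text/MEL segments only when the batch is padded out to
        # those maximum lengths (as the test harness at the bottom of this file does).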
        text_logits = self.final_norm(enc[:, :self.max_symbols_per_phrase+1])
        text_logits = self.text_head(text_logits)
        text_logits = text_logits.permute(0,2,1)  # (b, classes, seq) layout expected by F.cross_entropy
        mel_logits = self.final_norm(enc[:, -(self.max_mel_tokens+1):])
        mel_logits = self.mel_head(mel_logits)
        mel_logits = mel_logits.permute(0,2,1)
        return text_logits, mel_logits

    def forward(self, text_inputs, cond_inputs, mel_targets, wav_lengths, return_attentions=False):
        """
        Forward pass that computes the text and MEL cross-entropy losses.
        text_inputs: long tensor, (b,t)
        cond_inputs: MEL float tensor, (b,c,80,s)
        mel_targets: long tensor, (b,m)
        wav_lengths: long tensor, (b,), giving each clip's length in audio samples
        """
        # Mark padding positions within the MEL with the stop token (the dataloader currently codes
        # them with the MEL code for <zero>). wav_lengths are in samples, so dividing by
        # mel_length_compression gives lengths in MEL tokens.
        mel_lengths = wav_lengths // self.mel_length_compression
        for b in range(len(mel_lengths)):
            if mel_lengths[b] < mel_targets.shape[-1]:
                mel_targets[b, mel_lengths[b]:] = self.STOP_MEL_TOKEN

        text_logits, mel_logits = self.get_logits(text_inputs, cond_inputs, mel_targets, get_attns=return_attentions)
        if return_attentions:
            return mel_logits  # when get_attns is set, get_logits actually returns the attention maps

        text_targets = F.pad(text_inputs, (0, self.max_symbols_per_phrase-text_inputs.shape[1]+1), value=self.STOP_TEXT_TOKEN)
        loss_text = F.cross_entropy(text_logits, text_targets.long())
        mel_targets = F.pad(mel_targets, (0, self.max_mel_tokens-mel_targets.shape[1]+1), value=self.STOP_MEL_TOKEN)
        loss_mel = F.cross_entropy(mel_logits, mel_targets.long())
        return loss_text.mean(), loss_mel.mean(), mel_logits
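
    # Inference reuses the trained GPT through GPT2InferenceModel (shared with gpt_asr_hf): the text and
    # conditioning embeddings are stored on that wrapper and prepended to the sequence, after which
    # HuggingFace's generate() samples MEL codes autoregressively starting from START_MEL_TOKEN.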
    def inference(self, text_inputs, cond_inputs, do_sample=False, temperature=1.0, num_beams=8):
        if not hasattr(self, 'inference_model'):
            self.inference_model = GPT2InferenceModel(self.gpt_config, self.gpt, self.text_pos_embedding, self.final_norm, self.text_head)

        text_targets = F.pad(text_inputs, (1,0), value=self.START_TEXT_TOKEN)
        text_targets = F.pad(text_targets, (0,1), value=self.STOP_TEXT_TOKEN)
        text_emb = self.text_embedding(text_targets)
        text_emb = text_emb + self.text_pos_embedding(torch.arange(text_emb.shape[1], device=text_targets.device))

        conds = []
        for k in range(cond_inputs.shape[1]):
            conds.append(self.conditioning_encoder(cond_inputs[:, k]))
        while len(conds) < self.max_conditioning_inputs:
            conds.append(conds[-1])
        conds = torch.stack(conds, dim=1)
        # conditioning_embedding is a learned vector (nn.Parameter), so it is added directly as in
        # get_logits rather than indexed like an nn.Embedding.
        conds = conds + self.conditioning_embedding

        emb = torch.cat([text_emb, conds], dim=1)
        self.inference_model.store_mel_emb(emb)

        fake_inputs = torch.full((text_inputs.shape[0], emb.shape[1]+1,), fill_value=1, dtype=torch.long, device=text_inputs.device)
        fake_inputs[:, -1] = self.START_MEL_TOKEN
        gen = self.inference_model.generate(fake_inputs, do_sample=do_sample, bos_token_id=self.START_MEL_TOKEN,
                                            pad_token_id=self.STOP_MEL_TOKEN, eos_token_id=self.STOP_MEL_TOKEN,
                                            max_length=emb.shape[1]+self.max_mel_tokens, temperature=temperature,
                                            num_beams=num_beams, use_cache=True)
        # Strip off the priming sequence (the placeholder tokens plus <start_mel>); what remains are
        # the generated MEL codes.
        return gen[:, fake_inputs.shape[1]:]


@register_model
def register_gpt_tts_hf(opt_net, opt):
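    # opt_get pulls the optional 'kwargs' dict from the network definition, so layer count, model_dim,
    # heads, etc. come straight from the experiment configuration.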
    return GptTtsHf(**opt_get(opt_net, ['kwargs'], {}))


if __name__ == '__main__':
    gpt = GptTtsHf(model_dim=1024, heads=16)
    l = gpt(torch.randint(high=len(symbols), size=(2,200)),
            torch.randn(2,2,80,800),
            torch.randint(high=8192, size=(2,250)),
            torch.tensor([150*256, 195*256]))
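
    # A minimal inference sketch with random stand-in inputs, mirroring the shapes above; real usage
    # would pass tokenized text and conditioning MEL spectrograms for the target speaker.
    with torch.no_grad():
        codes = gpt.inference(torch.randint(high=len(symbols), size=(1,100)),
                              torch.randn(1,2,80,800),
                              num_beams=1)
        print(codes.shape)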