# DL-Art-School/codes/models/audio/tts/autoregressive_codegen.py

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import GPT2PreTrainedModel, GPT2Config
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from models.arch_util import AttentionBlock
from models.lucidrains.x_transformers import TransformerWrapper, Decoder, Encoder
from trainer.networks import register_model

class InferenceModel(GPT2PreTrainedModel):
    """
    Wrapper that exposes this transformer through the GPT2PreTrainedModel interface from
    transformers, so that the HuggingFace generation utilities can drive it.
    """
    def __init__(self, model):
        super().__init__(GPT2Config())
        self.transformer = model
        self.context = None

    def parallelize(self, device_map=None):
        # Not implemented.
        pass

    def deparallelize(self):
        # Not implemented.
        pass

    def get_output_embeddings(self):
        assert False, "Unsupported operation."

    def set_output_embeddings(self, new_embeddings):
        assert False, "Unsupported operation."

    def store_context(self, context):
        self.context = context
    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # Only feed the last token of input_ids when a cache (`past`) is provided.
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # Create position_ids on the fly for batch generation.
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
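
    # Hedged note (not part of the original file): the method above mirrors the GPT-2
    # prepare_inputs_for_generation behavior in transformers - once a key/value cache (`past`)
    # exists, only the newest token (and its token_type_id / position_id) is fed each step,
    # while the full attention_mask is passed through untouched.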

    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        assert self.context is not None
        assert inputs_embeds is None  # Not supported by this inference model.
        assert labels is None  # Training not supported by this inference model.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        out = self.transformer.decoder(input_ids, full_context=self.context, return_embeddings=True,
                                       past_key_values=past_key_values, use_cache=use_cache)
        if use_cache:
            hidden_states, present_key_values = out
        else:
            hidden_states = out
            present_key_values = None
        logits = self.transformer.decoder.to_logits(hidden_states)

        if not return_dict:
            return (logits, )

        return CausalLMOutputWithCrossAttentions(
            loss=None,
            logits=logits,
            past_key_values=present_key_values,
            hidden_states=hidden_states,
            attentions=None,
            cross_attentions=None,
        )

    @staticmethod
    def _reorder_cache(past, beam_idx):
        """
        This function is used to re-order the :obj:`past_key_values` cache if
        :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
        called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past
        )
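
# Hedged usage note (not part of the original file): InferenceModel is only meant to be
# driven by AutoregressiveCodegen.generate() further down, which roughly does
#
#     wrapper = InferenceModel(codegen)
#     wrapper.store_context(full_context)   # cross-attention context built by the text encoder
#     wrapper.generate(bos_token_id=..., eos_token_id=..., use_cache=True, ...)
#
# store_context() must be called before forward(), otherwise the assertion on self.context
# fires. _reorder_cache assumes past_key_values is a tuple of per-layer tuples of tensors
# whose first dimension indexes beams/batch, matching the HuggingFace convention.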

class ResBlock(nn.Module):
    """
    Basic residual convolutional block that uses GroupNorm.
    """
    def __init__(self, chan):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(chan, chan, kernel_size=3, padding=1),
            nn.GroupNorm(chan//8, chan),
            nn.ReLU(),
            nn.Conv1d(chan, chan, kernel_size=3, padding=1),
            nn.GroupNorm(chan//8, chan)
        )

    def forward(self, x):
        return F.relu(self.net(x) + x)
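
# A minimal shape sanity-check sketch (illustrative, not part of the original module). It
# assumes `chan` is divisible by 8 so that the GroupNorm group count inside ResBlock is valid.
def _resblock_shape_demo():
    block = ResBlock(64)
    x = torch.randn(2, 64, 100)   # (batch, channels, time)
    y = block(x)
    assert y.shape == x.shape     # the residual block is shape-preserving
    return y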

class ConditioningEncoder(nn.Module):
    def __init__(self,
                 spec_dim,
                 embedding_dim,
                 attn_blocks=6,
                 num_attn_heads=4,
                 do_checkpointing=False):
        super().__init__()
        attn = []
        self.init = nn.Sequential(nn.Conv1d(spec_dim, embedding_dim//4, kernel_size=5, padding=2),
                                  nn.Conv1d(embedding_dim//4, embedding_dim//2, kernel_size=3, padding=1, stride=2),
                                  ResBlock(embedding_dim//2),
                                  nn.Conv1d(embedding_dim//2, embedding_dim, kernel_size=3, padding=1, stride=2))
        for a in range(attn_blocks):
            attn.append(AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=do_checkpointing))
        self.attn = nn.Sequential(*attn)
        self.dim = embedding_dim

    def forward(self, x):
        h = self.init(x)
        h = self.attn(h)
        return h.mean(dim=2)
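
# A hedged usage sketch (illustrative, not part of the original file): the conditioning
# encoder collapses a (batch, spec_dim, time) reference spectrogram into a single
# (batch, embedding_dim) vector - the two stride-2 convolutions downsample time by 4x and
# the final mean over dim=2 removes the time axis entirely. The 80 mel channels and 256-dim
# embedding below are assumptions matching the values used elsewhere in this file.
def _conditioning_encoder_demo():
    enc = ConditioningEncoder(80, 256)
    mel = torch.randn(2, 80, 120)   # (batch, n_mel_channels, frames)
    emb = enc(mel)
    assert emb.shape == (2, 256)
    return emb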

class AutoregressiveCodegen(nn.Module):
    def __init__(self, model_dim, depth, num_text_tokens=256, num_mel_tokens=8194, dropout=.1):
        super().__init__()
        assert depth >= 8  # This is the minimum bound to support the context interleaving that happens later.

        self.START_TOKEN = 8192
        self.STOP_TOKEN = 8193
        self.max_text_token_id = num_text_tokens
        self.max_mel_token_id = num_mel_tokens
        self.mel_embedding = ConditioningEncoder(80, model_dim, do_checkpointing=False)
        self.encoder = TransformerWrapper(
            num_tokens=num_text_tokens,
            use_pos_emb=False,
            max_seq_len=-1,
            attn_layers=Encoder(
                depth=depth,
                heads=model_dim//64,
                dim=model_dim,
                attn_dropout=dropout,
                ff_dropout=dropout,
                use_rmsnorm=True,
                ff_glu=True,
                ff_mult=1,
                rotary_pos_emb=True,
                attn_rel_pos_bias=True,
            ))
        self.encoder.norm = nn.Identity()  # This layer and the next are unused.
        self.encoder.to_logits = nn.Identity()
        self.decoder = TransformerWrapper(
            num_tokens=num_mel_tokens,
            use_pos_emb=False,
            max_seq_len=-1,
            attn_layers=Decoder(
                depth=depth,
                heads=model_dim//64,
                dim=model_dim,
                attn_dropout=dropout,
                ff_dropout=dropout,
                use_rmsnorm=True,
                ff_glu=True,
                ff_mult=1,
                rotary_pos_emb=True,
                cross_attend=True,
                attn_rel_pos_bias=True,
            ))

    def get_grad_norm_parameter_groups(self):
        return {
            'encoder': list(self.encoder.parameters()),
            'decoder': list(self.decoder.parameters()),
            'minicoder': list(self.mel_embedding.parameters()),
        }

    def forward(self, text_codes, conditioning_signal, mel_codes, wav_lengths, return_loss=True):
        assert text_codes.max() < self.max_text_token_id and text_codes.min() >= 0, f'Invalid text code encountered: {text_codes.max()}, {text_codes.min()}'
        assert mel_codes.max() < self.max_mel_token_id and mel_codes.min() >= 0, f'Invalid mel code encountered: {mel_codes.max()}, {mel_codes.min()}'

        # Format mel_codes with a stop token on the end.
        mel_lengths = wav_lengths // 1024 + 1
        for b in range(mel_codes.shape[0]):
            mel_codes[b, mel_lengths[b]:] = self.STOP_TOKEN
        mel_codes = F.pad(mel_codes, (0, 1), value=self.STOP_TOKEN)

        # Build the context.
        if len(conditioning_signal.shape) != 4:
            conditioning_signal = conditioning_signal.unsqueeze(1)
        cond_embs = []
        for i in range(conditioning_signal.shape[1]):
            cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
        cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
        _, enc_text = self.encoder(text_codes, return_hiddens=True)
        # Interleave cond_emb into the first few contexts.
        full_context = enc_text
        full_context[1] = cond_emb
        full_context[3] = cond_emb
        full_context[6] = cond_emb

        # Execute the decoder.
        dec_inputs = F.pad(mel_codes, (1, 0), value=self.START_TOKEN)[:, :-1]
        dec = self.decoder(dec_inputs, full_context=full_context)
        if not return_loss:
            return dec
        loss_mel = F.cross_entropy(dec.permute(0, 2, 1), mel_codes)
        return loss_mel
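
    # Hedged note (not part of the original file): the decoder above returns per-position
    # logits of shape (batch, sequence, num_mel_tokens); F.cross_entropy expects the class
    # dimension second, hence the permute(0, 2, 1). Because every position past the true mel
    # length was overwritten with STOP_TOKEN, the model is also trained to emit the stop code
    # once the utterance ends.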

    def generate(self, conditioning_signal, text_codes, max_tokens=256, **hf_generate_kwargs):
        inference_model = InferenceModel(self)

        # Build the context.
        if len(conditioning_signal.shape) != 4:
            conditioning_signal = conditioning_signal.unsqueeze(1)
        cond_embs = []
        for i in range(conditioning_signal.shape[1]):
            cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
        cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
        _, enc_text = self.encoder(text_codes, return_hiddens=True)
        # Interleave cond_emb into the first few contexts.
        full_context = enc_text
        full_context[1] = cond_emb
        full_context[3] = cond_emb
        full_context[6] = cond_emb
        inference_model.store_context(full_context)

        gen = inference_model.generate(bos_token_id=self.START_TOKEN, pad_token_id=self.STOP_TOKEN, eos_token_id=self.STOP_TOKEN,
                                       max_length=max_tokens, output_attentions=False, return_dict_in_generate=True, use_cache=True,
                                       **hf_generate_kwargs)
        return gen.sequences
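
# A hedged end-to-end sketch (illustrative, not part of the original file). Shapes and
# hyperparameters are assumptions modeled on the smoke test at the bottom of this file;
# wav_lengths are in samples, with each mel code covering 1024 samples (see the //1024 in
# forward()). The generate() call additionally assumes the forked x_transformers decoder
# supports use_cache/past_key_values, as exercised by InferenceModel above.
def _codegen_demo():
    model = AutoregressiveCodegen(model_dim=256, depth=8)
    text = torch.randint(0, 256, (2, 200))       # tokenized text
    cond = torch.randn(2, 80, 120)               # reference mel clips: (batch, n_mel, frames)
    mel = torch.randint(0, 8192, (2, 350))       # target discrete mel codes
    wav_lengths = torch.tensor([192 * 1024, 350 * 1024])
    loss = model(text, cond, mel, wav_lengths)   # scalar cross-entropy over the mel codes
    with torch.no_grad():
        codes = model.generate(cond[:1], text[:1], max_tokens=64)
    return loss, codes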

@register_model
def register_autoregressive_codegen(opt_net, opt):
    return AutoregressiveCodegen(**opt_net['kwargs'])
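
# Hedged note (not part of the original file): register_model exposes this network to the
# DL-Art-School trainer by name; opt_net['kwargs'] from the experiment configuration is
# passed straight through to the constructor, so its keys are expected to match the
# constructor signature (model_dim, depth, and optionally num_text_tokens / num_mel_tokens
# / dropout).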

if __name__ == '__main__':
    codegen = AutoregressiveCodegen(256, 10)
    torch.save(codegen.state_dict(), 'sample.pth')
    #codegen.generate(torch.randn((1,80,120)), torch.randint(0,256,(1,200)))
    codegen(torch.randint(0, 256, (2, 200)),
            torch.randn(2, 80, 120),
            torch.randint(0, 8192, (2, 350)),
            torch.tensor([192, 350]))