forked from mrq/tortoise-tts
Remove entmax dep
parent 12acac6f77
commit dc0390ade1
@@ -6,6 +6,5 @@ inflect
 progressbar
 einops
 unidecode
-entmax
 scipy
 librosa
@@ -10,7 +10,6 @@ from collections import namedtuple
 from einops import rearrange, repeat, reduce
 from einops.layers.torch import Rearrange

-from entmax import entmax15
 from torch.utils.checkpoint import checkpoint

 DEFAULT_DIM_HEAD = 64
@@ -556,7 +555,7 @@ class Attention(nn.Module):
         self.sparse_topk = sparse_topk

         # entmax
-        self.attn_fn = entmax15 if use_entmax15 else F.softmax
+        self.attn_fn = F.softmax

         # add memory key / values
         self.num_mem_kv = num_mem_kv
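For context on the behavioral change: the replaced line let the Attention module use entmax15 (a sparse softmax variant from the entmax package) as its normalizer when use_entmax15 was set; after this commit the weights always come from softmax. A minimal sketch of the surviving path, using a hypothetical logits tensor `scores` that is not taken from the repo:

import torch
import torch.nn.functional as F

# Hypothetical raw attention logits: (batch, heads, queries, keys).
scores = torch.randn(1, 2, 4, 4)

# The only normalizer left after this commit: softmax gives every key a
# strictly positive weight, and each query row sums to 1.
attn = F.softmax(scores, dim=-1)
print(attn.sum(dim=-1))  # ~1.0 for every query row

# By contrast, entmax.entmax15(scores, dim=-1) could assign exactly zero
# weight to low-scoring keys (a sparse attention distribution). Dropping it
# removes the extra pip dependency at the cost of that option.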