diff --git a/requirements.txt b/requirements.txt
index 85d0f26..d0d398e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,5 @@ inflect
 progressbar
 einops
 unidecode
-entmax
 scipy
 librosa
\ No newline at end of file
diff --git a/tortoise/models/xtransformers.py b/tortoise/models/xtransformers.py
index 70e8e63..df9ee25 100644
--- a/tortoise/models/xtransformers.py
+++ b/tortoise/models/xtransformers.py
@@ -10,7 +10,6 @@ from collections import namedtuple
 
 from einops import rearrange, repeat, reduce
 from einops.layers.torch import Rearrange
-from entmax import entmax15
 from torch.utils.checkpoint import checkpoint
 
 DEFAULT_DIM_HEAD = 64
@@ -556,7 +555,7 @@ class Attention(nn.Module):
         self.sparse_topk = sparse_topk
 
         # entmax
-        self.attn_fn = entmax15 if use_entmax15 else F.softmax
+        self.attn_fn = F.softmax
 
         # add memory key / values
         self.num_mem_kv = num_mem_kv
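
Note: after this change self.attn_fn is always F.softmax, so a caller that still passes use_entmax15=True to Attention will silently get softmax attention rather than entmax. A minimal sketch of an alternative that keeps entmax as an optional dependency instead of removing it outright; the helper pick_attn_fn is hypothetical and not part of this diff:

import torch.nn.functional as F

try:
    # Optional dependency: only needed when entmax attention is requested.
    from entmax import entmax15
except ImportError:
    entmax15 = None


def pick_attn_fn(use_entmax15: bool = False):
    # Return the attention normalizer: entmax15 if requested and installed,
    # otherwise fall back to F.softmax (the behaviour this diff hard-codes).
    if use_entmax15:
        if entmax15 is None:
            raise ImportError("use_entmax15=True requires the entmax package")
        return entmax15
    return F.softmax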