From dc0390ade15cb85df56fd5f5626aefee914e5568 Mon Sep 17 00:00:00 2001
From: James Betker
Date: Mon, 2 May 2022 21:43:14 -0600
Subject: [PATCH] Remove entmax dep

---
 requirements.txt                 | 1 -
 tortoise/models/xtransformers.py | 3 +--
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 85d0f26..d0d398e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,5 @@ inflect
 progressbar
 einops
 unidecode
-entmax
 scipy
 librosa
\ No newline at end of file
diff --git a/tortoise/models/xtransformers.py b/tortoise/models/xtransformers.py
index 70e8e63..df9ee25 100644
--- a/tortoise/models/xtransformers.py
+++ b/tortoise/models/xtransformers.py
@@ -10,7 +10,6 @@ from collections import namedtuple
 
 from einops import rearrange, repeat, reduce
 from einops.layers.torch import Rearrange
-from entmax import entmax15
 from torch.utils.checkpoint import checkpoint
 
 DEFAULT_DIM_HEAD = 64
@@ -556,7 +555,7 @@ class Attention(nn.Module):
         self.sparse_topk = sparse_topk
 
         # entmax
-        self.attn_fn = entmax15 if use_entmax15 else F.softmax
+        self.attn_fn = F.softmax
 
         # add memory key / values
         self.num_mem_kv = num_mem_kv
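
Note on the second hunk: only the assignment is changed, so the use_entmax15 flag presumably still exists in the Attention constructor but is now ignored; attention weights always come from F.softmax. A minimal sketch of the behavioral difference, assuming a hypothetical attn_logits tensor (entmax15, from the now-removed entmax package, is a sparse softmax variant that can assign exact zeros, whereas softmax keeps every weight strictly positive):

    import torch
    import torch.nn.functional as F

    # Hypothetical attention logits for one query over four keys.
    attn_logits = torch.tensor([[2.0, 1.0, -1.0, -3.0]])

    # After this patch the attention normalizer is always softmax:
    attn_fn = F.softmax
    weights = attn_fn(attn_logits, dim=-1)

    # All entries are strictly positive and sum to 1; entmax15 could
    # instead zero out the low-scoring keys entirely.
    print(weights)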