From 29b2f36f556849162a95a16bd0ce257a3e4d0625 Mon Sep 17 00:00:00 2001
From: James Betker
Date: Mon, 2 May 2022 21:43:14 -0600
Subject: [PATCH] Remove entmax dep

---
 requirements.txt                 | 1 -
 setup.py                         | 1 -
 tortoise/models/xtransformers.py | 3 +--
 3 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 85d0f26..d0d398e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,5 @@ inflect
 progressbar
 einops
 unidecode
-entmax
 scipy
 librosa
\ No newline at end of file
diff --git a/setup.py b/setup.py
index f4e9e37..019e48d 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,6 @@ setuptools.setup(
         'progressbar',
         'einops',
         'unidecode',
-        'entmax',
         'scipy',
         'librosa',
         'transformers',
diff --git a/tortoise/models/xtransformers.py b/tortoise/models/xtransformers.py
index 70e8e63..df9ee25 100644
--- a/tortoise/models/xtransformers.py
+++ b/tortoise/models/xtransformers.py
@@ -10,7 +10,6 @@ from collections import namedtuple
 from einops import rearrange, repeat, reduce
 from einops.layers.torch import Rearrange
-from entmax import entmax15
 from torch.utils.checkpoint import checkpoint
 
 DEFAULT_DIM_HEAD = 64
@@ -556,7 +555,7 @@ class Attention(nn.Module):
         self.sparse_topk = sparse_topk
 
         # entmax
-        self.attn_fn = entmax15 if use_entmax15 else F.softmax
+        self.attn_fn = F.softmax
 
         # add memory key / values
         self.num_mem_kv = num_mem_kv
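
Note: after this patch, Attention.attn_fn in xtransformers.py is always F.softmax, so a caller passing use_entmax15 (if the flag is still accepted) no longer switches to sparse entmax attention. A minimal sketch of the resulting behavior; the tensor shape and variable names below are illustrative, not copied from the file:

    import torch
    import torch.nn.functional as F

    # Stand-in for the (batch, heads, query, key) similarity scores that the
    # attention module normalizes into weights.
    dots = torch.randn(1, 8, 16, 16)

    attn_fn = F.softmax            # previously: entmax15 if use_entmax15 else F.softmax
    attn = attn_fn(dots, dim=-1)   # dense rows summing to 1; entmax15 would yield sparse rows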