From c69aba2a730252a3cee1fe54b109ef32f6e959d0 Mon Sep 17 00:00:00 2001
From: Kashif Rasul
Date: Tue, 29 Nov 2022 00:11:38 +0100
Subject: [PATCH] fix call to activation_fn

---
 examples/fairseq/models/bert.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/fairseq/models/bert.py b/examples/fairseq/models/bert.py
index 7bbe382..d3ffa3f 100644
--- a/examples/fairseq/models/bert.py
+++ b/examples/fairseq/models/bert.py
@@ -391,7 +391,7 @@ class ClassificationHead(nn.Module):
         x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
         x = self.dropout(x)
         x = self.dense(x)
-        x = self.activation_fn(x)
+        x = self.activation_fn(x.float()).type_as(x)
         x = self.dropout(x)
         x = self.out_proj(x)
         return x
@@ -418,7 +418,7 @@ class LMHead(nn.Module):
             features = features[masked_tokens, :]
 
         x = self.dense(features)
-        x = self.activation_fn(x)
+        x = self.activation_fn(x.float()).type_as(x)
         x = self.layer_norm(x)
         # project back to size of vocabulary with bias
         x = F.linear(x, self.weight) + self.bias
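
The change follows the common mixed-precision pattern of running the activation in float32 and casting the result back to the input's dtype (PyTorch's tensor method for this is type_as). Below is a minimal standalone sketch of that pattern, not part of the patch; the choice of nn.GELU as the activation and the tensor shapes are illustrative assumptions.

import torch
import torch.nn as nn

# Sketch of the upcast-then-cast-back pattern: the activation is computed in
# float32 for numerical stability, and the result is converted back to the
# input's dtype (e.g. float16 under mixed-precision training).
activation_fn = nn.GELU()  # assumed activation; the model may configure a different one

x = torch.randn(2, 4, dtype=torch.float16)
y = activation_fn(x.float()).type_as(x)  # compute in fp32, return in the input dtype

assert y.dtype == x.dtype  # downstream layers see the original dtype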