ExtensibleTrainer - don't compute backward when there is no loss

James Betker 2020-10-02 20:54:06 -06:00
parent 146a9125f2
commit 567b4d50a4


@@ -159,6 +159,8 @@ class ConfigurableStep(Module):
            # Scale the loss down by the accumulation factor.
            total_loss = total_loss / self.env['mega_batch_factor']
+           # In some cases, the loss could not be set (e.g. all losses have 'after')
+           if isinstance(loss, torch.Tensor):
                # Get dem grads!
                if self.env['amp']:
                    with amp.scale_loss(total_loss, self.optimizers, amp_loss_id) as scaled_loss:
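
For context, a minimal sketch of the pattern this guard enables, outside of ExtensibleTrainer: accumulate whatever losses actually fired, and only call backward() when the accumulator ended up holding a real tensor. The function name backward_if_loss and the losses/weights/mega_batch_factor arguments below are illustrative stand-ins, not the trainer's actual API.

    import torch

    def backward_if_loss(losses, weights, mega_batch_factor):
        # Accumulate weighted losses; entries may be None when a loss is
        # configured to only activate 'after' a certain step count.
        total_loss = 0
        for name, loss in losses.items():
            if loss is None:
                continue
            total_loss = total_loss + weights.get(name, 1.0) * loss

        # Scale the loss down by the accumulation factor.
        total_loss = total_loss / mega_batch_factor

        # If nothing was accumulated, total_loss is still a plain number and
        # calling .backward() on it would fail, so skip the backward pass.
        if isinstance(total_loss, torch.Tensor):
            total_loss.backward()
            return True
        return False

    # With no active losses, the backward pass is skipped entirely:
    # backward_if_loss({'pix': None}, {'pix': 1.0}, mega_batch_factor=2)  -> False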