forked from mrq/DL-Art-School
ExtensibleTrainer - don't compute backward when there is no loss
This commit is contained in:
parent 146a9125f2
commit 567b4d50a4
@@ -159,6 +159,8 @@ class ConfigurableStep(Module):
         # Scale the loss down by the accumulation factor.
         total_loss = total_loss / self.env['mega_batch_factor']

+        # In some cases, the loss could not be set (e.g. all losses have 'after'
+        if isinstance(loss, torch.Tensor):
             # Get dem grads!
             if self.env['amp']:
                 with amp.scale_loss(total_loss, self.optimizers, amp_loss_id) as scaled_loss:
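Below is a minimal, self-contained sketch of the pattern this commit introduces: check that a loss tensor was actually produced before scaling it and calling backward(). The helper name maybe_backward is hypothetical, and plain PyTorch backward stands in for the repository's apex amp path (amp.scale_loss); the mega_batch_factor scaling mirrors the diff.

import torch

def maybe_backward(total_loss, mega_batch_factor):
    # Skip backward entirely when no loss tensor was set this step
    # (e.g. every configured loss is deferred), mirroring the commit's
    # isinstance(..., torch.Tensor) guard.
    if not isinstance(total_loss, torch.Tensor):
        return False
    # Scale the loss down by the gradient-accumulation factor, then backprop.
    (total_loss / mega_batch_factor).backward()
    return True

For example, maybe_backward((w * 2).sum(), mega_batch_factor=2) populates w.grad for a parameter w, while maybe_backward(0, mega_batch_factor=2) simply returns False instead of failing when total_loss is still an integer placeholder.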