Don't log if you aren't rank 0
parent 922b1d76df
commit 6c9718ad64
@@ -300,7 +300,8 @@ class ExtensibleTrainer(BaseModel):
         for name, net in netdict.items():
             load_path = self.opt['path']['pretrain_model_%s' % (name,)]
             if load_path is not None:
-                logger.info('Loading model for [%s]' % (load_path))
+                if self.rank <= 0:
+                    logger.info('Loading model for [%s]' % (load_path,))
                 self.load_network(load_path, net, self.opt['path']['strict_load'])
 
     def save(self, iter_step):
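In a distributed (DistributedDataParallel) run every GPU process executes this loading path, so an unguarded logger.info prints one copy of the message per process; the self.rank <= 0 check keeps the output to the main process only. A minimal sketch of the same pattern outside this class, assuming torch.distributed supplies the rank (the helper names below are illustrative, not part of this repo):

import logging
import torch.distributed as dist

logger = logging.getLogger(__name__)

def get_rank():
    # Fall back to 0 ("act as the main process") when the script is not
    # launched under torch.distributed.
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0

def rank_zero_info(message):
    # Only the rank-0 worker emits the line; the other workers stay silent
    # instead of each printing a duplicate copy.
    if get_rank() <= 0:
        logger.info(message)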
@@ -294,7 +294,7 @@ def main():
                 # log
                 logger.info('# Validation # PSNR: {:.4e} Fea: {:.4e}'.format(avg_psnr, avg_fea_loss))
                 # tensorboard logger
-                if opt['use_tb_logger'] and 'debug' not in opt['name']:
+                if opt['use_tb_logger'] and 'debug' not in opt['name'] and rank <= 0:
                     #tb_logger.add_scalar('val_psnr', avg_psnr, current_step)
                     tb_logger.add_scalar('val_fea', avg_fea_loss, current_step)
 
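The tensorboard change follows the same rule: only rank 0 should write to the event log, otherwise each process produces its own conflicting set of scalars. The diff guards the call site with rank <= 0; an alternative sketch is to give non-zero ranks a no-op writer so call sites never need the check (assuming torch.utils.tensorboard; the RankZeroTBLogger class and the log_dir below are hypothetical, not from this repo):

from torch.utils.tensorboard import SummaryWriter

class RankZeroTBLogger:
    # Rank 0 owns a real SummaryWriter; every other rank gets a no-op,
    # so scalar logging calls need no per-call rank check.
    def __init__(self, log_dir, rank):
        self.writer = SummaryWriter(log_dir=log_dir) if rank <= 0 else None

    def add_scalar(self, tag, value, step):
        if self.writer is not None:
            self.writer.add_scalar(tag, value, step)

# Usage mirroring the guarded block above:
# tb_logger = RankZeroTBLogger('../tb_logger/my_experiment', rank)
# tb_logger.add_scalar('val_fea', avg_fea_loss, current_step)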