import os
from collections import OrderedDict

import torch
import torch.nn as nn
from torch.nn.parallel.distributed import DistributedDataParallel

import utils.util


class BaseModel:
    """Base class for concrete models: tracks optimizers, schedulers and saved
    checkpoint paths, and provides saving/loading and LR warm-up utilities."""

    def __init__(self, opt):
        self.opt = opt
        if opt['dist']:
            self.rank = torch.distributed.get_rank()
        else:
            self.rank = -1  # non-distributed training
        self.device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
        self.amp_level = 'O0' if opt['amp_opt_level'] is None else opt['amp_opt_level']
        self.is_train = opt['is_train']
        self.schedulers = []
        self.optimizers = []
        self.disc_optimizers = []
        self.save_history = {}  # maps network labels to the checkpoint paths saved so far
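
    # Illustrative sketch (not from the original file) of the minimal `opt` keys read by
    # __init__ and the save helpers below; real option dicts come from the project's
    # config loader:
    #   opt = {'dist': False, 'gpu_ids': [0], 'amp_opt_level': None, 'is_train': True,
    #          'path': {'models': 'experiments/demo/models',
    #                   'training_state': 'experiments/demo/training_state'}}
    #   model = BaseModel(opt)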

    # The hooks below are overridden by concrete model implementations.
    def feed_data(self, data):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        pass

    def get_current_losses(self):
        pass

    def print_network(self):
        pass

    def save(self, label):
        pass

    def load(self):
        pass

    def _set_lr(self, lr_groups_l):
        """Set the learning rate for warm-up.

        lr_groups_l: list of lr groups, one entry per optimizer."""
        for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):
            for param_group, lr in zip(optimizer.param_groups, lr_groups):
                param_group['lr'] = lr

    def _get_init_lr(self):
        """Get the initial lr, which is set by the scheduler."""
        init_lr_groups_l = []
        for optimizer in self.optimizers:
            init_lr_groups_l.append([v['initial_lr'] for v in optimizer.param_groups])
        return init_lr_groups_l

    def update_learning_rate(self, cur_iter, warmup_iter=-1):
        for scheduler in self.schedulers:
            scheduler.last_epoch = cur_iter
            scheduler.step()
        # Set up the warm-up learning rate.
        if cur_iter < warmup_iter:
            # Get the initial lr for each group.
            init_lr_g_l = self._get_init_lr()
            # Modify the warming-up learning rates.
            warm_up_lr_l = []
            for init_lr_g in init_lr_g_l:
                warm_up_lr_l.append([v / warmup_iter * cur_iter for v in init_lr_g])
            # Set the learning rate.
            self._set_lr(warm_up_lr_l)
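
    # Worked example (illustrative numbers, not from the original file): with an
    # initial LR of 1e-4, warmup_iter=1000 and cur_iter=250, the warm-up branch
    # above overrides the scheduler and sets lr = 1e-4 / 1000 * 250 = 2.5e-5,
    # ramping linearly up to the full 1e-4 at iteration 1000.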

    def get_current_learning_rate(self):
        return [param_group['lr'] for param_group in self.optimizers[0].param_groups]

    def get_network_description(self, network):
        """Get the string representation and total parameter count of the network."""
        if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
            network = network.module
        return str(network), sum(map(lambda x: x.numel(), network.parameters()))
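
    # Illustrative usage (hypothetical `self.netG` attribute defined by a subclass):
    #   net_str, n_params = self.get_network_description(self.netG)
    #   print('Generator architecture:\n%s\nParameters: %d' % (net_str, n_params))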

    def save_network(self, network, network_label, iter_label):
        save_filename = '{}_{}.pth'.format(iter_label, network_label)
        save_path = os.path.join(self.opt['path']['models'], save_filename)
        if isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel):
            network = network.module
        state_dict = network.state_dict()
        for key, param in state_dict.items():
            state_dict[key] = param.cpu()
        torch.save(state_dict, save_path)
        if network_label not in self.save_history.keys():
            self.save_history[network_label] = []
        self.save_history[network_label].append(save_path)

        # Also save to the 'alt_path', which is useful for caching to Google Drive in colab, for example.
        if 'alt_path' in self.opt['path'].keys():
            torch.save(state_dict, os.path.join(self.opt['path']['alt_path'], save_filename))
        if self.opt['colab_mode']:
            utils.util.copy_files_to_server(self.opt['ssh_server'], self.opt['ssh_username'], self.opt['ssh_password'],
                                            save_path, os.path.join(self.opt['remote_path'], 'models', save_filename))
        return save_path
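
    # Illustrative usage from a subclass's save() hook (hypothetical `self.netG`):
    #   def save(self, iter_label):
    #       self.save_network(self.netG, 'G', iter_label)  # -> <path/models>/<iter_label>_G.pth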

    def load_network(self, load_path, network, strict=True, pretrain_base_path=None):
        # Sometimes networks are passed in as DDP modules; we want the raw parameters.
        if hasattr(network, 'module'):
            network = network.module
        load_net = torch.load(load_path)

        # Support loading torch.save()s for whole models as well as just state_dicts.
        if 'state_dict' in load_net:
            load_net = load_net['state_dict']
        load_net_clean = OrderedDict()  # remove unnecessary 'module.' prefixes

        if pretrain_base_path is not None:
            t = load_net
            load_net = {}
            for k, v in t.items():
                if k.startswith(pretrain_base_path):
                    load_net[k[len(pretrain_base_path):]] = v

        for k, v in load_net.items():
            if k.startswith('module.'):
                load_net_clean[k.replace('module.', '')] = v
            else:
                load_net_clean[k] = v
        network.load_state_dict(load_net_clean, strict=strict)
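
    # Example of the key cleanup above (key names are illustrative): a checkpoint saved
    # from a DataParallel/DDP wrapper stores keys such as 'module.conv1.weight', which
    # are loaded into the bare network as 'conv1.weight'. Passing
    # pretrain_base_path='encoder.' keeps only the 'encoder.*' entries and strips that
    # prefix before loading.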

    def save_training_state(self, state):
        """Save the training state during training, which will be used for resuming."""
        state.update({'schedulers': [], 'optimizers': []})
        for s in self.schedulers:
            state['schedulers'].append(s.state_dict())
        for o in self.optimizers:
            state['optimizers'].append(o.state_dict())
        if 'amp_opt_level' in self.opt.keys():
            from apex import amp  # imported lazily, as in resume_training(), so apex is only needed when AMP is configured
            state['amp'] = amp.state_dict()
        save_filename = '{}.state'.format(utils.util.opt_get(state, ['iter'], 'no_step_provided'))
        save_path = os.path.join(self.opt['path']['training_state'], save_filename)
        torch.save(state, save_path)
        if '__state__' not in self.save_history.keys():
            self.save_history['__state__'] = []
        self.save_history['__state__'].append(save_path)

        # Also save to the 'alt_path', which is useful for caching to Google Drive in colab, for example.
        if 'alt_path' in self.opt['path'].keys():
            torch.save(state, os.path.join(self.opt['path']['alt_path'], 'latest.state'))
        if self.opt['colab_mode']:
            utils.util.copy_files_to_server(self.opt['ssh_server'], self.opt['ssh_username'], self.opt['ssh_password'],
                                            save_path, os.path.join(self.opt['remote_path'], 'training_state', save_filename))

    def resume_training(self, resume_state, load_amp=True):
        """Resume the optimizers and schedulers for training."""
        resume_optimizers = resume_state['optimizers']
        resume_schedulers = resume_state['schedulers']
        assert len(resume_optimizers) == len(self.optimizers), 'Wrong lengths of optimizers'
        assert len(resume_schedulers) == len(self.schedulers), 'Wrong lengths of schedulers'
        for i, o in enumerate(resume_optimizers):
            self.optimizers[i].load_state_dict(o)
        for i, s in enumerate(resume_schedulers):
            self.schedulers[i].load_state_dict(s)
        if load_amp and 'amp' in resume_state.keys():
            from apex import amp
            amp.load_state_dict(resume_state['amp'])
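
    # Illustrative save/resume round trip (hypothetical values; `current_step` and the
    # surrounding training loop are not part of this file):
    #   model.save_training_state({'epoch': epoch, 'iter': current_step})
    #   ...
    #   resume_state = torch.load('experiments/demo/training_state/%d.state' % current_step)
    #   model.resume_training(resume_state)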