Copy train.py mods to train2

James Betker 2020-10-22 17:16:36 -06:00
parent e9c0b9f0fd
commit 8636492db0
2 changed files with 248 additions and 261 deletions


@@ -278,7 +278,7 @@ class Trainer:
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_exd_imgset_chained_structured_trans_invariance.yml')
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_prog_imgset_multifaceted_chained.yml')
     parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher')
     args = parser.parse_args()
     opt = option.parse(args.opt, is_train=True)


@@ -13,288 +13,275 @@
from data import create_dataloader, create_dataset
from models.ExtensibleTrainer import ExtensibleTrainer
from time import time


class Trainer:
    def init_dist(self, backend='nccl', **kwargs):  # default backend added: init() calls this with no arguments
        """Initialization for distributed training."""
        # These packages have globals that screw with Windows, so only import them if needed.
        import torch.distributed as dist
        import torch.multiprocessing as mp

        if mp.get_start_method(allow_none=True) != 'spawn':
            mp.set_start_method('spawn')
        self.rank = int(os.environ['RANK'])
        num_gpus = torch.cuda.device_count()
        torch.cuda.set_device(self.rank % num_gpus)
        dist.init_process_group(backend=backend, **kwargs)

    def init(self, opt, launcher, all_networks={}):
        self._profile = False
        self._t = time()  # timestamp used by the profiling prints in do_step()

        #### distributed training settings
        if len(opt['gpu_ids']) == 1 and torch.cuda.device_count() > 1:
            gpu = input('I noticed you have multiple GPUs. Starting two jobs on the same GPU sucks. Please confirm '
                        'which GPU you want to use. Press enter to use the specified one [%s]' % (opt['gpu_ids']))
            if gpu:
                opt['gpu_ids'] = [int(gpu)]
        if launcher == 'none':  # disabled distributed training
            opt['dist'] = False
            self.rank = -1
            print('Disabled distributed training.')
        else:
            opt['dist'] = True
            self.init_dist()
            world_size = torch.distributed.get_world_size()
            self.rank = torch.distributed.get_rank()

        #### loading resume state if exists
        if opt['path'].get('resume_state', None):
            # distributed resuming: all load into default GPU
            device_id = torch.cuda.current_device()
            resume_state = torch.load(opt['path']['resume_state'],
                                      map_location=lambda storage, loc: storage.cuda(device_id))
            option.check_resume(opt, resume_state['iter'])  # check resume options
        else:
            resume_state = None

        #### mkdir and loggers
        if self.rank <= 0:  # normal training (self.rank -1) OR distributed training (self.rank 0)
            if resume_state is None:
                util.mkdir_and_rename(opt['path']['experiments_root'])  # rename experiment folder if exists
                util.mkdirs((path for key, path in opt['path'].items()
                             if not key == 'experiments_root' and path is not None
                             and 'pretrain_model' not in key and 'resume' not in key))

            # config loggers. Before it, the log will not work
            util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,
                              screen=True, tofile=True)
            self.logger = logging.getLogger('base')
            self.logger.info(option.dict2str(opt))
            # tensorboard logger
            if opt['use_tb_logger'] and 'debug' not in opt['name']:
                self.tb_logger_path = os.path.join(opt['path']['experiments_root'], 'tb_logger')
                version = float(torch.__version__[0:3])
                if version >= 1.1:  # PyTorch 1.1
                    from torch.utils.tensorboard import SummaryWriter
                else:
                    self.logger.info('You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))
                    from tensorboardX import SummaryWriter
                self.tb_logger = SummaryWriter(log_dir=self.tb_logger_path)
        else:
            util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)
            self.logger = logging.getLogger('base')

        # convert to NoneDict, which returns None for missing keys
        opt = option.dict_to_nonedict(opt)
        self.opt = opt

        #### random seed
        seed = opt['train']['manual_seed']
        if seed is None:
            seed = random.randint(1, 10000)
        if self.rank <= 0:
            self.logger.info('Random seed: {}'.format(seed))
        util.set_random_seed(seed)

        torch.backends.cudnn.benchmark = True
        # torch.backends.cudnn.deterministic = True
        # torch.autograd.set_detect_anomaly(True)

        # Save the compiled opt dict to the global loaded_options variable.
        util.loaded_options = opt

        #### create train and val dataloader
        dataset_ratio = 1  # enlarge the size of each epoch
        for phase, dataset_opt in opt['datasets'].items():
            if phase == 'train':
                self.train_set = create_dataset(dataset_opt)
                train_size = int(math.ceil(len(self.train_set) / dataset_opt['batch_size']))
                total_iters = int(opt['train']['niter'])
                self.total_epochs = int(math.ceil(total_iters / train_size))
                if opt['dist']:
                    train_sampler = DistIterSampler(self.train_set, world_size, self.rank, dataset_ratio)
                    self.total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))
                else:
                    train_sampler = None
                self.train_sampler = train_sampler  # kept on the instance; do_training() re-seeds it each epoch
                self.train_loader = create_dataloader(self.train_set, dataset_opt, opt, train_sampler)
                if self.rank <= 0:
                    self.logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                        len(self.train_set), train_size))
                    self.logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
                        self.total_epochs, total_iters))
            elif phase == 'val':
                self.val_set = create_dataset(dataset_opt)
                self.val_loader = create_dataloader(self.val_set, dataset_opt, opt, None)
                if self.rank <= 0:
                    self.logger.info('Number of val images in [{:s}]: {:d}'.format(
                        dataset_opt['name'], len(self.val_set)))
            else:
                raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
        assert self.train_loader is not None

        #### create model
        self.model = ExtensibleTrainer(opt, cached_networks=all_networks)

        #### resume training
        if resume_state:
            self.logger.info('Resuming training from epoch: {}, iter: {}.'.format(
                resume_state['epoch'], resume_state['iter']))
            self.start_epoch = resume_state['epoch']
            self.current_step = resume_state['iter']
            self.model.resume_training(resume_state, 'amp_opt_level' in opt.keys())  # handle optimizers and schedulers
        else:
            self.current_step = -1 if 'start_step' not in opt.keys() else opt['start_step']
            self.start_epoch = 0
        if 'force_start_step' in opt.keys():
            self.current_step = opt['force_start_step']

    def do_step(self, train_data):
        opt = self.opt
        if self._profile:
            print("Data fetch: %f" % (time() - self._t))
            self._t = time()

        self.current_step += 1
        #### update learning rate
        self.model.update_learning_rate(self.current_step, warmup_iter=opt['train']['warmup_iter'])

        #### training
        if self._profile:
            print("Update LR: %f" % (time() - self._t))
            self._t = time()
        self.model.feed_data(train_data)
        self.model.optimize_parameters(self.current_step)
        if self._profile:
            print("Model feed + step: %f" % (time() - self._t))
            self._t = time()

        #### log
        if self.current_step % opt['logger']['print_freq'] == 0 and self.rank <= 0:
            logs = self.model.get_current_log(self.current_step)
            message = '[epoch:{:3d}, iter:{:8,d}, lr:('.format(self.epoch, self.current_step)
            for v in self.model.get_current_learning_rate():
                message += '{:.3e},'.format(v)
            message += ')] '
            for k, v in logs.items():
                if 'histogram' in k:
                    self.tb_logger.add_histogram(k, v, self.current_step)
                elif isinstance(v, dict):
                    self.tb_logger.add_scalars(k, v, self.current_step)
                else:
                    message += '{:s}: {:.4e} '.format(k, v)
                    # tensorboard logger
                    if opt['use_tb_logger'] and 'debug' not in opt['name']:
                        self.tb_logger.add_scalar(k, v, self.current_step)
            self.logger.info(message)

        #### save models and training states
        if self.current_step % opt['logger']['save_checkpoint_freq'] == 0:
            if self.rank <= 0:
                self.logger.info('Saving models and training states.')
                self.model.save(self.current_step)
                self.model.save_training_state(self.epoch, self.current_step)
            if 'alt_path' in opt['path'].keys():
                import shutil
                print("Synchronizing tb_logger to alt_path..")
                alt_tblogger = os.path.join(opt['path']['alt_path'], "tb_logger")
                shutil.rmtree(alt_tblogger, ignore_errors=True)
                shutil.copytree(self.tb_logger_path, alt_tblogger)

        #### validation
        if opt['datasets'].get('val', None) and self.current_step % opt['train']['val_freq'] == 0:
            if opt['model'] in ['sr', 'srgan', 'corruptgan', 'spsrgan',
                                'extensibletrainer'] and self.rank <= 0:  # image restoration validation
                avg_psnr = 0.
                avg_fea_loss = 0.
                idx = 0
                val_tqdm = tqdm(self.val_loader)
                for val_data in val_tqdm:
                    idx += 1
                    for b in range(len(val_data['LQ_path'])):
                        img_name = os.path.splitext(os.path.basename(val_data['LQ_path'][b]))[0]
                        img_dir = os.path.join(opt['path']['val_images'], img_name)
                        util.mkdir(img_dir)

                        self.model.feed_data(val_data)
                        self.model.test()

                        visuals = self.model.get_current_visuals()
                        if visuals is None:
                            continue

                        # calculate PSNR
                        sr_img = util.tensor2img(visuals['rlt'][b])  # uint8
                        gt_img = util.tensor2img(visuals['GT'][b])  # uint8
                        sr_img, gt_img = util.crop_border([sr_img, gt_img], opt['scale'])
                        avg_psnr += util.calculate_psnr(sr_img, gt_img)

                        # calculate fea loss
                        avg_fea_loss += self.model.compute_fea_loss(visuals['rlt'][b], visuals['GT'][b])

                        # Save SR images for reference
                        img_base_name = '{:s}_{:d}.png'.format(img_name, self.current_step)
                        save_img_path = os.path.join(img_dir, img_base_name)
                        util.save_img(sr_img, save_img_path)

                avg_psnr = avg_psnr / idx
                avg_fea_loss = avg_fea_loss / idx

                # log
                self.logger.info('# Validation # PSNR: {:.4e} Fea: {:.4e}'.format(avg_psnr, avg_fea_loss))

                # tensorboard logger
                if opt['use_tb_logger'] and 'debug' not in opt['name'] and self.rank <= 0:
                    self.tb_logger.add_scalar('val_psnr', avg_psnr, self.current_step)
                    self.tb_logger.add_scalar('val_fea', avg_fea_loss, self.current_step)

    def do_training(self):
        self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))
        for epoch in range(self.start_epoch, self.total_epochs + 1):
            self.epoch = epoch
            if self.opt['dist']:  # was a bare `opt['dist']`, which is undefined in this scope
                self.train_sampler.set_epoch(epoch)
            tq_ldr = tqdm(self.train_loader)

            self._t = time()
            for train_data in tq_ldr:
                self.do_step(train_data)

    def create_training_generator(self, index):
        self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))
        for epoch in range(self.start_epoch, self.total_epochs + 1):
            self.epoch = epoch
            if self.opt['dist']:
                self.train_sampler.set_epoch(epoch)
            tq_ldr = tqdm(self.train_loader, position=index)

            self._t = time()
            for train_data in tq_ldr:
                yield self.model
                self.do_step(train_data)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to option YAML file.',
                        default='../options/train_prog_imgset_multifaceted_chained.yml')
    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher')
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)

    trainer = Trainer()
    trainer.init(opt, args.launcher)
    trainer.do_training()
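
The commit also adds create_training_generator, which yields the live ExtensibleTrainer before every optimization step, but nothing in the diff exercises it. Below is a minimal sketch of how a caller might drive it, assuming opt has already been parsed exactly as in the __main__ block above; inspect_model is a hypothetical hook, not part of this commit.

# Hypothetical driver for create_training_generator (illustration only, not part of the commit).
# Assumes `opt` was parsed as in the __main__ block above.
def inspect_model(model, step):
    # Placeholder hook: e.g. peek at the current learning rates of the yielded trainer.
    print(step, model.get_current_learning_rate())

trainer = Trainer()
trainer.init(opt, launcher='none')
for step, model in enumerate(trainer.create_training_generator(index=0)):
    inspect_model(model, step)  # the generator yields the model before each do_step() call
    if step >= 1000:            # the caller decides when to stop; the generator itself runs to total_epochs
        break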