diff --git a/codes/models/pixel_level_contrastive_learning/pixpro_lucidrains.py b/codes/models/pixel_level_contrastive_learning/pixpro_lucidrains.py
index 73a0a973..75c28a46 100644
--- a/codes/models/pixel_level_contrastive_learning/pixpro_lucidrains.py
+++ b/codes/models/pixel_level_contrastive_learning/pixpro_lucidrains.py
@@ -328,7 +328,6 @@ class PixelCL(nn.Module):
         ppm_gamma = 2,
         distance_thres = 0.7,
         similarity_temperature = 0.3,
-        alpha = 1.,
         cutout_ratio_range = (0.6, 0.8),
         cutout_interpolate_mode = 'nearest',
         coord_cutout_interpolate_mode = 'bilinear',
@@ -363,7 +362,6 @@ class PixelCL(nn.Module):
 
         self.distance_thres = distance_thres
         self.similarity_temperature = similarity_temperature
-        self.alpha = alpha
 
         # This requirement is due to the way that these are processed, not a hard requirement.
         assert math.sqrt(max_latent_dim) == int(math.sqrt(max_latent_dim))
@@ -456,7 +454,7 @@ class PixelCL(nn.Module):
             l = l[:, :, prob.multinomial(num_samples=self.max_latent_dim, replacement=False)]
             # For compatibility with the existing pixpro code, reshape this stochastic sampling back into a 2d "square".
             # Note that the actual structure no longer matters going forwards. Pixels are only compared to themselves and others without regards
-            # to structure.
+            # to the original image structure.
             sqdim = int(math.sqrt(self.max_latent_dim))
             extracted.append(l.reshape(b, c, sqdim, sqdim))
         proj_pixel_one, proj_pixel_two, target_proj_pixel_one, target_proj_pixel_two = extracted
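The "square" mentioned in the comment above is only a packing convenience. A minimal standalone sketch of the idea (toy tensors, not the PixelCL code itself; names mirror the hunk above):

import math
import torch

# Pick max_latent_dim pixels at random from a (B, C, H, W) latent, then pack
# them into a square purely so downstream pixpro code still sees a 4D tensor.
b, c, h, w = 2, 64, 16, 16
max_latent_dim = 64  # must be a perfect square, matching the assert above
l = torch.randn(b, c, h, w).reshape(b, c, h * w)
prob = torch.full((h * w,), 1 / (h * w))
l = l[:, :, prob.multinomial(num_samples=max_latent_dim, replacement=False)]
sqdim = int(math.sqrt(max_latent_dim))
square = l.reshape(b, c, sqdim, sqdim)  # spatial layout is now meaningless
print(square.shape)  # torch.Size([2, 64, 8, 8])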
diff --git a/codes/models/pixel_level_contrastive_learning/resnet_unet_3.py b/codes/models/pixel_level_contrastive_learning/resnet_unet_3.py
new file mode 100644
index 00000000..49c87036
--- /dev/null
+++ b/codes/models/pixel_level_contrastive_learning/resnet_unet_3.py
@@ -0,0 +1,86 @@
+import torch
+import torch.nn as nn
+from torchvision.models.resnet import BasicBlock, Bottleneck, conv1x1, conv3x3
+from torchvision.models.utils import load_state_dict_from_url
+import torchvision
+
+from models.arch_util import ConvBnRelu
+from models.pixel_level_contrastive_learning.resnet_unet import ReverseBottleneck
+from trainer.networks import register_model
+from utils.util import checkpoint, opt_get
+
+
+class UResNet50_3(torchvision.models.resnet.ResNet):
+
+    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
+                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
+                 norm_layer=None, out_dim=128):
+        super().__init__(block, layers, num_classes, zero_init_residual, groups, width_per_group,
+                         replace_stride_with_dilation, norm_layer)
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        '''
+        # For reference:
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
+                                       dilate=replace_stride_with_dilation[0])
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
+                                       dilate=replace_stride_with_dilation[1])
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
+                                       dilate=replace_stride_with_dilation[2])
+        '''
+        uplayers = []
+        inplanes = 2048
+        first = True
+        for i in range(3):
+            uplayers.append(ReverseBottleneck(inplanes, inplanes // 2, norm_layer=norm_layer, passthrough=not first))
+            inplanes = inplanes // 2
+            first = False
+        self.uplayers = nn.ModuleList(uplayers)
+
+        # These two variables are separated out and renamed so that I can re-use parameters from a pretrained resnet_unet2.
+        self.last_uplayer = ReverseBottleneck(256, 128, norm_layer=norm_layer, passthrough=True)
+        self.tail3 = nn.Sequential(conv1x1(192, 128),
+                                   norm_layer(128),
+                                   nn.ReLU(),
+                                   conv1x1(128, out_dim))
+
+        del self.fc  # Not used in this implementation and just consumes a ton of GPU memory.
+
+
+    def _forward_impl(self, x):
+        x0 = self.relu(self.bn1(self.conv1(x)))
+        x = self.maxpool(x0)
+
+        x1 = checkpoint(self.layer1, x)
+        x2 = checkpoint(self.layer2, x1)
+        x3 = checkpoint(self.layer3, x2)
+        x4 = checkpoint(self.layer4, x3)
+        unused = self.avgpool(x4)  # This is performed for instance-level pixpro learning, even though it is unused.
+
+        x = checkpoint(self.uplayers[0], x4)
+        x = checkpoint(self.uplayers[1], x, x3)
+        x = checkpoint(self.uplayers[2], x, x2)
+        x = checkpoint(self.last_uplayer, x, x1)
+
+        return checkpoint(self.tail3, torch.cat([x, x0], dim=1))
+
+    def forward(self, x):
+        return self._forward_impl(x)
+
+
+@register_model
+def register_u_resnet50_3(opt_net, opt):
+    model = UResNet50_3(Bottleneck, [3, 4, 6, 3], out_dim=opt_net['odim'])
+    if opt_get(opt_net, ['use_pretrained_base'], False):
+        state_dict = load_state_dict_from_url('https://download.pytorch.org/models/resnet50-19c8e357.pth', progress=True)
+        model.load_state_dict(state_dict, strict=False)
+    return model
+
+
+if __name__ == '__main__':
+    model = UResNet50_3(Bottleneck, [3,4,6,3])
+    samp = torch.rand(1,3,224,224)
+    y = model(samp)
+    print(y.shape)
+    # For pixpro: attach to "tail.3"
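A quick sanity-check sketch of how this network is likely meant to be consumed. The pixpro hookup on "tail.3" (the final 1x1 conv) and the output resolution are inferred from the trailing comment and `_forward_impl`; the import path assumes the repo's codes/ directory is on PYTHONPATH:

import torch
from torchvision.models.resnet import Bottleneck
from models.pixel_level_contrastive_learning.resnet_unet_3 import UResNet50_3

# The network maps an image to a dense projection map with out_dim channels;
# PixelCL would attach its pixel-level loss to the layer named "tail.3".
model = UResNet50_3(Bottleneck, [3, 4, 6, 3], out_dim=128)
with torch.no_grad():
    feats = model(torch.rand(1, 3, 224, 224))
print(feats.shape)  # expected roughly (1, 128, 112, 112): half the input resolution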
diff --git a/codes/models/vqvae/vqvae_no_conv_transpose.py b/codes/models/vqvae/vqvae_no_conv_transpose.py
new file mode 100644
index 00000000..d1418d1d
--- /dev/null
+++ b/codes/models/vqvae/vqvae_no_conv_transpose.py
@@ -0,0 +1,249 @@
+# Copyright 2018 The Sonnet Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+
+# Borrowed from https://github.com/rosinality/vq-vae-2-pytorch
+# Which was itself borrowed from https://github.com/deepmind/sonnet
+
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import torch.distributed as distributed
+
+from trainer.networks import register_model
+from utils.util import checkpoint, opt_get
+
+
+class Quantize(nn.Module):
+    def __init__(self, dim, n_embed, decay=0.99, eps=1e-5):
+        super().__init__()
+
+        self.dim = dim
+        self.n_embed = n_embed
+        self.decay = decay
+        self.eps = eps
+
+        embed = torch.randn(dim, n_embed)
+        self.register_buffer("embed", embed)
+        self.register_buffer("cluster_size", torch.zeros(n_embed))
+        self.register_buffer("embed_avg", embed.clone())
+
+    def forward(self, input):
+        flatten = input.reshape(-1, self.dim)
+        dist = (
+            flatten.pow(2).sum(1, keepdim=True)
+            - 2 * flatten @ self.embed
+            + self.embed.pow(2).sum(0, keepdim=True)
+        )
+        _, embed_ind = (-dist).max(1)
+        embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)
+        embed_ind = embed_ind.view(*input.shape[:-1])
+        quantize = self.embed_code(embed_ind)
+
+        if self.training:
+            embed_onehot_sum = embed_onehot.sum(0)
+            embed_sum = flatten.transpose(0, 1) @ embed_onehot
+
+            if distributed.is_initialized() and distributed.get_world_size() > 1:
+                distributed.all_reduce(embed_onehot_sum)
+                distributed.all_reduce(embed_sum)
+
+            self.cluster_size.data.mul_(self.decay).add_(
+                embed_onehot_sum, alpha=1 - self.decay
+            )
+            self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)
+            n = self.cluster_size.sum()
+            cluster_size = (
+                (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n
+            )
+            embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
+            self.embed.data.copy_(embed_normalized)
+
+        diff = (quantize.detach() - input).pow(2).mean()
+        quantize = input + (quantize - input).detach()
+
+        return quantize, diff, embed_ind
+
+    def embed_code(self, embed_id):
+        return F.embedding(embed_id, self.embed.transpose(0, 1))
+
+
+class ResBlock(nn.Module):
+    def __init__(self, in_channel, channel):
+        super().__init__()
+
+        self.conv = nn.Sequential(
+            nn.ReLU(inplace=True),
+            nn.Conv2d(in_channel, channel, 3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(channel, in_channel, 1),
+        )
+
+    def forward(self, input):
+        out = self.conv(input)
+        out += input
+
+        return out
+
+
+class Encoder(nn.Module):
+    def __init__(self, in_channel, channel, n_res_block, n_res_channel, stride):
+        super().__init__()
+
+        if stride == 4:
+            blocks = [
+                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(channel, channel, 3, padding=1),
+            ]
+
+        elif stride == 2:
+            blocks = [
+                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(channel // 2, channel, 3, padding=1),
+            ]
+
+        for i in range(n_res_block):
+            blocks.append(ResBlock(channel, n_res_channel))
+
+        blocks.append(nn.ReLU(inplace=True))
+
+        self.blocks = nn.Sequential(*blocks)
+
+    def forward(self, input):
+        return self.blocks(input)
+
+
+class Decoder(nn.Module):
+    def __init__(
+        self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride
+    ):
+        super().__init__()
+
+        blocks = [nn.Conv2d(in_channel, channel, 3, padding=1)]
+
+        for i in range(n_res_block):
+            blocks.append(ResBlock(channel, n_res_channel))
+
+        blocks.append(nn.ReLU(inplace=True))
+
+        if stride == 4:
+            blocks.extend(
+                [
+                    nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1),
+                    nn.ReLU(inplace=True),
+                    nn.ConvTranspose2d(
+                        channel // 2, out_channel, 4, stride=2, padding=1
+                    ),
+                ]
+            )
+
+        elif stride == 2:
+            blocks.append(
+                nn.ConvTranspose2d(channel, out_channel, 4, stride=2, padding=1)
+            )
+
+        self.blocks = nn.Sequential(*blocks)
+
+    def forward(self, input):
+        return self.blocks(input)
+
+
+class VQVAE(nn.Module):
+    def __init__(
+        self,
+        in_channel=3,
+        channel=128,
+        n_res_block=2,
+        n_res_channel=32,
+        codebook_dim=64,
+        codebook_size=512,
+        decay=0.99,
+    ):
+        super().__init__()
+
+        self.enc_b = Encoder(in_channel, channel, n_res_block, n_res_channel, stride=4)
+        self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, stride=2)
+        self.quantize_conv_t = nn.Conv2d(channel, codebook_dim, 1)
+        self.quantize_t = Quantize(codebook_dim, codebook_size)
+        self.dec_t = Decoder(
+            codebook_dim, codebook_dim, channel, n_res_block, n_res_channel, stride=2
+        )
+        self.quantize_conv_b = nn.Conv2d(codebook_dim + channel, codebook_dim, 1)
+        self.quantize_b = Quantize(codebook_dim, codebook_size)
+        self.upsample_t = nn.ConvTranspose2d(
+            codebook_dim, codebook_dim, 4, stride=2, padding=1
+        )
+        self.dec = Decoder(
+            codebook_dim + codebook_dim,
+            in_channel,
+            channel,
+            n_res_block,
+            n_res_channel,
+            stride=4,
+        )
+
+    def forward(self, input):
+        quant_t, quant_b, diff, _, _ = self.encode(input)
+        dec = self.decode(quant_t, quant_b)
+
+        return dec, diff
+
+    def encode(self, input):
+        enc_b = checkpoint(self.enc_b, input)
+        enc_t = checkpoint(self.enc_t, enc_b)
+
+        quant_t = self.quantize_conv_t(enc_t).permute(0, 2, 3, 1)
+        quant_t, diff_t, id_t = self.quantize_t(quant_t)
+        quant_t = quant_t.permute(0, 3, 1, 2)
+        diff_t = diff_t.unsqueeze(0)
+
+        dec_t = checkpoint(self.dec_t, quant_t)
+        enc_b = torch.cat([dec_t, enc_b], 1)
+
+        quant_b = checkpoint(self.quantize_conv_b, enc_b).permute(0, 2, 3, 1)
+        quant_b, diff_b, id_b = self.quantize_b(quant_b)
+        quant_b = quant_b.permute(0, 3, 1, 2)
+        diff_b = diff_b.unsqueeze(0)
+
+        return quant_t, quant_b, diff_t + diff_b, id_t, id_b
+
+    def decode(self, quant_t, quant_b):
+        upsample_t = self.upsample_t(quant_t)
+        quant = torch.cat([upsample_t, quant_b], 1)
+        dec = checkpoint(self.dec, quant)
+
+        return dec
+
+    def decode_code(self, code_t, code_b):
+        quant_t = self.quantize_t.embed_code(code_t)
+        quant_t = quant_t.permute(0, 3, 1, 2)
+        quant_b = self.quantize_b.embed_code(code_b)
+        quant_b = quant_b.permute(0, 3, 1, 2)
+
+        dec = self.decode(quant_t, quant_b)
+
+        return dec
+
+
+@register_model
+def register_vqvae(opt_net, opt):
+    kw = opt_get(opt_net, ['kwargs'], {})
+    return VQVAE(**kw)
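As a reading aid for `Quantize.forward` above: the codebook is not trained by gradient descent; it is updated from an exponential moving average of cluster assignments, and gradients reach the encoder via the straight-through trick. A tiny sketch of those two ideas in isolation (toy tensors, not the module above):

import torch

# Straight-through estimator: the forward pass uses the quantized value, the
# backward pass behaves as if quantization were the identity.
inp = torch.randn(4, 8, requires_grad=True)
quantized = inp.round()                 # stand-in for the codebook lookup
out = inp + (quantized - inp).detach()  # same trick as in Quantize.forward
out.sum().backward()
print(torch.equal(inp.grad, torch.ones_like(inp)))  # True: gradient passed straight through

# EMA codebook update per training batch, mirroring the buffers in Quantize:
#   cluster_size <- decay * cluster_size + (1 - decay) * assignments per code
#   embed_avg    <- decay * embed_avg    + (1 - decay) * sum of assigned vectors
#   embed        <- embed_avg / smoothed cluster_size  (Laplace smoothing via eps)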
diff --git a/codes/train.py b/codes/train.py
index 34b1ae76..fae59c70 100644
--- a/codes/train.py
+++ b/codes/train.py
@@ -295,7 +295,7 @@ class Trainer:
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_imagenet_resnet50.yml')
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_imgset_vqvae_stage1.yml')
     parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher')
     parser.add_argument('--local_rank', type=int, default=0)
     args = parser.parse_args()
diff --git a/codes/train2.py b/codes/train2.py
new file mode 100644
index 00000000..27155799
--- /dev/null
+++ b/codes/train2.py
@@ -0,0 +1,325 @@
+import os
+import math
+import argparse
+import random
+import logging
+from tqdm import tqdm
+
+import torch
+from data.data_sampler import DistIterSampler
+from trainer.eval.evaluator import create_evaluator
+
+from utils import util, options as option
+from data import create_dataloader, create_dataset
+from trainer.ExtensibleTrainer import ExtensibleTrainer
+from time import time
+
+
+def init_dist(backend, **kwargs):
+    # These packages have globals that screw with Windows, so only import them if needed.
+    import torch.distributed as dist
+    import torch.multiprocessing as mp
+
+    """initialization for distributed training"""
+    if mp.get_start_method(allow_none=True) != 'spawn':
+        mp.set_start_method('spawn')
+    rank = int(os.environ['RANK'])
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(rank % num_gpus)
+    dist.init_process_group(backend=backend, **kwargs)
+
+
+class Trainer:
+
+    def init(self, opt, launcher, all_networks={}):
+        self._profile = False
+        self.val_compute_psnr = opt['eval']['compute_psnr'] if 'compute_psnr' in opt['eval'].keys() else True
+        self.val_compute_fea = opt['eval']['compute_fea'] if 'compute_fea' in opt['eval'].keys() else True
+
+        #### loading resume state if exists
+        if opt['path'].get('resume_state', None):
+            # distributed resuming: all load into default GPU
+            device_id = torch.cuda.current_device()
+            resume_state = torch.load(opt['path']['resume_state'],
+                                      map_location=lambda storage, loc: storage.cuda(device_id))
+            option.check_resume(opt, resume_state['iter'])  # check resume options
+        else:
+            resume_state = None
+
+        #### mkdir and loggers
+        if self.rank <= 0:  # normal training (self.rank -1) OR distributed training (self.rank 0)
+            if resume_state is None:
+                util.mkdir_and_rename(
+                    opt['path']['experiments_root'])  # rename experiment folder if exists
+                util.mkdirs(
+                    (path for key, path in opt['path'].items() if not key == 'experiments_root' and path is not None
+                     and 'pretrain_model' not in key and 'resume' not in key))
+
+            # config loggers. Before it, the log will not work
+            util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,
+                              screen=True, tofile=True)
+            self.logger = logging.getLogger('base')
+            self.logger.info(option.dict2str(opt))
+            # tensorboard logger
+            if opt['use_tb_logger'] and 'debug' not in opt['name']:
+                self.tb_logger_path = os.path.join(opt['path']['experiments_root'], 'tb_logger')
+                version = float(torch.__version__[0:3])
+                if version >= 1.1:  # PyTorch 1.1
+                    from torch.utils.tensorboard import SummaryWriter
+                else:
+                    self.logger.info(
+                        'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))
+                    from tensorboardX import SummaryWriter
+                self.tb_logger = SummaryWriter(log_dir=self.tb_logger_path)
+        else:
+            util.setup_logger('base', opt['path']['log'], 'train', level=logging.INFO, screen=True)
+            self.logger = logging.getLogger('base')
+
+        # convert to NoneDict, which returns None for missing keys
+        opt = option.dict_to_nonedict(opt)
+        self.opt = opt
+
+        #### wandb init
+        if opt['wandb']:
+            import wandb
+            os.makedirs(os.path.join(opt['path']['log'], 'wandb'), exist_ok=True)
+            wandb.init(project=opt['name'], dir=opt['path']['log'])
+
+        #### random seed
+        seed = opt['train']['manual_seed']
+        if seed is None:
+            seed = random.randint(1, 10000)
+        if self.rank <= 0:
+            self.logger.info('Random seed: {}'.format(seed))
+        seed += self.rank  # Different multiprocessing instances should behave differently.
+        util.set_random_seed(seed)
+
+        torch.backends.cudnn.benchmark = True
+        # torch.backends.cudnn.deterministic = True
+        # torch.autograd.set_detect_anomaly(True)
+
+        # Save the compiled opt dict to the global loaded_options variable.
+        util.loaded_options = opt
+
+        #### create train and val dataloader
+        dataset_ratio = 1  # enlarge the size of each epoch
+        for phase, dataset_opt in opt['datasets'].items():
+            if phase == 'train':
+                self.train_set = create_dataset(dataset_opt)
+                train_size = int(math.ceil(len(self.train_set) / dataset_opt['batch_size']))
+                total_iters = int(opt['train']['niter'])
+                self.total_epochs = int(math.ceil(total_iters / train_size))
+                if opt['dist']:
+                    self.train_sampler = DistIterSampler(self.train_set, self.world_size, self.rank, dataset_ratio)
+                    self.total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))
+                else:
+                    self.train_sampler = None
+                self.train_loader = create_dataloader(self.train_set, dataset_opt, opt, self.train_sampler)
+                if self.rank <= 0:
+                    self.logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
+                        len(self.train_set), train_size))
+                    self.logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
+                        self.total_epochs, total_iters))
+            elif phase == 'val':
+                self.val_set = create_dataset(dataset_opt)
+                self.val_loader = create_dataloader(self.val_set, dataset_opt, opt, None)
+                if self.rank <= 0:
+                    self.logger.info('Number of val images in [{:s}]: {:d}'.format(
+                        dataset_opt['name'], len(self.val_set)))
+            else:
+                raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
+        assert self.train_loader is not None
+
+        #### create model
+        self.model = ExtensibleTrainer(opt, cached_networks=all_networks)
+
+        ### Evaluators
+        self.evaluators = []
+        if 'evaluators' in opt['eval'].keys():
+            for ev_key, ev_opt in opt['eval']['evaluators'].items():
+                self.evaluators.append(create_evaluator(self.model.networks[ev_opt['for']],
+                                                        ev_opt, self.model.env))
+
+        #### resume training
+        if resume_state:
+            self.logger.info('Resuming training from epoch: {}, iter: {}.'.format(
+                resume_state['epoch'], resume_state['iter']))
+
+            self.start_epoch = resume_state['epoch']
+            self.current_step = resume_state['iter']
+            self.model.resume_training(resume_state, 'amp_opt_level' in opt.keys())  # handle optimizers and schedulers
+        else:
+            self.current_step = -1 if 'start_step' not in opt.keys() else opt['start_step']
+            self.start_epoch = 0
+        if 'force_start_step' in opt.keys():
+            self.current_step = opt['force_start_step']
+        opt['current_step'] = self.current_step
+
+    def do_step(self, train_data):
+        _t = time()  # Baseline timestamp so the profiling prints below always have a defined reference.
+        if self._profile:
+            print("Data fetch: %f" % (time() - _t))
+            _t = time()
+
+        opt = self.opt
+        self.current_step += 1
+        #### update learning rate
+        self.model.update_learning_rate(self.current_step, warmup_iter=opt['train']['warmup_iter'])
+
+        #### training
+        if self._profile:
+            print("Update LR: %f" % (time() - _t))
+            _t = time()
+        self.model.feed_data(train_data, self.current_step)
+        self.model.optimize_parameters(self.current_step)
+        if self._profile:
+            print("Model feed + step: %f" % (time() - _t))
+            _t = time()
+
+        #### log
+        if self.current_step % opt['logger']['print_freq'] == 0 and self.rank <= 0:
+            logs = self.model.get_current_log(self.current_step)
+            message = '[epoch:{:3d}, iter:{:8,d}, lr:('.format(self.epoch, self.current_step)
+            for v in self.model.get_current_learning_rate():
+                message += '{:.3e},'.format(v)
+            message += ')] '
+            for k, v in logs.items():
+                if 'histogram' in k:
+                    self.tb_logger.add_histogram(k, v, self.current_step)
+                elif isinstance(v, dict):
+                    self.tb_logger.add_scalars(k, v, self.current_step)
+                else:
+                    message += '{:s}: {:.4e} '.format(k, v)
+                    # tensorboard logger
+                    if opt['use_tb_logger'] and 'debug' not in opt['name']:
+                        self.tb_logger.add_scalar(k, v, self.current_step)
+            if opt['wandb']:
+                import wandb
+                wandb.log(logs)
+            self.logger.info(message)
+
+        #### save models and training states
+        if self.current_step % opt['logger']['save_checkpoint_freq'] == 0:
+            if self.rank <= 0:
+                self.logger.info('Saving models and training states.')
+                self.model.save(self.current_step)
+                self.model.save_training_state(self.epoch, self.current_step)
+            if 'alt_path' in opt['path'].keys():
+                import shutil
+                print("Synchronizing tb_logger to alt_path..")
+                alt_tblogger = os.path.join(opt['path']['alt_path'], "tb_logger")
+                shutil.rmtree(alt_tblogger, ignore_errors=True)
+                shutil.copytree(self.tb_logger_path, alt_tblogger)
+
+        #### validation
+        if opt['datasets'].get('val', None) and self.current_step % opt['train']['val_freq'] == 0:
+            if opt['model'] in ['sr', 'srgan', 'corruptgan', 'spsrgan',
+                                'extensibletrainer'] and self.rank <= 0:  # image restoration validation
+                avg_psnr = 0.
+                avg_fea_loss = 0.
+                idx = 0
+                val_tqdm = tqdm(self.val_loader)
+                for val_data in val_tqdm:
+                    idx += 1
+                    for b in range(len(val_data['HQ_path'])):
+                        img_name = os.path.splitext(os.path.basename(val_data['HQ_path'][b]))[0]
+                        img_dir = os.path.join(opt['path']['val_images'], img_name)
+
+                        util.mkdir(img_dir)
+
+                        self.model.feed_data(val_data, self.current_step)
+                        self.model.test()
+
+                        visuals = self.model.get_current_visuals()
+                        if visuals is None:
+                            continue
+
+                        sr_img = util.tensor2img(visuals['rlt'][b])  # uint8
+                        # calculate PSNR
+                        if self.val_compute_psnr:
+                            gt_img = util.tensor2img(visuals['hq'][b])  # uint8
+                            sr_img, gt_img = util.crop_border([sr_img, gt_img], opt['scale'])
+                            avg_psnr += util.calculate_psnr(sr_img, gt_img)
+
+                        # calculate fea loss
+                        if self.val_compute_fea:
+                            avg_fea_loss += self.model.compute_fea_loss(visuals['rlt'][b], visuals['hq'][b])
+
+                        # Save SR images for reference
+                        img_base_name = '{:s}_{:d}.png'.format(img_name, self.current_step)
+                        save_img_path = os.path.join(img_dir, img_base_name)
+                        util.save_img(sr_img, save_img_path)
+
+                avg_psnr = avg_psnr / idx
+                avg_fea_loss = avg_fea_loss / idx
+
+                # log
+                self.logger.info('# Validation # PSNR: {:.4e} Fea: {:.4e}'.format(avg_psnr, avg_fea_loss))
+
+                # tensorboard logger
+                if opt['use_tb_logger'] and 'debug' not in opt['name'] and self.rank <= 0:
+                    self.tb_logger.add_scalar('val_psnr', avg_psnr, self.current_step)
+                    self.tb_logger.add_scalar('val_fea', avg_fea_loss, self.current_step)
+
+        if len(self.evaluators) != 0 and self.current_step % opt['train']['val_freq'] == 0 and self.rank <= 0:
+            eval_dict = {}
+            for eval in self.evaluators:
+                eval_dict.update(eval.perform_eval())
+            if self.rank <= 0:
+                print("Evaluator results: ", eval_dict)
+                for ek, ev in eval_dict.items():
+                    self.tb_logger.add_scalar(ek, ev, self.current_step)
+
+    def do_training(self):
+        self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))
+        for epoch in range(self.start_epoch, self.total_epochs + 1):
+            self.epoch = epoch
+            if self.opt['dist']:
+                self.train_sampler.set_epoch(epoch)
+            tq_ldr = tqdm(self.train_loader)
+
+            _t = time()
+            for train_data in tq_ldr:
+                self.do_step(train_data)
+
+    def create_training_generator(self, index):
+        self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(self.start_epoch, self.current_step))
+        for epoch in range(self.start_epoch, self.total_epochs + 1):
+            self.epoch = epoch
+            if self.opt['dist']:
+                self.train_sampler.set_epoch(epoch)
+            tq_ldr = tqdm(self.train_loader, position=index)
+
+            _t = time()
+            for train_data in tq_ldr:
+                yield self.model
+                self.do_step(train_data)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-opt', type=str, help='Path to option YAML file.', default='../options/train_imgset_pixpro_3.yml')
+    parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none', help='job launcher')
+    parser.add_argument('--local_rank', type=int, default=0)
+    args = parser.parse_args()
+    opt = option.parse(args.opt, is_train=True)
+    if args.launcher != 'none':
+        # export CUDA_VISIBLE_DEVICES for running in distributed mode.
+        if 'gpu_ids' in opt.keys():
+            gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
+            os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
+            print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
+    trainer = Trainer()
+
+    #### distributed training settings
+    if args.launcher == 'none':  # disabled distributed training
+        opt['dist'] = False
+        trainer.rank = -1
+        if len(opt['gpu_ids']) == 1:
+            torch.cuda.set_device(opt['gpu_ids'][0])
+        print('Disabled distributed training.')
+    else:
+        opt['dist'] = True
+        init_dist('nccl')
+        trainer.world_size = torch.distributed.get_world_size()
+        trainer.rank = torch.distributed.get_rank()
+
+    trainer.init(opt, args.launcher)
+    trainer.do_training()
diff --git a/codes/trainer/lr_scheduler.py b/codes/trainer/lr_scheduler.py
index bc9f96f0..151ea1c4 100644
--- a/codes/trainer/lr_scheduler.py
+++ b/codes/trainer/lr_scheduler.py
@@ -124,7 +124,7 @@ class CosineAnnealingLR_Restart(_LRScheduler):
 
 
 if __name__ == "__main__":
-    optimizer = torch.optim.Adam([torch.zeros(3, 64, 3, 3)], lr=.2, weight_decay=0,
+    optimizer = torch.optim.Adam([torch.zeros(3, 64, 3, 3)], lr=1e-4, weight_decay=0,
                                  betas=(0.9, 0.99))
     ##############################
     # MultiStepLR_Restart
@@ -159,17 +159,17 @@ if __name__ == "__main__":
     restart_weights = [1]
 
     ## four
-    T_period = [25000, 25000]
-    restarts = [252000]
-    restart_weights = [.5]
+    T_period = [200000, 100000, 200000]
+    restarts = [200000, 300000]
+    restart_weights = [.5, .25]
 
-    scheduler = CosineAnnealingLR_Restart(optimizer, T_period, warmup=227000, eta_min=.01, restarts=restarts,
+    scheduler = CosineAnnealingLR_Restart(optimizer, T_period, warmup=10000, eta_min=1e-8, restarts=restarts,
                                           weights=restart_weights)
 
     ##############################
     # Draw figure
     ##############################
-    N_iter = 1000000
+    N_iter = 500000
     lr_l = list(range(N_iter))
     for i in range(N_iter):
         scheduler.step()
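train2.py's main addition over train.py is `create_training_generator`, which yields the model before each optimization step so an external driver can inspect or swap state mid-training. A minimal sketch of the intended call pattern (the option file path is illustrative, and the single-process setup mirrors the `__main__` block above, which sets `rank`/`dist` before calling `init`):

# Hypothetical single-process driver built on train2.Trainer (sketch only).
from train2 import Trainer
from utils import options as option

opt = option.parse('../options/train_imgset_pixpro_3.yml', is_train=True)
opt['dist'] = False

trainer = Trainer()
trainer.rank = -1
trainer.init(opt, launcher='none')

for model in trainer.create_training_generator(index=0):
    # The generator yields the ExtensibleTrainer before each step; inspect
    # losses, networks, or checkpoints here, then let the loop continue.
    pass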