import argparse
import logging
import os
import os.path as osp
import subprocess
import time

import torch
import torch.utils.data as data
import torchvision.transforms.functional as F
from PIL import Image
from tqdm import tqdm

import dlas.utils.util as util
from dlas.data import create_dataloader
from dlas.trainer.ExtensibleTrainer import ExtensibleTrainer
from dlas.utils import options as option
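
# Uses a trained DLAS model to process a video: frames are pulled from the source
# one at a time with ffmpeg, pushed through the network, and the upscaled outputs
# are re-encoded into "minivid" segments with libx265.
# Run with: -opt <path to options YAML> (defaults to ../options/use_video_upsample.yml).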


class FfmpegBackedVideoDataset(data.Dataset):
    '''Pulls frames from a video one at a time using FFMPEG.'''

    def __init__(self, opt, working_dir):
        super(FfmpegBackedVideoDataset, self).__init__()
        self.opt = opt
        self.video = self.opt['video_file']
        self.working_dir = working_dir
        self.frame_rate = self.opt['frame_rate']
        self.start_at = self.opt['start_at_seconds']
        self.end_at = self.opt['end_at_seconds']
        self.force_multiple = self.opt['force_multiple']
        # int() guards __len__ against fractional start/end bounds.
        self.frame_count = int((self.end_at - self.start_at) * self.frame_rate)
        # The number of (original) video frames that will be stored on the filesystem at a time.
        self.max_working_files = 20

        self.data_type = self.opt['data_type']
        self.vertical_splits = self.opt['vertical_splits'] if 'vertical_splits' in opt.keys() else 1

    def get_time_for_it(self, it):
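        """Converts frame index `it` into an 'HH:MM:SS.mmm' timestamp for ffmpeg's
        `-ss` flag, e.g. it=90 at frame_rate=30 with start_at=0 -> '00:00:03.000'."""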
        secs = it / self.frame_rate + self.start_at
        mins = int(secs / 60)
        hours = int(mins / 60)
        secs = secs - (mins * 60) - (hours * 3600)
        mins = mins % 60
        return '%02d:%02d:%06.3f' % (hours, mins, secs)

    def __getitem__(self, index):
        if self.vertical_splits > 0:
            actual_index = int(index / self.vertical_splits)
        else:
            actual_index = index

        # Extract the frame. Command template: `ffmpeg -ss 17:00.0323 -i <video file>.mp4 -vframes 1 destination.png`
        # Working files are reused modulo max_working_files so only a bounded number sit on disk at once.
        working_file_name = osp.join(self.working_dir,
                                     "working_%d.png" % (actual_index % self.max_working_files,))
        vid_time = self.get_time_for_it(actual_index)
        ffmpeg_args = ['ffmpeg', '-y', '-ss', vid_time, '-i', self.video,
                       '-vframes', '1', working_file_name]
        process = subprocess.Popen(ffmpeg_args, stderr=subprocess.DEVNULL,
                                   stdout=subprocess.DEVNULL)
        process.wait()

        # get LQ image
        LQ_path = working_file_name
        img_LQ = Image.open(LQ_path)
        split_index = (index % self.vertical_splits)
        if self.vertical_splits > 0:
            # Crop out this item's vertical strip of the frame.
            w, h = img_LQ.size
            w_per_split = int(w / self.vertical_splits)
            left = w_per_split * split_index
            img_LQ = F.crop(img_LQ, 0, left, h, w_per_split)
        img_LQ = F.to_tensor(img_LQ)

        # Attach an all-ones mask channel to form the fullsize reference.
        mask = torch.ones(1, img_LQ.shape[1], img_LQ.shape[2])
        ref = torch.cat([img_LQ, mask], dim=0)

        if self.force_multiple > 1:
            # This is not compatible with vertical splits for now.
            assert self.vertical_splits <= 1
            # Zero-pad height/width up to the next multiple of force_multiple.
            c, h, w = img_LQ.shape
            h_, w_ = h, w
            height_removed = h % self.force_multiple
            width_removed = w % self.force_multiple
            if height_removed != 0:
                h_ = self.force_multiple * ((h // self.force_multiple) + 1)
            if width_removed != 0:
                w_ = self.force_multiple * ((w // self.force_multiple) + 1)
            lq_template = torch.zeros(c, h_, w_)
            lq_template[:, :h, :w] = img_LQ
            # The reference carries an extra mask channel; pad it the same way.
            ref_template = torch.zeros(c + 1, h_, w_)
            ref_template[:, :h, :w] = ref
            img_LQ = lq_template
            ref = ref_template

        return {'lq': img_LQ, 'lq_fullsize_ref': ref,
                'lq_center': torch.tensor([img_LQ.shape[1] // 2, img_LQ.shape[2] // 2], dtype=torch.long)}

    def __len__(self):
        return self.frame_count * self.vertical_splits


def merge_images(files, output_path):
    """Merges several image files side-by-side, re-joining frames that were
    split vertically."""
    images = [Image.open(f) for f in files]
    w, h = images[0].size

    result_width = w * len(images)
    result_height = h

    result = Image.new('RGB', (result_width, result_height))
    for i in range(len(images)):
        result.paste(im=images[i], box=(i * w, 0))
    result.save(output_path)
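
# For example, merge_images(['00000000.png', '00000001.png'], 'joined/00000000.png')
# pastes the two strips left-to-right into a single output frame.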


if __name__ == "__main__":
    # options
    torch.backends.cudnn.benchmark = True
    want_just_images = True
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, help='Path to options YAML file.',
                        default='../options/use_video_upsample.yml')
    opt = option.parse(parser.parse_args().opt, is_train=False)
    opt = option.dict_to_nonedict(opt)
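
    # Sketch of the option keys this script reads (names taken from the lookups
    # below; which are required depends on the mode):
    #   name, scale, recurrent_mode, frames_per_mini_vid, minivid_crf,
    #   generator_img_index, mini_vid_output_folder (optional),
    #   minivid_start_no (optional), recurrent_hr_generator (optional)
    #   dataset: {name, video_file, frame_rate, start_at_seconds, end_at_seconds,
    #             force_multiple, data_type, batch_size, vertical_splits, dataroot_GT}
    #   path: {log, results_root}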

    util.mkdirs(
        (path for key, path in opt['path'].items()
         if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))
    util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,
                      screen=True, tofile=True)
    logger = logging.getLogger('base')
    logger.info(option.dict2str(opt))
    util.loaded_options = opt

    # Create test dataset and dataloader
    test_loaders = []

    test_set = FfmpegBackedVideoDataset(opt['dataset'], opt['path']['results_root'])
    test_loader = create_dataloader(test_set, opt['dataset'])
    logger.info('Number of test images in [{:s}]: {:d}'.format(
        opt['dataset']['name'], len(test_set)))
    test_loaders.append(test_loader)

    model = ExtensibleTrainer(opt)
    test_set_name = test_loader.dataset.opt['name']
    logger.info('\nTesting [{:s}]...'.format(test_set_name))
    test_start_time = time.time()
    dataset_dir = osp.join(opt['path']['results_root'], test_set_name)
    util.mkdir(dataset_dir)

    frame_counter = 0
    frames_per_vid = opt['frames_per_mini_vid']
    minivid_crf = opt['minivid_crf']
    vid_output = (opt['mini_vid_output_folder']
                  if 'mini_vid_output_folder' in opt.keys() else dataset_dir)
    vid_counter = opt['minivid_start_no'] if 'minivid_start_no' in opt.keys() else 0
    img_index = opt['generator_img_index']
    recurrent_mode = opt['recurrent_mode']
    if recurrent_mode:
        # Can only do 1 frame at a time in recurrent mode, by definition.
        assert opt['dataset']['batch_size'] == 1
        scale = opt['scale']
        first_frame = True
    ffmpeg_proc = None
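
    # Main loop: one forward pass per dataset item; minivid encoding runs in a
    # background ffmpeg process while the next batch of frames is generated.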

    tq = tqdm(test_loader)
    for data in tq:
        need_GT = test_loader.dataset.opt['dataroot_GT'] is not None

        if recurrent_mode and first_frame:
            # Seed the recurrent input with zeros at the upscaled output size.
            b, c, h, w = data['lq'].shape
            recurrent_entry = torch.zeros((b, c, h * scale, w * scale),
                                          device=data['lq'].device)
            # Optionally swap out the 'generator' for the first frame to create a better image that the recurrent generator works off of.
            if 'recurrent_hr_generator' in opt.keys():
                recurrent_gen = model.env['generators']['generator']
                model.env['generators']['generator'] = model.env['generators'][opt['recurrent_hr_generator']]

            first_frame = False
        if recurrent_mode:
            data['recurrent'] = recurrent_entry

        model.feed_data(data, 0, need_GT=need_GT)
        model.test()
        visuals = model.get_current_visuals()['rlt']

        if recurrent_mode:
            # This frame's output becomes the next frame's recurrent input.
            recurrent_entry = visuals
        visuals = visuals.cpu().float()
        for i in range(visuals.shape[0]):
            sr_img = util.tensor2img(visuals[i])  # uint8

            # save images
            save_img_path = osp.join(dataset_dir, '%08d.png' % (frame_counter,))
            util.save_img(sr_img, save_img_path)
            frame_counter += 1

            if frame_counter % frames_per_vid == 0:
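                # A full minivid's worth of frames is on disk: rejoin any vertical
                # splits, then encode the segment in the background.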
                if ffmpeg_proc is not None:
                    print("Waiting for last encode..")
                    ffmpeg_proc.wait()
                print("Encoding minivid %d.." % (vid_counter,))
                # Perform stitching.
                num_splits = (opt['dataset']['vertical_splits']
                              if 'vertical_splits' in opt['dataset'].keys() else 1)
                if num_splits > 1:
                    procs = []
                    src_imgs_path = osp.join(dataset_dir, "joined")
                    os.makedirs(src_imgs_path, exist_ok=True)
                    # `k` rather than `i` so the enclosing visuals loop variable is not shadowed.
                    for k in range(int(frames_per_vid / num_splits)):
                        to_join = [osp.join(dataset_dir, "%08d.png" % (j,))
                                   for j in range(k * num_splits, k * num_splits + num_splits)]
                        merge_images(to_join, osp.join(src_imgs_path, "%08d.png" % (k,)))
                else:
                    src_imgs_path = dataset_dir

                # Encoding command line:
                # ffmpeg -framerate 30 -i %08d.png -c:v libx265 -crf 12 -preset slow -pix_fmt yuv444p test.mkv
                cmd = ['ffmpeg', '-y', '-framerate', str(opt['dataset']['frame_rate']), '-f', 'image2',
                       '-i', osp.join(src_imgs_path, "%08d.png"),
                       '-c:v', 'libx265', '-crf', str(minivid_crf), '-preset', 'slow', '-pix_fmt', 'yuv444p',
                       osp.join(vid_output, "mini_%06d.mkv" % (vid_counter,))]
                print(ffmpeg_proc)
                # , stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
                ffmpeg_proc = subprocess.Popen(cmd)
                vid_counter += 1
                frame_counter = 0
                print("Done.")

        if want_just_images:
            continue