import argparse
import logging
import os
import os.path as osp
import subprocess
import time

import torch
import torch.utils.data as data
import torchvision.transforms.functional as F
from PIL import Image
from tqdm import tqdm

from trainer.ExtensibleTrainer import ExtensibleTrainer
from utils import options as option
import utils.util as util
from data import create_dataloader


class FfmpegBackedVideoDataset(data.Dataset):
    '''Pulls frames from a video one at a time using FFMPEG.'''

    def __init__(self, opt, working_dir):
        """Configure the dataset from an options dict.

        opt keys read: 'video_file', 'frame_rate', 'start_at_seconds',
        'end_at_seconds', 'force_multiple', 'data_type', and optionally
        'vertical_splits'. working_dir is where extracted frames are staged.
        """
        super(FfmpegBackedVideoDataset, self).__init__()
        self.opt = opt
        self.video = self.opt['video_file']
        self.working_dir = working_dir
        self.frame_rate = self.opt['frame_rate']
        self.start_at = self.opt['start_at_seconds']
        self.end_at = self.opt['end_at_seconds']
        self.force_multiple = self.opt['force_multiple']
        # Total number of source frames covered by the [start_at, end_at) window.
        self.frame_count = (self.end_at - self.start_at) * self.frame_rate
        # The number of (original) video frames that will be stored on the filesystem at a time.
        self.max_working_files = 20

        self.data_type = self.opt['data_type']
        # When > 1, each source frame is served as several vertical slices;
        # defaults to 1 (no splitting) when the option is absent.
        self.vertical_splits = self.opt['vertical_splits'] if 'vertical_splits' in opt.keys() else 1

    def get_time_for_it(self, it):
        """Return the ffmpeg-style 'HH:MM:SS.mmm' timestamp for frame index `it`.

        The absolute time is `it / frame_rate` offset by `start_at`.
        """
        secs = it / self.frame_rate + self.start_at
        # Derive each component from the raw second count, subtracting every
        # component exactly once. (The previous form subtracted the hours twice
        # — once inside `mins * 60` and again via `hours * 3600` — which made
        # the seconds field negative for any timestamp >= 1 hour.)
        hours = int(secs / 3600)
        mins = int(secs / 60) % 60
        secs = secs - (mins * 60) - (hours * 3600)
        return '%02d:%02d:%06.3f' % (hours, mins, secs)

    def __getitem__(self, index):
        # Map a (possibly split-expanded) dataset index back to the source
        # frame index: `vertical_splits` consecutive indices share one frame.
        if self.vertical_splits > 0:
            actual_index = int(index / self.vertical_splits)
        else:
            actual_index = index

        # Extract the frame. Command template: `ffmpeg -ss 17:00.0323 -i