Example 1
    def __init__(self, opt):
        self.opt = opt
        self.display_id = opt.display_id
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = opt.expr_name
        self.saved = False
        # TensorBoard writer logging into the experiment directory
        self.writer = SummaryWriter(opt.expr_dir)

        if self.display_id > 0:
            self.vis = visdom.Visdom(port=opt.display_port)
        logging.config.fileConfig(
            os.path.join(opt.configure_path, 'logging.conf'))
        self.logger = logging.getLogger()
        if self.use_html:
            self.web_dir = os.path.join(opt.expr_dir, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            # print('=> creating web directory:\n\t{}'.format(os.path.abspath(self.web_dir)))
            util.make_dirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.expr_dir, 'loss_log.txt')
        self.logger.addHandler(
            logging.FileHandler(self.log_name,
                                mode='a',
                                encoding=None,
                                delay=False))
        now = time.strftime("%c")
        self.logger.info(
            '================ Training Loss (%s) ================' % now)
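
Every example on this page calls a make_dirs helper (directly or via util) that is never shown. A minimal sketch consistent with how it is used here, assuming it accepts either a single path or a list of paths (as in the util.make_dirs([...]) call above) and tolerates directories that already exist:

import os


def make_dirs(paths):
    # Accept a single path or an iterable of paths; create each directory,
    # silently skipping any that already exist.
    if isinstance(paths, (str, os.PathLike)):
        paths = [paths]
    for path in paths:
        os.makedirs(path, exist_ok=True)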
Example 2
def cluster_by_masked_area(root_dir, args):
    logger.info(f'Clustering {root_dir}')
    clustered_dir = root_dir + '_clustered'
    make_dirs(clustered_dir)
    radius = 5

    # all masks with ratio in x +- radius will be stored in sub-directory x
    clustered_centers = np.arange(radius, 100, radius * 2)
    clustered_subdirs = []
    for c in clustered_centers:
        # make sub-directories for each ratio range
        clustered_subdirs.append(make_dir_under_root(clustered_dir, str(c)))

    for i, filename in enumerate(get_everything_under(root_dir)):
        if i % 100 == 0:
            logger.info(f'clustering {filename}')
        if args.image_masks:
            ratio = get_masked_ratio(Image.open(filename))
        else:
            # filename is a directory containing multiple mask files
            ratio = np.mean([
                get_masked_ratio(Image.open(f))
                for f in get_everything_under(filename, pattern='*.png')
            ])

        # find the nearest center (ties on a boundary go to the lower bin)
        for j, c in enumerate(clustered_centers):
            if c - radius <= ratio * 100 <= c + radius:
                shutil.move(filename, clustered_subdirs[j])
                break

    shutil.rmtree(root_dir)
    os.rename(clustered_dir, root_dir)
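
For reference, the bin layout above tiles the whole 0-100% range: with radius = 5, np.arange(radius, 100, radius * 2) produces ten centers, each owning a 10-point-wide bin (a ratio that lands exactly on a boundary goes to the lower bin, since the loop breaks at the first match):

import numpy as np

radius = 5
centers = np.arange(radius, 100, radius * 2)
print(centers)  # [ 5 15 25 35 45 55 65 75 85 95]
# e.g. a mask with 12% of its pixels masked is moved into sub-directory "15",
# because 15 - 5 <= 12 <= 15 + 5.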
Example 3
def save_video_to_frames(video_filename,
                         output_dir,
                         max_len,
                         min_h,
                         min_w,
                         prefix=''):
    video_name = prefix + video_filename.split('/')[-1].split('.')[0]
    cap = cv2.VideoCapture(video_filename)
    frame_count = 1
    video_dir = os.path.join(output_dir, video_name)
    while frame_count <= max_len:
        ret, img = cap.read()
        if not ret:
            logger.warning(
                f"{video_filename} len {frame_count} < max_len {max_len}")
            break
        h, w, c = img.shape
        if h < min_h or w < min_w:
            logger.warning(f"h {h} < min_h {min_h} or w {w} < min_w {min_w}")
            break
        make_dirs(video_dir)
        output_filename = os.path.join(video_dir, f"{frame_count:04d}.png")
        logger.debug(f"  Saving {output_filename}")
        cv2.imwrite(output_filename, img)
        frame_count += 1
    cap.release()
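
A hypothetical driver for the function above (the glob pattern and directory names are made up for illustration): dump up to 100 frames per clip, skipping clips smaller than 320x240:

import glob

for path in sorted(glob.glob('videos/*.mp4')):
    save_video_to_frames(path, 'frames', max_len=100, min_h=240, min_w=320)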
Example 4
    def __init__(self, mask_output_dir, size, bboxeses, save_masks=True):
        self.bboxeses = bboxeses
        self.size = size
        super().__init__(mask_output_dir, read=False)
        self.files = self.generate_masks()
        if save_masks:
            make_dirs(mask_output_dir)
            self.save_files(mask_output_dir)
Example 5
    def save_bboxes(self):
        make_dirs(self.bboxes_list_dir)
        logger.info(f"Saving bboxes to {self.bboxes_list_dir}")
        for i, bboxes in enumerate(self.bboxes_list):
            save_path = os.path.join(self.bboxes_list_dir,
                                     f"bboxes_{i:04}.txt")
            if len(bboxes) > 0:
                np.savetxt(save_path, bboxes[0], fmt='%4u')
Example 6
def main(args, vid_lens):
    preset = get_stroke_preset(args.stroke_preset)
    make_dirs(args.output_dir)

    if args.redo_without_generation:
        assert len(get_everything_under(args.output_dir)) > 0
        # put back clustered masks
        for clustered_subdir in get_everything_under(args.output_dir):
            if not os.path.isdir(clustered_subdir):
                continue
            for f in get_everything_under(clustered_subdir):
                shutil.move(f, args.output_dir)
            os.rmdir(clustered_subdir)

    else:
        if args.image_masks:
            for i in range(args.n):
                if i % 100 == 0:
                    logger.info(f'Generating mask number {i:07d}')
                nStroke = decide_nStroke(args)
                mask = get_video_masks_by_moving_random_stroke(
                    video_len=1,
                    imageWidth=args.image_width,
                    imageHeight=args.image_height,
                    nStroke=nStroke,
                    **preset)[0]
                mask.save(os.path.join(args.output_dir, f'{i:07d}.png'))

        else:
            for i in range(args.n):
                mask_dir = make_dir_under_root(args.output_dir, f'{i:05d}')
                mask_reader = MaskReader(mask_dir, read=False)

                nStroke = decide_nStroke(args)
                masks = get_video_masks_by_moving_random_stroke(
                    imageWidth=args.image_width,
                    imageHeight=args.image_height,
                    video_len=vid_lens[i],
                    nStroke=nStroke,
                    **preset)

                mask_reader.set_files(masks)
                mask_reader.save_files(output_dir=mask_reader.dir_name)

    if args.leave_boarder_unmasked is not None:
        logger.info(
            f'Creating a copy of all outputs and erasing the copies\' borders '
            f'by {args.leave_boarder_unmasked} pixels')
        dir_leave_boarder = copy_masks_without_boarder(args.output_dir, args)
        if args.cluster_by_area:
            cluster_by_masked_area(dir_leave_boarder, args)

    if args.cluster_by_area:
        cluster_by_masked_area(args.output_dir, args)
Example 7
def main(args):
    make_dirs(args.output_root_dir)
    dirnames = read_dirnames_under_root(args.input_root_dir)
    for dirname in dirnames:
        try:
            output_path = os.path.join(args.output_root_dir, f"{dirname}.mp4")
            dirpath = os.path.join(args.input_root_dir, dirname,
                                   args.input_postfix)
            reader = FrameReader(dirpath, max_length=args.max_len)
            reader.write_files_to_video(output_path)
        except Exception as err:
            logger.error(err, exc_info=True)
Example 8
def init_i3d_model():
    global i3d_model
    if i3d_model is not None:
        return

    logger.info("Loading I3D model for FID score ..")
    i3d_model_weight = '../libs/model_weights/i3d_rgb_imagenet.pt'
    if not os.path.exists(i3d_model_weight):
        make_dirs(os.path.dirname(i3d_model_weight))
        urllib.request.urlretrieve('http://www.cmlab.csie.ntu.edu.tw/~zhe2325138/i3d_rgb_imagenet.pt', i3d_model_weight)
    i3d_model = InceptionI3d(400, in_channels=3, final_endpoint='Logits')
    i3d_model.load_state_dict(torch.load(i3d_model_weight))
    i3d_model.to(torch.device('cuda:0'))
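
init_i3d_model is a lazy, module-level singleton: the first call downloads the weights and builds the model, and every later call returns immediately. It assumes surrounding module state roughly like the following sketch (InceptionI3d is the repo's own I3D implementation; the logger setup is an assumption):

import logging
import os
import urllib.request

import torch

logger = logging.getLogger(__name__)

# Module-level cache read by init_i3d_model(); populated once, on first call.
i3d_model = None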
Example 9
def main(args):
    make_dirs(args.output_dir)
    # root_frames_dirs: methods' output directories
    root_frames_dirs = args.root_frames_dirs
    if args.assume_ordered:
        # frames_dirnames[i][j]: target video directory's basename
        # i: method index, j: video index
        frames_dirnames_list = [
            read_dirnames_under_root(root_frames_dir)
            for root_frames_dir in root_frames_dirs
        ]
        for j in range(len(frames_dirnames_list[0])):
            # Find the target video directory of each method
            targets = [
                os.path.join(root_frames_dirs[i], frames_dirnames_list[i][j])
                for i in range(len(frames_dirnames_list))
            ]
            # For each target video directory, descend into args.result_postfix if present
            targets = [
                target if args.result_postfix not in os.listdir(target) else
                os.path.join(target, args.result_postfix) for target in targets
            ]
            reader = CompareFramesReader(targets,
                                         names=args.names,
                                         col=args.col)
            reader.write_files_to_video(
                os.path.join(args.output_dir,
                             f"{args.name_prefix}{j:04d}_compare.mp4"),
                fps=args.fps,
                frame_num_when_repeat_list=args.frame_num_when_repeat_list)

    else:
        frames_dirnames = read_dirnames_under_root(
            root_frames_dirs[0])[:args.test_num]
        for name in frames_dirnames:
            reader = CompareFramesReader(
                [
                    os.path.join(x, name) if args.result_postfix
                    not in os.listdir(os.path.join(x, name)) else os.path.join(
                        x, name, args.result_postfix) for x in root_frames_dirs
                ],
                names=args.names,
                col=args.col
                # mask_dir=os.path.join(args.root_mask_dir, name)
            )
            reader.write_files_to_video(
                os.path.join(args.output_dir,
                             f"{args.name_prefix}{name}_compare.mp4"),
                fps=args.fps,
                frame_num_when_repeat_list=args.frame_num_when_repeat_list)
Example 10
def init_i3d_model():
    global i3d_model
    if i3d_model is not None:
        return

    logger.info("Loading I3D model for FID score ..")
    i3d_model_weight = '../libs/model_weights/i3d_rgb_imagenet.pt'
    if not os.path.exists(i3d_model_weight):
        make_dirs(os.path.dirname(i3d_model_weight))
        urllib.request.urlretrieve(
            'https://github.com/piergiaj/pytorch-i3d/'
            'raw/master/models/rgb_imagenet.pt', i3d_model_weight)
    i3d_model = InceptionI3d(400, in_channels=3, final_endpoint='Logits')
    i3d_model.load_state_dict(torch.load(i3d_model_weight))
    i3d_model.to(torch.device('cuda:0'))
Example 11
    def train(self):
        """
        Train the model
        """
        for epoch in range(self.start_epoch, self.epochs):
            torch.manual_seed(self.config.seed)
            self._train_epoch(epoch)

            self.logger.info(f"{'!' * 10}    VALIDATION    {'!' * 10}")
            validation_loss = self._valid_epoch(epoch, 'validation',
                                                self.valid_data_loader)
            make_dirs(self.checkpoint_dir)

            self.checkpointer(epoch, validation_loss)
            self.lr_scheduler.step(validation_loss)
Example 12
    def parse(self, configure=None):
        if not self.initialized:
            self.initialize()

        if configure is not None:
            for k, v in configure.items():
                setattr(self.opt, k, v)
        os.environ['CUDA_VISIBLE_DEVICES'] = self.opt.gpu_ids
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # save to the disk
        time_str = time.strftime("#%m-%d-%H", time.localtime(time.time()))
        expr_name = '-'.join([
            self.opt.model, self.opt.dataset,
            str(self.opt.lr), self.opt.optimizer, self.opt.suffix, time_str
        ])
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.task_name,
                                expr_name)
        setattr(self.opt, 'expr_name', expr_name)
        setattr(self.opt, 'expr_dir', expr_dir)
        print("-> set tensorboard by: \n",
              "tensorboard --logdir {}".format(os.path.abspath(expr_dir)))
        os.system("kill -9 $(ps axu | grep " + os.path.abspath(expr_dir) +
                  " | awk '{print $2}')")
        os.system("tensorboard --logdir {} &".format(
            os.path.abspath(expr_dir)))
        self.opt.__setattr__('data_gen', self.opt.checkpoints_dir)
        # self.opt.__setattr__('resume', expr_dir)
        util.make_dirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
Example 13
def main(config, resume, output_root_dir=None, pretrained_path=None):
    train_logger = Logger()

    # setup data_loader instances
    inference_only = config.get('inference_only', False)
    if inference_only:
        data_loader = None
    else:
        data_loader = get_instance(module_data, 'data_loader', config)

    if data_loader is not None and data_loader.valid_sampler:
        assert 'valid_data_loader' not in config.keys(), (
            'valid set can only be split from the train set '
            'or specified in the config, not both.')
        valid_data_loader = data_loader.split_validation()
    elif 'valid_data_loader' in config.keys():
        valid_data_loader = get_instance(module_data, 'valid_data_loader',
                                         config)
    else:
        valid_data_loader = None

    if 'test_data_loader' in config.keys():
        if isinstance(config['test_data_loader'], list):
            test_data_loader = [
                getattr(module_data, entry['type'])(**entry['args'])
                for entry in config['test_data_loader']
            ]
        else:
            test_data_loader = get_instance(module_data, 'test_data_loader',
                                            config)
    else:
        test_data_loader = None

    # map each loss nickname to its weight
    losses = {
        entry['nickname']: entry['weight']
        for entry in config['losses']
    }

    # build model architecture
    model = get_instance(module_arch, 'arch', config, losses=losses)
    model.summary()

    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # Build optimizers and the learning rate scheduler. To disable the
    # scheduler, delete every line containing lr_scheduler.
    # trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    g_params = []
    d_s_params = []
    d_t_params = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if 'temporal_discriminator' in name:
            d_t_params.append(param)
        elif 'spatial_discriminator' in name:
            d_s_params.append(param)
        else:
            g_params.append(param)

    optimizer_g = get_instance(torch.optim, 'optimizer', config, g_params)
    if hasattr(model, 'spatial_discriminator'):
        optimizer_d_s = get_instance(torch.optim, 'optimizer', config,
                                     d_s_params)
    else:
        optimizer_d_s = None
    if hasattr(model, 'temporal_discriminator'):
        optimizer_d_t = get_instance(torch.optim, 'optimizer', config,
                                     d_t_params)
    else:
        optimizer_d_t = None
    lr_scheduler = get_instance(torch.optim.lr_scheduler, 'lr_scheduler',
                                config, optimizer_g)

    trainer = Trainer(model,
                      losses,
                      metrics,
                      optimizer_g,
                      optimizer_d_s,
                      optimizer_d_t,
                      resume=resume,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler,
                      train_logger=train_logger,
                      test_data_loader=test_data_loader,
                      pretrained_path=pretrained_path)

    if output_root_dir is not None:
        make_dirs(output_root_dir)
        trainer.printlog = True
        trainer.evaluate_test_set(output_root_dir=output_root_dir, epoch=0)
    else:
        trainer.train()
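
The losses dict comprehension above implies that config['losses'] is a list of entries each carrying a 'nickname' and a 'weight'. A hypothetical config fragment matching that shape (the nicknames and weights are made up):

config = {
    "losses": [
        {"nickname": "reconstruction", "weight": 1.0},
        {"nickname": "adversarial_spatial", "weight": 0.01},
    ],
}
losses = {entry['nickname']: entry['weight'] for entry in config['losses']}
print(losses)  # {'reconstruction': 1.0, 'adversarial_spatial': 0.01}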
Example 14
    def save_files(self, output_dir=None):
        if output_dir is None:
            # fall back to the reader's own directory (see Example 6)
            output_dir = self.dir_name
        make_dirs(output_dir)
        logger.info(f"Saving {self.__class__.__name__} files to {output_dir}")
        for i, file_ in enumerate(self.files):
            self._save_file(output_dir, i, file_)
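
save_files delegates the per-file work to a _save_file hook defined by each reader subclass. A minimal hypothetical implementation for an image-backed reader, reusing the zero-padded filename pattern seen in Example 3 (the PIL-style save call is an assumption):

    def _save_file(self, output_dir, i, file_):
        # file_ is assumed to be a PIL.Image here; other readers may differ.
        file_.save(os.path.join(output_dir, f"{i:04d}.png"))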