def test(model_cfg, dataset_cfg, checkpoint, batch_size=64, gpus=1, workers=4):
    dataset = call_obj(**dataset_cfg)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=workers)

    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)
    load_checkpoint(model, checkpoint, map_location='cpu')
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    model.eval()

    results = []
    labels = []
    prog_bar = ProgressBar(len(dataset))
    for data, label in data_loader:
        with torch.no_grad():
            output = model(data).data.cpu().numpy()
        results.append(output)
        labels.append(label)
        for i in range(len(data)):
            prog_bar.update()
    results = np.concatenate(results)
    labels = np.concatenate(labels)

    print('Top 1: {:.2f}%'.format(100 * topk_accuracy(results, labels, 1)))
    print('Top 5: {:.2f}%'.format(100 * topk_accuracy(results, labels, 5)))
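All of these examples share the same mmcv ProgressBar pattern: construct the bar with the total task count, then call update() once per finished item. A minimal sketch of that pattern (the per-item work is a placeholder):

import time

from mmcv import ProgressBar

tasks = range(20)
prog_bar = ProgressBar(len(tasks))  # prints the empty bar immediately
for _ in tasks:
    time.sleep(0.01)   # placeholder for real per-item work
    prog_bar.update()  # advance the bar and refresh the ETA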
Example 2
    def evaluate_full_dataset(
            self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        if self.data_config["backend"] == "fake":
            return {"bbox_mAP": 0}

        # Will need custom reducer to do this across gpus
        prog_bar = ProgressBar(len(data_loader.dataset))

        results = []
        for i, batch in enumerate(data_loader):
            # TODO: modify this to use cpu_only field of DataContainers.
            batch["img"] = [self.context.to_device(batch["img"])]
            batch = {key: batch[key][0] for key in batch}
            with torch.no_grad():
                result = self.model(return_loss=False, rescale=True, **batch)
            if isinstance(result[0], tuple):
                result = [(bbox_results, encode_mask_results(mask_results))
                          for bbox_results, mask_results in result]
            batch_size = len(result)
            results.extend(result)

            for _ in range(batch_size):
                prog_bar.update()

        eval_kwargs = self.cfg.evaluation.copy()  # copy so the pops below don't mutate the shared cfg

        for key in ["interval", "tmpdir", "start", "gpu_collect"]:
            eval_kwargs.pop(key, None)

        metrics = data_loader.dataset.evaluate(results, **eval_kwargs)
        return metrics
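This example advances the bar once per result through an inner loop, as several later ones do. In recent mmcv versions ProgressBar.update() accepts a num_tasks argument, so the inner loop can be collapsed; a small equivalent sketch (assuming an mmcv version where update() takes num_tasks):

# instead of:
#     for _ in range(batch_size):
#         prog_bar.update()
# a single call suffices:
prog_bar.update(batch_size)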
Example 3
def test_initial_stage(args):
    torch.manual_seed(777)
    torch.cuda.manual_seed(777)

    args.INITIAL_HOLE = True
    args.get_mask = True

    eval_dataset = FlowInitial.FlowSeq(args, isTest=True)
    eval_dataloader = DataLoader(eval_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 drop_last=False,
                                 num_workers=args.n_threads)

    if args.ResNet101:
        dfc_resnet101 = resnet_models.Flow_Branch(33, 2)
        dfc_resnet = nn.DataParallel(dfc_resnet101).cuda()
    else:
        dfc_resnet50 = resnet_models.Flow_Branch_Multi(input_chanels=33,
                                                       NoLabels=2)
        dfc_resnet = nn.DataParallel(dfc_resnet50).cuda()

    dfc_resnet.eval()
    resume_iter = load_ckpt(args.PRETRAINED_MODEL, [('model', dfc_resnet)],
                            strict=True)
    print('Loaded pretrained model from', args.PRETRAINED_MODEL)

    task_bar = ProgressBar(len(eval_dataset))
    for i, item in enumerate(eval_dataloader):
        with torch.no_grad():
            input_x = item[0].cuda()
            flow_masked = item[1].cuda()
            mask = item[3].cuda()
            output_dir = item[4][0]

            res_flow = dfc_resnet(input_x)
            res_complete = res_flow * mask[:, 10:11, :, :] + \
                flow_masked[:, 10:12, :, :] * (1. - mask[:, 10:11, :, :])

            output_dir_split = output_dir.split(',')
            output_file = os.path.join(args.output_root, output_dir_split[0])
            output_basedir = os.path.dirname(output_file)
            if not os.path.exists(output_basedir):
                os.makedirs(output_basedir)
            res_save = res_complete[0].permute(
                1, 2, 0).contiguous().cpu().data.numpy()
            cvb.write_flow(res_save, output_file)
            task_bar.update()
    sys.stdout.write('\n')
    dfc_resnet = None
    torch.cuda.empty_cache()
    print('Initial Results Saved in', args.output_root)
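The completion step above is a standard masked composite: keep the network prediction inside the hole and the known flow everywhere else. A hedged restatement using the names from the example (channels 10:12 hold the target flow pair, channel 10 the matching hole mask):

hole = mask[:, 10:11, :, :]  # 1 inside the hole, 0 outside
res_complete = res_flow * hole + flow_masked[:, 10:12, :, :] * (1. - hole)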
Example 4
def test(model_cfg, dataset_cfg, checkpoint, batch_size=64, gpus=1, workers=2):
    dataset = call_obj(**dataset_cfg)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=workers)

    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)
    load_checkpoint(model, checkpoint, map_location='cpu')
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    #model = MMDataParallel(model)
    model.eval()

    results = []
    labels = []
    prog_bar = ProgressBar(len(dataset))
    total_time = 0
    for data, label in data_loader:
        with torch.no_grad():
            start = time.time()
            output = model(data).data.cpu().numpy()

            if torch.cuda.is_available():
                torch.cuda.synchronize()

            t = time.time() - start
            total_time += t

        results.append(output)
        labels.append(label)
        for i in range(len(data)):
            prog_bar.update()
    results = np.concatenate(results)
    labels = np.concatenate(labels)

    #macs, params = get_model_complexity_info(model.cuda(), (3, 300, 18, 2), as_strings=True,
    #                                              print_per_layer_stat=True, verbose=True)
    #print('{:<30}  {:<8}'.format('Computational complexity: ', macs))
    #print('{:<30}  {:<8}'.format('Number of parameters: ', params))

    print("Average infer time: ", total_time / len(data_loader))
    print("Total infer time: ", total_time)
    print('Top 1: {:.2f}%'.format(100 * topk_accuracy(results, labels, 1)))
    print('Top 5: {:.2f}%'.format(100 * topk_accuracy(results, labels, 5)))
Example 5
def test(model_cfg, dataset_cfg, checkpoint, batch_size=64, gpus=1, workers=4):
    #cnt = 0
    #confusion
    conf_matrix = torch.zeros(model_cfg.num_class, model_cfg.num_class)
    #confusion
    set_determined_seed(seed)  # `seed` is presumably a module-level constant
    torch.multiprocessing.set_sharing_strategy('file_system')
    dataset = call_obj(**dataset_cfg)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=workers)

    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)
    load_checkpoint(model, checkpoint, map_location='cpu')
    model = MMDataParallel(model, device_ids=get_gpus(gpus)).cuda()
    model.eval()

    results = []
    labels = []
    prog_bar = ProgressBar(len(dataset))
    for data, label in data_loader:
        with torch.no_grad():
            # cnt += 1
            # print("\n"+str(cnt))
            # torch.cuda.empty_cache()
            output = model(data).data.cpu().numpy()
        results.append(output)
        labels.append(label)
        for i in range(len(data)):
            prog_bar.update()
    results = np.concatenate(results)
    labels = np.concatenate(labels)

    #confusion
    conf_matrix = confusion_matrix(
        torch.max(torch.from_numpy(results), 1)[1], labels, conf_matrix)
    np.save('/home/computer/WBH/GCN/INTERGCN/conf.npy', conf_matrix)
    #confusion

    print('Top 1: {:.2f}%'.format(100 * topk_accuracy(results, labels, 1)))
    print('Top 5: {:.2f}%'.format(100 * topk_accuracy(results, labels, 5)))
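confusion_matrix here is a project-local accumulator rather than sklearn's function: it adds this batch's counts into the pre-allocated conf_matrix. A minimal sketch of such an updater, inferred from the call site (the row/column convention is an assumption):

def confusion_matrix(preds, labels, conf_matrix):
    # rows = ground truth, columns = prediction (convention assumed, not confirmed)
    for p, t in zip(preds, labels):
        conf_matrix[int(t), int(p)] += 1
    return conf_matrix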
Example 6
def detect(inputs,
           results,
           model_cfg,
           dataset_cfg,
           checkpoint,
           video_dir,
           batch_size=64,
           gpus=1,
           workers=4):
    print('detect start')
    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)
    load_checkpoint(model, checkpoint, map_location='cpu')
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    model.eval()

    results = []  # NOTE: rebinding here discards the `results` argument passed in
    labels = []
    video_file_list = os.listdir(video_dir)
    prog_bar = ProgressBar(len(video_file_list))
    for video_file in video_file_list:
        data = inputs.get()
        data_loader = data_parse(data, dataset_cfg.pipeline,
                                 dataset_cfg.data_source.num_track)
        data, label = data_loader
        with torch.no_grad():
            data = torch.from_numpy(data)
            # add a leading batch dimension (batch_size = 1)
            data = data.unsqueeze(0)
            data = data.float().to("cuda:0").detach()
            output = model(data).data.cpu().numpy()
        results.append(output)
        labels.append(torch.tensor([label]))
        for i in range(len(data)):
            prog_bar.update()
    print('--------', results, labels, '--------------')
    results = np.concatenate(results)
    labels = np.concatenate(labels)

    print('Top 1: {:.2f}%'.format(100 * topk_accuracy(results, labels, 1)))
    print('Top 5: {:.2f}%'.format(100 * topk_accuracy(results, labels, 5)))
Example 7
    def inference(self, dataloader):
        self.model.eval()
        inf_res, ith = [], 0
        logging.info('Running inference on {} batches...'.format(len(dataloader)))
        logging.info('Test config:')
        logging.info(str(self.test_cfg))
        prog_bar = ProgressBar(len(dataloader))
        with torch.no_grad():
            for test_data in dataloader:
                img_metas = test_data['img_meta'].data[0]
                img_data = test_data['img'].data[0].to(self.device)

                bboxes, scores, categories = self.inference_one(
                    img_data, img_metas)
                for i in range(len(img_metas)):
                    bbox, score, category, img_meta = (
                        bboxes[i], scores[i], categories[i], img_metas[i])
                    scale = img_meta['scale_factor']
                    filename = img_meta['filename']
                    filename = osp.basename(filename)
                    iid = int(filename[:-4])
                    # ori_shape is (h, w, c) in mmdet, so height comes first
                    img_h, img_w = img_meta['ori_shape'][:2]
                    img_res = {
                        'width': img_w,
                        'height': img_h,
                        'image_id': iid,
                        'file_name': filename
                    }
                    if len(bbox) == 0:
                        logging.warning(
                            '0 predictions for image {}'.format(iid))
                        continue
                    img_res['bbox'] = utils.xyxy2xywh(bbox).t() / scale
                    img_res['score'] = score
                    img_res['category'] = category
                    logging.info(
                        '{} bbox predictions for {}-th image with image id: {}'
                        .format(bbox.shape[1], ith, iid))
                    inf_res.append(img_res)
                ith += 1
                prog_bar.update()
        return inf_res
Example 8
def _test_load_pretrained():
    """We traversed all potential config files under the `config` file. If you
    need to print details or debug code, you can use this function.

    Returns:
        check_cfg_names (list[str]): Config files that backbone initialized
        from pretrained checkpoint might be problematic. Need to recheck
        the config file. The output including the config files that the
        backbone.init_cfg is None
    """
    check_cfg_names = _traversed_config_file()
    need_check_cfg = []

    prog_bar = ProgressBar(len(check_cfg_names))
    for config in check_cfg_names:
        init_cfg_name = _check_backbone(config)
        if init_cfg_name is not None:
            need_check_cfg.append(init_cfg_name)
        prog_bar.update()
    print('These config files need to be checked again')
    print(need_check_cfg)
Example 9
def main():
    args = parse_args()
    wind_w, wind_h = args.window_size.split('*')
    wind_w, wind_h = int(wind_w), int(wind_h)
    cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options,
                            args.phase)

    dataset, pipeline = build_dataset_pipeline(cfg, args.phase)
    CLASSES = dataset.CLASSES
    display_number = min(args.number, len(dataset))
    prog_bar = ProgressBar(display_number)

    with vis.ImshowInfosContextManager(fig_size=(wind_w, wind_h)) as manager:
        for i, item in enumerate(itertools.islice(dataset, display_number)):
            image = get_display_img(item, pipeline, args.mode, args.bgr2rgb)
            if args.adaptive:
                image = adaptive_size(args.mode, image, args.min_edge_length,
                                      args.max_edge_length)

            # dist_path defaults to None, meaning images are not saved
            dist_path = None
            if args.output_dir:
                # some datasets (e.g. CIFAR) have no filename; fall back to the index
                src_path = item.get('filename', '{}.jpg'.format(i))
                dist_path = os.path.join(args.output_dir, Path(src_path).name)

            infos = dict(label=CLASSES[item['gt_label']])

            ret, _ = manager.put_img_infos(image,
                                           infos,
                                           font_size=20,
                                           out_file=dist_path,
                                           show=args.show,
                                           **args.show_options)

            prog_bar.update()

            if ret == 1:
                print('\nManually interrupted.')
                break
Example 10
def infer(args):
    assert args.data_list is not None or args.frame_dir is not None

    if args.frame_dir is not None:
        data_list = generate_flow_list(args.frame_dir)
        args.data_list = data_list

    device = torch.device('cuda:0')

    Flownet = FlowNet2(args, requires_grad=False)
    print('====> Loading', args.pretrained_model_flownet2)
    flownet2_ckpt = torch.load(args.pretrained_model_flownet2)
    Flownet.load_state_dict(flownet2_ckpt['state_dict'])
    Flownet.to(device)
    Flownet.eval()

    dataset_ = FlowInfer(args.data_list, size=args.img_size)
    dataloader_ = DataLoader(dataset_, batch_size=1, shuffle=False)
    task_bar = ProgressBar(len(dataset_))

    for i, (f1, f2, output_path_) in enumerate(dataloader_):
        f1 = f1.to(device)
        f2 = f2.to(device)

        flow = Flownet(f1, f2)

        output_path = output_path_[0]

        output_file = os.path.dirname(output_path)
        if not os.path.exists(output_file):
            os.makedirs(output_file)

        flow_numpy = flow[0].permute(1, 2, 0).data.cpu().numpy()
        cvb.write_flow(flow_numpy, output_path)
        task_bar.update()
    sys.stdout.write('\n')
    print('FlowNet2 inference finished!')
    print('Extracted flow has been saved in', output_file)

    return output_file
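A side note on the exists-then-makedirs pattern used throughout these examples: there is a race window between the check and the creation. os.makedirs with exist_ok=True is the atomic, idiomatic equivalent:

import os

# equivalent to: if not os.path.exists(d): os.makedirs(d)
os.makedirs(output_file, exist_ok=True)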
Example 11
def run_train(opt, model, crit, optimizer, loader, device, logger=None, epoch=-1, return_all_info=False, **kwargs):
    model.train()
    crit.reset_loss_recorder()
    vocab = loader.dataset.get_vocab()

    pb = ProgressBar(len(loader))
    pb.start()
    for data in loader:
        optimizer.zero_grad()
        results = get_forword_results(opt, model, data, device=device, only_data=False, vocab=vocab, **kwargs)
        loss = crit.get_loss(results, epoch=epoch)
        loss.backward()

        clip_grad_value_(model.parameters(), opt['grad_clip'])
        optimizer.step()
        pb.update()

    name, loss_info = crit.get_loss_info()
    if logger is not None:
        logger.write_text('\t'.join(['%10s: %05.3f' % (item[0], item[1]) for item in zip(name, loss_info)]))

    if return_all_info:
        return loss_info
    return loss_info[0]
Example 12
def process_tasks(tasks, dataset, model, out_dir, batch_size,
                  input_clip_length, pipeline):
    flat_tasks = [(idx, v[0], v[1], v[2]) for idx, sub_tasks in tasks.items()
                  for v in sub_tasks]

    progress_bar = ProgressBar(len(flat_tasks))

    batch = []
    for task in flat_tasks:
        batch.append(task)
        if len(batch) == batch_size:
            process_batch(batch, dataset, model, out_dir, input_clip_length,
                          pipeline)

            for _ in range(batch_size):
                progress_bar.update()
            batch = []

    if len(batch) > 0:
        process_batch(batch, dataset, model, out_dir, input_clip_length,
                      pipeline)

        for _ in range(len(batch)):
            progress_bar.update()
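The accumulate-and-flush batching above can be expressed with a small chunking helper, which also removes the duplicated flush for the final partial batch. A sketch (again assuming an mmcv version where update() takes a task count):

def chunks(seq, size):
    # yield successive chunks of `size` items; the last may be shorter
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

for batch in chunks(flat_tasks, batch_size):
    process_batch(batch, dataset, model, out_dir, input_clip_length, pipeline)
    progress_bar.update(len(batch))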
Example 13
def build(inputs,
          detection_cfg,
          estimation_cfg,
          tracker_cfg,
          video_dir,
          gpus=1,
          video_max_length=10000,
          category_annotation=None):
    print('data build start')
    cache_checkpoint(detection_cfg.checkpoint_file)
    cache_checkpoint(estimation_cfg.checkpoint_file)

    if category_annotation is None:
        video_categories = dict()
    else:
        with open(category_annotation) as f:
            video_categories = json.load(f)['annotations']

    if tracker_cfg is not None:
        raise NotImplementedError

    pose_estimators = init_pose_estimator(detection_cfg,
                                          estimation_cfg,
                                          device=0)

    video_file_list = []
    get_all_file(video_dir, video_file_list)

    prog_bar = ProgressBar(len(video_file_list))
    for video_path in video_file_list:
        video_file = os.path.basename(video_path)
        reader = mmcv.VideoReader(video_path)
        video_frames = reader[:video_max_length]

        annotations = []
        num_keypoints = -1
        for i, image in enumerate(video_frames):
            res = inference_pose_estimator(pose_estimators, image)
            res['frame_index'] = i
            if not res['has_return']:
                continue
            num_person = len(res['joint_preds'])
            assert len(res['person_bbox']) == num_person

            for j in range(num_person):
                keypoints = [[p[0], p[1], round(s[0], 2)] for p, s in zip(
                    res['joint_preds'][j].round().astype(int).tolist(),
                    res['joint_scores'][j].tolist())]
                num_keypoints = len(keypoints)
                person_info = dict(
                    person_bbox=res['person_bbox'][j].round().astype(int).tolist(),
                    frame_index=res['frame_index'],
                    id=j,
                    person_id=None,
                    keypoints=keypoints)
                annotations.append(person_info)
        annotations = sorted(annotations, key=lambda x: x['frame_index'])
        category_id = (video_categories[video_file]['category_id']
                       if video_file in video_categories else -1)
        info = dict(video_name=video_file,
                    resolution=reader.resolution,
                    num_frame=len(video_frames),
                    num_keypoints=num_keypoints,
                    keypoint_channels=['x', 'y', 'score'],
                    version='1.0')
        video_info = dict(info=info,
                          category_id=category_id,
                          annotations=annotations)
        inputs.put(video_info)
        prog_bar.update()
Example 14
def infer(args):
    assert args.data_list is not None or args.frame_dir is not None

    if args.frame_dir is not None:
        data_list = generate_flow_list(args.frame_dir)
        args.data_list = data_list

    device = torch.device('cuda:0')

    Flownet = FlowNet2(args, requires_grad=False)
    print('====> Loading', args.pretrained_model_flownet2)
    flownet2_ckpt = torch.load(args.pretrained_model_flownet2)
    Flownet.load_state_dict(flownet2_ckpt['state_dict'])
    Flownet.to(device)
    Flownet.eval()

    dataset_ = FlowInfer(args.data_list, size=args.img_size)
    dataloader_ = DataLoader(dataset_, batch_size=1, shuffle=False)
    task_bar = ProgressBar(len(dataset_))

    for i, (f1, f2, f3, f4, f5, output_path_1, output_path_2, output_path_3,
            output_path_4) in enumerate(dataloader_):
        f1 = f1.to(device)
        f2 = f2.to(device)
        f3 = f3.to(device)
        f4 = f4.to(device)
        f5 = f5.to(device)
        if (output_path_1[0][-4:] == 'rflo'):
            flow_1 = Flownet(f3, f1)
            flow_2 = Flownet(f3, f2)
            flow_3 = Flownet(f3, f4)
            flow_4 = Flownet(f3, f5)
        else:
            flow_1 = Flownet(f1, f3)
            flow_2 = Flownet(f2, f3)
            flow_3 = Flownet(f4, f3)
            flow_4 = Flownet(f5, f3)
        output_path_01 = output_path_1[0]
        output_path_02 = output_path_2[0]
        output_path_03 = output_path_3[0]
        output_path_04 = output_path_4[0]
        # write the four flows, creating each output directory as needed
        for flow, out_path in zip(
                (flow_1, flow_2, flow_3, flow_4),
                (output_path_01, output_path_02, output_path_03,
                 output_path_04)):
            output_file = os.path.dirname(out_path)
            if not os.path.exists(output_file):
                os.makedirs(output_file)
            flow_numpy = flow[0].permute(1, 2, 0).data.cpu().numpy()
            cvb.write_flow(flow_numpy, out_path)

        task_bar.update()
    sys.stdout.write('\n')
    print('FlowNet2 inference finished!')
    print('Extracted flow has been saved in', output_file)

    return output_file
Example 15
def test_refine_stage(args):
    torch.manual_seed(777)
    torch.cuda.manual_seed(777)

    eval_dataset = FlowRefine.FlowSeq(args, isTest=True)
    eval_dataloader = DataLoader(eval_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 drop_last=False,
                                 num_workers=args.n_threads)

    if args.ResNet101:
        dfc_resnet101 = resnet_models.Flow_Branch(33, 2)
        dfc_resnet = nn.DataParallel(dfc_resnet101).cuda()
    else:
        dfc_resnet50 = resnet_models.Flow_Branch_Multi(input_chanels=33,
                                                       NoLabels=2)
        dfc_resnet = nn.DataParallel(dfc_resnet50).cuda()

    dfc_resnet.eval()

    resume_iter = load_ckpt(args.PRETRAINED_MODEL, [('model', dfc_resnet)],
                            strict=True)

    print('Loaded pretrained model from', args.PRETRAINED_MODEL)

    task_bar = ProgressBar(len(eval_dataset))
    for i, item in enumerate(eval_dataloader):
        if i > 1:  # NOTE: debug leftover; only the first two batches are processed
            break
        with torch.no_grad():
            input_x = item[0].cuda()
            flow_masked = item[1].cuda()
            gt_flow = item[2].cuda()
            mask = item[3].cuda()
            output_dir = item[4][0]  # item[3] is the mask; the path is item[4], as in test_initial_stage

            res_flow = dfc_resnet(input_x)

            res_flow_f = res_flow[:, :2, :, :]
            res_flow_r = res_flow[:, 2:, :, :]

            res_complete_f = res_flow_f * mask[:, 10:11, :, :] + \
                flow_masked[:, 10:12, :, :] * (1. - mask[:, 10:11, :, :])
            res_complete_r = res_flow_r * mask[:, 32:34, :, :] + \
                flow_masked[:, 32:34, :, :] * (1. - mask[:, 32:34, :, :])

            output_dir_split = output_dir.split(',')

            output_file_f = os.path.join(args.output_root, output_dir_split[0])
            output_file_r = os.path.join(args.output_root, output_dir_split[1])
            output_basedir = os.path.dirname(output_file_f)
            if not os.path.exists(output_basedir):
                os.makedirs(output_basedir)

            res_save_f = res_complete_f[0].permute(
                1, 2, 0).contiguous().cpu().data.numpy()
            cvb.write_flow(res_save_f, output_file_f)
            res_save_r = res_complete_r[0].permute(
                1, 2, 0).contiguous().cpu().data.numpy()
            cvb.write_flow(res_save_r, output_file_r)
            task_bar.update()

    print('Refined Results Saved in', args.output_root)
Example 16
def run_eval(
        opt, model, crit, loader, vocab, device,
        json_path='', json_name='', scorer=COCOScorer(),
        teacher_model=None, dict_mapping={},
        no_score=False, print_sent=False, analyze=False,
        collect_best_candidate_iterative_results=False, collect_path=None,
        extra_opt={}, summarywriter=None, global_step=0):
    opt.update(extra_opt)
    model.eval()
    if teacher_model is not None:
        teacher_model.eval()

    gt_captions = loader.dataset.get_references()
    pred_captions = defaultdict(list)

    opt['collect_best_candidate_iterative_results'] = collect_best_candidate_iterative_results
    translator = Translator(model=model, opt=opt, teacher_model=teacher_model, dict_mapping=dict_mapping)

    best_candidate_sents = defaultdict(list)
    best_candidate_score = defaultdict(list)

    best_ar_sent = []
    all_time = 0

    if crit is not None:
        crit.reset_loss_recorder()

    collect_ar_flag = (opt['decoding_type'] == 'ARFormer' and collect_best_candidate_iterative_results)

    pb = ProgressBar(len(loader))
    pb.start()
    for data in loader:
        with torch.no_grad():
            encoder_outputs, category, labels = get_forword_results(opt, model, data, device=device, only_data=True,
                                                                    vocab=vocab)
            if crit is not None:
                _ = crit.get_loss(encoder_outputs)

            if teacher_model is not None:
                teacher_encoder_outputs, *_ = get_forword_results(opt, teacher_model, data, device=device,
                                                                  only_data=True, vocab=vocab)
            else:
                teacher_encoder_outputs = None

            if opt['batch_size'] == 1:
                start_time = time.time()
            all_hyp, all_scores = translator.translate_batch(encoder_outputs, category, labels, vocab,
                                                             teacher_encoder_outputs=teacher_encoder_outputs)
            if opt['batch_size'] == 1:
                all_time += (time.time() - start_time)

            if isinstance(all_hyp, torch.Tensor):
                if len(all_hyp.shape) == 2:
                    all_hyp = all_hyp.unsqueeze(1)
                all_hyp = all_hyp.tolist()
            if isinstance(all_scores, torch.Tensor):
                if len(all_scores.shape) == 2:
                    all_scores = all_scores.unsqueeze(1)
                all_scores = all_scores.tolist()

            video_ids = np.array(data['video_ids']).reshape(-1)

        for k, hyps in enumerate(all_hyp):
            video_id = video_ids[k]
            if not no_score:
                assert len(hyps) == 1

            for j, hyp in enumerate(hyps):
                sent = to_sentence(hyp, vocab)
                if opt.get('duplicate', False) and opt['decoding_type'] == 'NARFormer':
                    sent, _ = duplicate(sent)

                if not collect_ar_flag:
                    # for evaluation
                    pred_captions[video_id].append({'image_id': video_id, 'caption': sent})
                else:
                    # for collection
                    pred_captions[video_id].append({'caption': sent, 'score': all_scores[k][j]})

        if collect_best_candidate_iterative_results and not collect_ar_flag:
            assert isinstance(all_scores, tuple)
            all_sents = all_scores[0].tolist()
            all_score = all_scores[1].tolist()

            if len(video_ids) != len(all_sents):
                video_ids = np.array(data['video_ids'])[:, np.newaxis].repeat(opt['length_beam_size'], axis=1).reshape(
                    -1)
                assert len(video_ids) == len(all_sents)

            for k, (hyps, scores) in enumerate(zip(all_sents, all_score)):
                video_id = video_ids[k]
                pre_sent_len = 0
                assert len(hyps) == len(scores)

                for j, (hyp, score) in enumerate(zip(hyps, scores)):
                    sent = to_sentence(hyp, vocab)

                    if not pre_sent_len:
                        pre_sent_len = len(sent.split(' '))
                    else:
                        assert len(sent.split(' ')) == pre_sent_len

                    best_candidate_sents[video_id].append(sent)
                    best_candidate_score[video_id].append(score)
        pb.update()

    if collect_best_candidate_iterative_results:
        assert collect_path is not None
        if not collect_ar_flag:
            pickle.dump([best_candidate_sents, best_candidate_score],
                        open(collect_path, 'wb'))
        else:
            pickle.dump(pred_captions, open(collect_path, 'wb'))

    if opt['batch_size'] == 1:
        latency = all_time / len(loader)
        print('Average per-batch latency: {:.4f}s over {} batches'.format(
            latency, len(loader)))

    res = {}
    if analyze:
        ave_length, novel, unique, usage, hy_res, gram4 = analyze_length_novel_unique(loader.dataset.captions,
                                                                                      pred_captions, vocab,
                                                                                      splits=loader.dataset.splits, n=1)
        res.update({'ave_length': ave_length, 'novel': novel, 'unique': unique, 'usage': usage, 'gram4': gram4})

    if not no_score:
        # with suppress_stdout_stderr():
        valid_score, detail_scores = scorer.score(gt_captions, pred_captions, pred_captions.keys())

        res.update(valid_score)
        metric_sum = opt.get('metric_sum', [1, 1, 1, 1])
        candidate = [res["Bleu_4"], res["METEOR"], res["ROUGE_L"], res["CIDEr"]]
        res['Sum'] = sum([item for index, item in enumerate(candidate) if metric_sum[index]])
        if crit is not None:
            names, metrics = crit.get_loss_info()
            for n, m in zip(names, metrics):
                res[n] = m

    if summarywriter is not None:
        for k, v in res.items():
            summarywriter.add_scalar(k, v, global_step=global_step)

    if json_path:
        if not os.path.exists(json_path):
            os.makedirs(json_path)

        with open(os.path.join(json_path, json_name), 'w') as prediction_results:
            json.dump({"predictions": pred_captions, "scores": valid_score},
                      prediction_results)

    return res
Example 17
def propagation(args, frame_inapint_model=None):
    # Setup dataset

    img_root = args.img_root
    mask_root = args.mask_root
    flow_root = args.flow_root
    output_root = args.output_root_propagation

    # print(img_root)
    # print(args.img_shape)
    # print(mask_root)

    # note: the shape list may be modified below; check it before relying on it
    img_shape = args.img_shape
    th_warp = args.th_warp

    video_list = os.listdir(flow_root)
    video_list.sort()

    st_time = time.time()

    flow_no_list = [int(x[:5]) for x in os.listdir(flow_root) if '.flo' in x]
    flow_start_no = min(flow_no_list)
    print('Flow Start no', flow_start_no)
    if not os.path.exists(output_root):
        os.makedirs(output_root)

    frame_name_list = sorted(os.listdir(img_root))
    frames_num = len(frame_name_list)
    frame_inpaint_seq = np.ones(frames_num - 1)
    masked_frame_num = np.sum((frame_inpaint_seq > 0).astype(int))  # np.int was removed in NumPy 1.24
    print(masked_frame_num, 'frames need to be inpainted.')

    image = cv2.imread(os.path.join(img_root, frame_name_list[0]))
    if img_shape[0] < 1:
        shape = image.shape
    else:
        shape = img_shape
    print('The output shape is:', shape)

    image = cv2.resize(image, (shape[1], shape[0]))
    iter_num = 0
    result_pool = [
        np.zeros(image.shape, dtype=image.dtype) for _ in range(frames_num)
    ]
    label_pool = [
        np.zeros(image.shape, dtype=image.dtype) for _ in range(frames_num)
    ]

    while masked_frame_num > 0:

        results = [
            np.zeros(image.shape + (2, ), dtype=image.dtype)
            for _ in range(frames_num)
        ]
        time_stamp = [
            -np.ones(image.shape[:2] + (2, ), dtype=int)
            for _ in range(frames_num)
        ]

        print('Iter', iter_num, 'Forward Propagation')
        # forward
        if iter_num == 0:
            image = cv2.imread(os.path.join(img_root, frame_name_list[0]))
            image = cv2.resize(image, (shape[1], shape[0]))
            if args.FIX_MASK:
                label = cv2.imread(os.path.join(mask_root),
                                   cv2.IMREAD_UNCHANGED)
            else:
                label = cv2.imread(
                    os.path.join(mask_root, '%05d.png' % (flow_start_no)),
                    cv2.IMREAD_UNCHANGED)
            print(flow_start_no)
            label = cv2.resize(label, (image.shape[1], image.shape[0]),
                               interpolation=cv2.INTER_NEAREST)
        else:
            image = result_pool[0]
            label = label_pool[0]

        if len(label.shape) == 3:
            label = label[:, :, 0]
        if args.enlarge_mask and iter_num == 0:
            kernel = np.ones((args.enlarge_kernel, args.enlarge_kernel),
                             np.uint8)
            label = cv2.dilate(label, kernel, iterations=1)

        label = (label > 0).astype(np.uint8)
        image[label > 0, :] = 0

        results[0][..., 0] = image
        time_stamp[0][label == 0, 0] = 0
        prog_bar = ProgressBar(frames_num - 1)
        for th in range(1, frames_num - 1):
            prog_bar.update()
            if iter_num == 0:
                image = cv2.imread(os.path.join(img_root, frame_name_list[th]))
                image = cv2.resize(image, (shape[1], shape[0]))
            else:
                image = result_pool[th]

            flow1 = flo.readFlow(os.path.join(flow_root, '%05d0.flo' % (th)))
            flow2 = flo.readFlow(os.path.join(flow_root, '%05d1.flo' % (th)))
            flow3 = flo.readFlow(os.path.join(flow_root, '%05d2.flo' % (th)))
            flow4 = flo.readFlow(os.path.join(flow_root, '%05d3.flo' % (th)))

            flow1 = flo.flow_tf(flow1, image.shape)
            flow2 = flo.flow_tf(flow2, image.shape)
            flow3 = flo.flow_tf(flow3, image.shape)
            flow4 = flo.flow_tf(flow4, image.shape)

            if iter_num == 0:
                if not args.FIX_MASK:
                    label = cv2.imread(
                        os.path.join(mask_root, '%05d.png' % (th)),
                        cv2.IMREAD_UNCHANGED)
                else:
                    label = cv2.imread(os.path.join(mask_root),
                                       cv2.IMREAD_UNCHANGED)
                label = cv2.resize(label, (image.shape[1], image.shape[0]),
                                   interpolation=cv2.INTER_NEAREST)
            else:
                label = label_pool[th]

            if len(label.shape) == 3:
                label = label[:, :, 0]

            if args.enlarge_mask and iter_num == 0:
                kernel = np.ones((args.enlarge_kernel, args.enlarge_kernel),
                                 np.uint8)
                label = cv2.dilate(label, kernel, iterations=1)

            label = (label > 0).astype(np.uint8)
            image[(label > 0), :] = 0

            temp1 = flo.get_warp_label(flow1,
                                       flow2,
                                       results[th - 1][..., 0],
                                       th=th_warp)
            #print(temp1)
            temp3 = flo.get_warp_label(flow3, flow4, temp1, th=th_warp)

            temp2 = flo.get_warp_label(flow1,
                                       flow2,
                                       time_stamp[th - 1],
                                       th=th_warp,
                                       value=-1)[..., 0]
            #print(time_stamp[th - 1])
            temp6 = [-np.ones(image.shape[:2] + (2, ), dtype=int)]
            temp6[0][..., 0] = temp2
            temp4 = flo.get_warp_label(flow3,
                                       flow4,
                                       temp6[0],
                                       th=th_warp,
                                       value=-1)[..., 0]

            results[th][..., 0] = temp3
            time_stamp[th][..., 0] = temp4

            results[th][label == 0, :, 0] = image[label == 0, :]
            time_stamp[th][label == 0, 0] = th

        sys.stdout.write('\n')
        print('Iter', iter_num, 'Backward Propagation')
        # backward
        if iter_num == 0:

            image = cv2.imread(
                os.path.join(img_root, frame_name_list[frames_num - 1]))
            image = cv2.resize(image, (shape[1], shape[0]))

            if not args.FIX_MASK:
                label = cv2.imread(
                    os.path.join(mask_root, '%05d.png' % (frames_num - 1)),
                    cv2.IMREAD_UNCHANGED)
            else:
                label = cv2.imread(os.path.join(mask_root),
                                   cv2.IMREAD_UNCHANGED)
            label = cv2.resize(label, (image.shape[1], image.shape[0]),
                               interpolation=cv2.INTER_NEAREST)
        else:
            image = result_pool[-1]
            label = label_pool[-1]

        if len(label.shape) == 3:
            label = label[:, :, 0]
        if args.enlarge_mask and iter_num == 0:
            kernel = np.ones((args.enlarge_kernel, args.enlarge_kernel),
                             np.uint8)
            label = cv2.dilate(label, kernel, iterations=1)

        label = (label > 0).astype(np.uint8)
        image[(label > 0), :] = 0

        results[frames_num - 1][..., 1] = image
        time_stamp[frames_num - 1][label == 0, 1] = frames_num - 1
        prog_bar = ProgressBar(frames_num - 1)
        for th in range(frames_num - 2, 0, -1):
            prog_bar.update()
            if iter_num == 0:
                image = cv2.imread(os.path.join(img_root, frame_name_list[th]))
                image = cv2.resize(image, (shape[1], shape[0]))
                if not args.FIX_MASK:
                    label = cv2.imread(
                        os.path.join(mask_root, '%05d.png' % (th)),
                        cv2.IMREAD_UNCHANGED)
                else:
                    label = cv2.imread(os.path.join(mask_root),
                                       cv2.IMREAD_UNCHANGED)
                label = cv2.resize(label, (image.shape[1], image.shape[0]),
                                   interpolation=cv2.INTER_NEAREST)
            else:
                image = result_pool[th]
                label = label_pool[th]

            flow1 = flo.readFlow(os.path.join(flow_root, '%05d3.rflo' % (th)))
            flow2 = flo.readFlow(os.path.join(flow_root, '%05d2.rflo' % (th)))
            flow3 = flo.readFlow(os.path.join(flow_root, '%05d1.rflo' % (th)))
            flow4 = flo.readFlow(os.path.join(flow_root, '%05d0.rflo' % (th)))

            flow1 = flo.flow_tf(flow1, image.shape)
            flow2 = flo.flow_tf(flow2, image.shape)
            flow3 = flo.flow_tf(flow3, image.shape)
            flow4 = flo.flow_tf(flow4, image.shape)

            if len(label.shape) == 3:
                label = label[:, :, 0]
            if args.enlarge_mask and iter_num == 0:
                kernel = np.ones((args.enlarge_kernel, args.enlarge_kernel),
                                 np.uint8)
                label = cv2.dilate(label, kernel, iterations=1)

            label = (label > 0).astype(np.uint8)
            image[(label > 0), :] = 0

            temp1 = flo.get_warp_label(flow1,
                                       flow2,
                                       results[th + 1][..., 1],
                                       th=th_warp)
            temp3 = flo.get_warp_label(flow3, flow4, temp1, th=th_warp)
            temp2 = flo.get_warp_label(
                flow1,
                flow2,
                time_stamp[th + 1],
                value=-1,
                th=th_warp,
            )[..., 1]

            temp6 = [-np.ones(image.shape[:2] + (2, ), dtype=int)]
            temp6[0][..., 1] = temp2
            temp7 = flo.get_warp_label(flow3,
                                       flow4,
                                       temp6[0],
                                       th=th_warp,
                                       value=-1)[..., 1]

            results[th][..., 1] = temp3
            time_stamp[th][..., 1] = temp7

            results[th][label == 0, :, 1] = image[label == 0, :]
            time_stamp[th][label == 0, 1] = th

        sys.stdout.write('\n')
        tmp_label_seq = np.zeros(frames_num - 1)
        print('Iter', iter_num, 'Merge Results')
        # merge
        prog_bar = ProgressBar(frames_num)
        for th in range(0, frames_num - 1):
            prog_bar.update()
            v1 = (time_stamp[th][..., 0] == -1)
            v2 = (time_stamp[th][..., 1] == -1)

            hole_v = (v1 & v2)

            result = results[th][..., 0].copy()
            result[v1, :] = results[th][v1, :, 1].copy()

            v3 = ((v1 == 0) & (v2 == 0))

            dist = time_stamp[th][..., 1] - time_stamp[th][..., 0]
            dist[dist < 1] = 1

            w2 = (th - time_stamp[th][..., 0]) / dist
            w2 = (w2 > 0.5).astype(float)  # np.float was removed in NumPy 1.24

            result[v3, :] = (results[th][..., 1] * w2[..., np.newaxis] +
                             results[th][..., 0] *
                             (1 - w2)[..., np.newaxis])[v3, :]

            result_pool[th] = result.copy()

            tmp_mask = np.zeros_like(result)
            tmp_mask[hole_v, :] = 255
            label_pool[th] = tmp_mask.copy()
            tmp_label_seq[th] = np.sum(tmp_mask)

        sys.stdout.write('\n')
        frame_inpaint_seq[tmp_label_seq == 0] = 0
        masked_frame_num = np.sum((frame_inpaint_seq > 0).astype(int))
        print(masked_frame_num)
        iter_num += 1

        if masked_frame_num > 0:
            key_frame_ids = get_key_ids(frame_inpaint_seq)
            print(key_frame_ids)
            for fid in key_frame_ids:  # renamed from `id` to avoid shadowing the builtin
                with torch.no_grad():
                    tmp_inpaint_res = frame_inapint_model.forward(
                        result_pool[fid], label_pool[fid])
                label_pool[fid] = label_pool[fid] * 0.
                result_pool[fid] = tmp_inpaint_res
        else:
            print(frames_num, 'frames have been inpainted by', iter_num,
                  'iterations.')

        tmp_label_seq = np.zeros(frames_num - 1)
        for th in range(0, frames_num - 1):
            tmp_label_seq[th] = np.sum(label_pool[th])
        frame_inpaint_seq[tmp_label_seq == 0] = 0
        masked_frame_num = np.sum((frame_inpaint_seq > 0).astype(int))
        print(masked_frame_num)

        print('Writing frames to:', os.path.join(output_root, 'inpaint_res'))

        if not os.path.exists(os.path.join(output_root, 'inpaint_res')):
            os.makedirs(os.path.join(output_root, 'inpaint_res'))

        for th in range(1, frames_num - 1):
            cv2.imwrite(
                os.path.join(output_root, 'inpaint_res', '%05d.png' % (th)),
                result_pool[th].astype(np.uint8))

    print('Propagation has finished')
    pro_time = time.time() - st_time
    print('Propagation took {:.2f} s'.format(pro_time))
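The merge pass in propagation chooses, per pixel, between the forward- and backward-propagated values: pixels valid in only one direction take that value, and pixels valid in both take whichever source frame is temporally closer (the thresholded weight w2). A compact, hedged restatement of that rule:

import numpy as np

def merge_fb(fwd, bwd, t_fwd, t_bwd, th):
    """Per-pixel merge of forward/backward propagation results.

    fwd, bwd: (H, W, 3) propagated images; t_fwd, t_bwd: (H, W) source
    frame stamps with -1 marking holes; th: current frame index.
    Returns the merged image and the mask of pixels still unfilled.
    """
    v1, v2 = t_fwd == -1, t_bwd == -1
    out = fwd.copy()
    out[v1] = bwd[v1]                          # only the backward pass is valid
    both = ~v1 & ~v2
    dist = np.maximum(t_bwd - t_fwd, 1)
    w2 = ((th - t_fwd) / dist > 0.5).astype(float)[..., None]
    out[both] = (bwd * w2 + fwd * (1. - w2))[both]
    return out, v1 & v2                        # merged frame, remaining holes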
Example 18
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progress_bar = ProgressBar(self._max_iters, start=False)
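This snippet constructs the bar with start=False: mmcv then defers printing until start() is called explicitly, which lets a runner finish its setup before the display begins (the following two examples use exactly this). In sketch form:

prog_bar = ProgressBar(task_num=100, start=False)  # nothing printed yet
# ... build the runner, register hooks, etc. ...
prog_bar.start()   # print the initial bar and start the timer
prog_bar.update()  # then advance as usual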
Example 19
class DummyIterBasedRunner(IterBasedRunner):
    """Fake Iter-based Runner.

    This runner won't train model, and it will only call hooks and return all
    learning rate in each iteration.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progress_bar = ProgressBar(self._max_iters, start=False)

    def train(self, data_loader, **kwargs):
        lr_list = []
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        self._epoch = data_loader.epoch
        next(data_loader)
        self.call_hook('before_train_iter')
        lr_list.append(self.current_lr())
        self.call_hook('after_train_iter')
        self._inner_iter += 1
        self._iter += 1
        self.progress_bar.update(1)
        return lr_list

    def run(self, data_loaders, workflow, **kwargs):
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert len(data_loaders) == len(workflow)
        assert self._max_iters is not None, (
            'max_iters must be specified during instantiation')

        self.logger.info('workflow: %s, max: %d iters', workflow,
                         self._max_iters)
        self.call_hook('before_run')

        iter_loaders = [IterLoader(x) for x in data_loaders]

        self.call_hook('before_epoch')

        self.progress_bar.start()
        lr_list = []
        while self.iter < self._max_iters:
            for i, flow in enumerate(workflow):
                self._inner_iter = 0
                mode, iters = flow
                if not isinstance(mode, str) or not hasattr(self, mode):
                    raise ValueError(
                        'runner has no method named "{}" to run a workflow'.
                        format(mode))
                iter_runner = getattr(self, mode)
                for _ in range(iters):
                    if mode == 'train' and self.iter >= self._max_iters:
                        break
                    lr_list.extend(iter_runner(iter_loaders[i], **kwargs))

        self.progress_bar.file.write('\n')
        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_epoch')
        self.call_hook('after_run')
        return lr_list
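A hedged sketch of how such a dummy runner might be driven: register an LR hook, run one workflow, and inspect the collected schedule. The toy model, loader, and hook config are illustrative stand-ins, not values from the source:

import logging

import torch
import torch.nn as nn
from torch.utils.data import DataLoader


class ToyModel(nn.Module):
    """Minimal model exposing train_step, which mmcv runners require."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 1)

    def train_step(self, *args, **kwargs):  # never called by the dummy runner
        return dict(loss=torch.tensor(0.))


model = ToyModel()
runner = DummyIterBasedRunner(
    model=model,
    optimizer=torch.optim.SGD(model.parameters(), lr=0.1),
    logger=logging.getLogger(),
    max_iters=10)
runner.register_lr_hook(dict(policy='step', step=[5]))  # illustrative config
lr_list = runner.run([DataLoader(torch.ones(4, 2))], [('train', 1)])
assert len(lr_list) == 10  # one learning-rate entry per iteration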
Example 20
class DummyEpochBasedRunner(EpochBasedRunner):
    """Fake Epoch-based Runner.

    This runner won't train model, and it will only call hooks and return all
    learning rate in each iteration.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progress_bar = ProgressBar(self._max_epochs, start=False)

    def train(self, data_loader, **kwargs):
        lr_list = []
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        self._max_iters = self._max_epochs * len(self.data_loader)
        self.call_hook('before_train_epoch')
        for i in range(len(self.data_loader)):
            self._inner_iter = i
            self.call_hook('before_train_iter')
            lr_list.append(self.current_lr())
            self.call_hook('after_train_iter')
            self._iter += 1

        self.call_hook('after_train_epoch')
        self._epoch += 1
        self.progress_bar.update(1)
        return lr_list

    def run(self, data_loaders, workflow, **kwargs):
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert len(data_loaders) == len(workflow)

        assert self._max_epochs is not None, (
            'max_epochs must be specified during instantiation')

        for i, flow in enumerate(workflow):
            mode, epochs = flow
            if mode == 'train':
                self._max_iters = self._max_epochs * len(data_loaders[i])
                break

        self.logger.info('workflow: %s, max: %d epochs', workflow,
                         self._max_epochs)
        self.call_hook('before_run')

        self.progress_bar.start()
        lr_list = []
        while self.epoch < self._max_epochs:
            for i, flow in enumerate(workflow):
                mode, epochs = flow
                if isinstance(mode, str):  # self.train()
                    if not hasattr(self, mode):
                        raise ValueError(
                            f'runner has no method named "{mode}" to run an '
                            'epoch')
                    epoch_runner = getattr(self, mode)
                else:
                    raise TypeError(
                        'mode in workflow must be a str, but got {}'.format(
                            type(mode)))

                for _ in range(epochs):
                    if mode == 'train' and self.epoch >= self._max_epochs:
                        break
                    lr_list.extend(epoch_runner(data_loaders[i], **kwargs))

        self.progress_bar.file.write('\n')
        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_run')
        return lr_list
Example 21
if __name__ == "__main__":
    args = parse_args()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load model and config file
    with open(args.config) as fh:
        params = json.load(fh)
    model = ContextualRescorer(params).to(device)
    state_dict = torch.load(args.model)
    model.load_state_dict(state_dict)
    model.eval()

    # Preprocess input detections
    helper = Helper(args.path_anns, path_dets=args.path_dets)
    prog_bar = ProgressBar(len(helper.detections))
    rescored = []

    for id_, dets in helper.detections.items():
        ipt = input_tensor(id_, helper, device).unsqueeze(0)
        dets.sort(key=lambda det: det["score"], reverse=True)
        mask = torch.ones(1, 1, ipt.size(1), device=device, dtype=torch.float)

        # add paddings
        if ipt.size(1) < 100:
            pad = torch.zeros((1, 1, 100 - ipt.size(1)), device=device)
            mask = torch.cat((mask, pad), dim=2)
            pad = torch.zeros((1, 100 - ipt.size(1), 85), device=device)
            ipt = torch.cat((ipt, pad), dim=1)
        scores = model.forward(ipt, [ipt.size(1)], mask).reshape(-1)
        for i, det in enumerate(dets):
Example 22
def main():
    args = parse_args()
    data_dir = args.raw_dir
    output_dir = args.out_dir
    zip_dir = os.path.join(output_dir, 'zips')
    ann_dir = os.path.join(output_dir, 'annotations')
    mkdir_or_exist(zip_dir)
    mkdir_or_exist(ann_dir)

    cls_name_list = [
        fn for fn in os.listdir(data_dir)
        if os.path.isdir(os.path.join(data_dir, fn))
    ]
    assert len(cls_name_list) == 51
    print("All {} classes".format(len(cls_name_list)))

    video_path_list = []
    video_name_list = []
    for cls_id, cls_name in enumerate(cls_name_list):
        cls_path = os.path.join(data_dir, cls_name)
        file_list = [fn for fn in os.listdir(cls_path) if fn.endswith('.avi')]
        for file_name in file_list:
            video_path_list.append(os.path.join(cls_path, file_name))
            video_name_list.append(file_name[:-4])
    print("All {} videos".format(len(video_path_list)))

    print("Generate annotations...")
    for sp_id in range(3):
        train_fid = open(
            os.path.join(ann_dir, 'train_split_{}.txt'.format(sp_id + 1)), 'w')
        test_fid = open(
            os.path.join(ann_dir, 'test_split_{}.txt'.format(sp_id + 1)), 'w')
        print("Annotation split {}".format(sp_id + 1))
        prog_bar = ProgressBar(len(cls_name_list))
        for cls_id, cls_name in enumerate(cls_name_list):
            sp_file_path = os.path.join(
                args.ann_dir,
                '{}_test_split{}.txt'.format(cls_name, sp_id + 1))
            with open(sp_file_path, 'r') as f:
                lines = f.read().splitlines()
            for line in lines:
                if line.strip() == '':
                    continue
                video_name, tid = line.split(' ')[0:2]
                assert video_name.endswith('.avi')
                video_name = video_name[:-4]
                tid = int(tid)
                assert tid in (0, 1, 2)
                if tid == 1:
                    train_fid.write('{} {}\n'.format(video_name, cls_id + 1))
                elif tid == 2:
                    test_fid.write('{} {}\n'.format(video_name, cls_id + 1))
            prog_bar.update()

        train_fid.close()
        test_fid.close()

    print("Generate zip files...")
    prog_bar = ProgressBar(len(video_path_list))
    for i in range(len(video_path_list)):
        video_name = video_name_list[i]
        video_path = video_path_list[i]
        zip_path = os.path.join(zip_dir, '{}.zip'.format(video_name))
        video_to_zip(video_path, zip_path)
        prog_bar.update()
Example 23
def test(test_cfg,
         model_cfg,
         dataset_cfg,
         checkpoint,
         batch_size,
         work_dir,
         gpus=1,
         workers=4):

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    dataset = call_obj(**dataset_cfg,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           normalize,
                       ]))

    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size * gpus,
                                              shuffle=False,
                                              num_workers=workers * gpus)

    # put model on gpus
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg]
        model = torch.nn.Sequential(*model)
    else:
        model = call_obj(**model_cfg)

    load_checkpoint(model, checkpoint, map_location='cpu')
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    model.eval()
    # prepare for evaluation
    num_samples = len(dataset)
    prog_bar = ProgressBar(num_samples // (batch_size * gpus) + 1)
    all_preds = np.zeros((num_samples, model_cfg.skeleton_head.num_joints, 3),
                         dtype=np.float32)

    all_boxes = np.zeros((num_samples, 6))
    filenames = []
    imgnums = []
    image_path = []
    idx = 0

    # copied from HRNet's test loop
    with torch.no_grad():
        for i, (input, meta, target, target_weight) in enumerate(data_loader):
            # get prediction
            outputs = model.forward(input, return_loss=False)
            if isinstance(outputs, list):
                output = outputs[-1]
            else:
                output = outputs
            # flip test
            if test_cfg.flip:
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()
                outputs_flipped = model(input_flipped, return_loss=False)
                if isinstance(outputs_flipped, list):
                    output_flipped = outputs_flipped[-1]
                else:
                    output_flipped = outputs_flipped
                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                           dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
                # feature is not aligned, shift flipped heatmap for higher accuracy
                if test_cfg.shift_heatmap:
                    output_flipped[:, :, :, 1:] = \
                        output_flipped.clone()[:, :, :, 0:-1]
                output = (output + output_flipped) * 0.5

            c = meta['center'].numpy()
            s = meta['scale'].numpy()
            score = meta['score'].numpy()

            num_images = input.size(0)
            preds, maxvals = get_final_preds(test_cfg.post_process,
                                             output.detach().cpu().numpy(), c,
                                             s)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double-check this all_boxes layout
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
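            # s is in units of 200 px (the HRNet convention), so the product
            # below approximates the person box area in pixels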
            all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
            all_boxes[idx:idx + num_images, 5] = score
            image_path.extend(meta['image'])

            idx += num_images
            prog_bar.update()

        name_values, perf_indicator = dataset.evaluate(test_cfg, all_preds,
                                                       work_dir, all_boxes,
                                                       image_path, filenames,
                                                       imgnums)
    return perf_indicator
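
# A hypothetical way to invoke the function above (the config path and field
# names are illustrative assumptions, not taken from the original repo):
#
#   cfg = mmcv.Config.fromfile('configs/pose_estimation/hrnet_w32.py')
#   perf = test(cfg.test_cfg, cfg.model_cfg, cfg.dataset_cfg,
#               checkpoint='work_dir/latest.pth', batch_size=32,
#               work_dir='work_dir', gpus=1, workers=4)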
Example 24
    args = parse_args()

    # Setup camera
    pipeline, align, clipping_distance = setup_pipeline(args)

    if args.outdir:
        if os.path.exists(args.outdir):
            shutil.rmtree(args.outdir)
        os.makedirs(args.outdir, exist_ok=True)
        os.makedirs(os.path.join(args.outdir, 'Depth'), exist_ok=True)
        os.makedirs(os.path.join(args.outdir, 'RGB'), exist_ok=True)

    # Streaming loop
    frame_i = 0
    # Track progress against the requested frame budget when given; otherwise
    # fall back to a large default.
    progress = ProgressBar(task_num=args.num_frames if args.num_frames else 1e3)
    while True:
        frames = pipeline.wait_for_frames()

        # Align the depth frame and color frame
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        if args.color_mode == 'rgb8':
Example 25
def train_network_all(opt, model, device, **kwargs):
    if opt.get('load_teacher_weights', False):
        assert opt.get('teacher_path', None) is not None
        model = load_satisfied_weights(
            model=model,
            checkpoint_path=opt['teacher_path'],
            str_mapping={'decoder.bert.': 'decoder.'}
        )

    model.to(device)
    summarywriter = SummaryWriter(os.path.join(opt['checkpoint_path'], 'trainval'))
    optimizer = get_optimizer(opt, model, summarywriter=summarywriter)
    crit = get_criterion(opt, summarywriter=summarywriter)
    crit_eval = get_criterion_during_evaluation(opt)

    if opt.get('with_teacher', False) and opt['method'] in ['NAB', 'NACF']:
        assert opt.get('teacher_path', None) is not None
        teacher_model, _ = load_model_and_opt(opt['teacher_path'], device)
    else:
        teacher_model = None

    folder_path = os.path.join(opt["checkpoint_path"], 'tmp_models')
    best_model = k_PriorityQueue(
        k_best_model=opt.get('k_best_model', 1),
        folder_path=folder_path,
        standard=opt.get('standard', ['METEOR', 'CIDEr'])
        )

    train_loader = get_loader(opt, 'train', print_info=False, **kwargs)
    vali_loader = get_loader(opt, 'validate', print_info=False)
    # test_loader = get_loader(opt, 'test', print_info=False)
    vocab = vali_loader.dataset.get_vocab()

    logger = CsvLogger(
        filepath=opt["checkpoint_path"],
        filename='training_record.csv',
        fieldsnames=[
            'epoch', 'train_loss',
            'Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4',
            'METEOR', 'ROUGE_L', 'CIDEr', 'Sum']
            + crit.get_fieldsnames()
        )

    start_epoch = opt['start_epoch']
    pb = ProgressBar(opt['epochs'])
    pb.start()
    for epoch in range(opt['epochs']):
        if epoch < start_epoch:
            continue

        train_loader.dataset.shuffle()

        logger.write_text("epoch %d lr=%g (ss_prob=%g)" % (epoch, optimizer.get_lr(), opt.get('teacher_prob', 1)))
        # training
        train_loss = run_train(opt, model, crit, optimizer, train_loader, device, logger=logger, epoch=epoch)

        optimizer.epoch_update_learning_rate()

        if (epoch+1) > opt['start_eval_epoch'] and (epoch+1) % opt["save_checkpoint_every"] == 0:
            res = run_eval(opt, model, crit_eval, vali_loader, vocab, device, teacher_model=teacher_model, analyze=True,
                           summarywriter=summarywriter, global_step=epoch)
            res['train_loss'] = train_loss
            res['epoch'] = epoch
            logger.write(res)

            save_checkpoint(
                    {'epoch': epoch + 1, 'state_dict': model.state_dict(), 'validate_result': res, 'settings': opt},
                    False,
                    filepath=opt["checkpoint_path"],
                    filename='checkpoint.pth.tar'
                )

            model_name = 'model_%04d.pth.tar' % res['epoch']
            model_path = os.path.join(folder_path, model_name)
            not_break, info = best_model.check(res, opt, model_path, model_name)
            if not not_break:
                # reached the early-stopping tolerance
                break
            logger.write_text(info)

        pb.update()

    if not opt.get('no_test', False):
        model = model.to('cpu')
        del model
        del optimizer
        torch.cuda.empty_cache()
        os.system(
            'python translate.py --default --method {} --dataset {} --record --scope {} --field {} -em test --use_ct --base_checkpoint_path {}'.format(
                opt['method'], opt['dataset'], opt['scope'] if opt['scope'] else '\"\"', ' '.join(opt['field']), opt['base_checkpoint_path'])
        )

    if opt['k_best_model'] > 1:
        shutil.rmtree(folder_path)
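
# The trainer above is driven entirely by the `opt` dict. The keys it reads
# are sketched below (key names come from the accesses above; the values are
# illustrative assumptions only):
#
#   opt = {
#       'checkpoint_path': 'experiments/run1',   # logs, CSV, checkpoints
#       'base_checkpoint_path': 'experiments',
#       'epochs': 50, 'start_epoch': 0,
#       'start_eval_epoch': 10, 'save_checkpoint_every': 5,
#       'method': 'NACF', 'dataset': 'MSRVTT',
#       'scope': '', 'field': ['seq'],
#       'k_best_model': 3, 'standard': ['METEOR', 'CIDEr'],
#       'no_test': False,
#   }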
Example 26
    def validate(self, checkpoint_file_path, output_file):
        """Runs validation with QualitAI metrics."""

        print('Loading model...')
        self.cfg.data.test.test_mode = True
        self.cfg.model.pretrained = None

        model = build_detector(self.cfg.model,
                               train_cfg=None,
                               test_cfg=self.cfg.test_cfg)
        fp16_cfg = self.cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)

        checkpoint = load_checkpoint(model,
                                     checkpoint_file_path,
                                     map_location='cpu')

        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = self.dataset.CLASSES

        model = MMDataParallel(model, device_ids=[0]).cuda()
        model.eval()
        print('Done!')
        print('Starting inference run...')

        # Do inference
        results = []
        bool_preds = []
        bool_targets = []
        prog_bar = ProgressBar(len(self.loader.dataset))

        file_id_lookup = self.get_ids_of_files(self.dataset.coco)

        for i, data in enumerate(self.loader):
            with torch.no_grad():
                result = model(return_loss=False, rescale=True, **data)
            results.append(result)

            img_shape = data['img_meta'][0].data[0][0]['ori_shape']
            bool_pred = self.transform_preds_to_boolean(
                img_shape[0:2], result[0])
            bool_preds.append(bool_pred)
            path, img_name = split(data['img_meta'][0].data[0][0]['filename'])
            if img_name in file_id_lookup:
                img_id = file_id_lookup[img_name]
            else:
                img_name = join(split(path)[1], img_name)
                if img_name in file_id_lookup:
                    img_id = file_id_lookup[img_name]
                else:
                    raise KeyError(img_name)

            bool_target = self.transform_targets_to_boolean(
                self.dataset.coco, img_id, img_shape[0:2])
            bool_targets.append(bool_target)

            target_img = np.zeros(img_shape, dtype='uint8')
            target_img[bool_target] = [0, 255, 0]
            target_img = Image.fromarray(target_img)
            pred_img = np.zeros(img_shape, dtype='uint8')
            pred_img[bool_pred] = [255, 0, 0]
            pred_img = Image.fromarray(pred_img)
            intersection_img = np.zeros(img_shape, dtype='uint8')
            intersection_img[bool_target * bool_pred] = [0, 0, 255]
            intersection_img = Image.fromarray(intersection_img)
            target_img.save('/workspace/outputs/{}-target.png'.format(i))
            pred_img.save('/workspace/outputs/{}-pred.png'.format(i))
            intersection_img.save(
                '/workspace/outputs/{}-intersection.png'.format(i))

            prog_bar.update()

        # Dump out result files
        if not isinstance(results[0], dict):
            results2json(self.dataset, results, output_file)
        else:
            for name in results[0]:
                results_ = [result[name] for result in results]
                result_file = output_file + '.{}'.format(name)
                results2json(self.dataset, results_, result_file)

        # Calculate values
        "\nWriting out results..."

        print('\nStarting evaluation according to QualitAI metrics...')
        accuracy = 0.
        precision = 0.
        recall = 0.
        num_imgs = 0.
        for target, pred in zip(bool_targets, bool_preds):
            accuracy += self.calculate_accuracy(target, pred)
            precision += self.calculate_precision(target, pred)
            recall += self.calculate_recall(target, pred)
            num_imgs += 1.

        accuracy /= num_imgs
        precision /= num_imgs
        recall /= num_imgs

        print('Done!')

        print("\nResults:")
        print("======================")
        print("Num imgs:  {}".format(int(num_imgs)))
        print("Accuracy:  {:.7f}".format(accuracy))
        print("Precision: {:.7f}".format(precision))
        print("Recall:    {:.7f}".format(recall))
Example 27
def main(config, params, dataset):

    helper = Helper("data/annotations/instances_val2017.json")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    start = time()
    print("Loading train dataset...")
    train_dataset = Dataset("data/preprocessed/preprocessed_train2017_" +
                            dataset + ".pt")
    torch.cuda.empty_cache()

    print("Loading validation set...")
    val_dataset = Dataset("data/preprocessed/preprocessed_val2017_" + dataset +
                          ".pt")
    torch.cuda.empty_cache()
    print("Loaded validation set. (t=%.1f seconds)" % (time() - start))

    val_params = {
        "batch_size": params["val_batch_size"],
        "collate_fn": PadCollate()
    }
    val_dataloader = torch.utils.data.DataLoader(val_dataset, **val_params)

    train_params = {
        "batch_size": params["batch_size"],
        "shuffle": True,
        "collate_fn": PadCollate(shuffle_rate=params["shuffle_rate"]),
    }
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   **train_params)

    # Train loop
    model = ContextualRescorer(params).to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=params["learning_rate"])
    scheduler = LrScheduler(optimizer)
    logger = Logger(config, params, dataset=dataset)
    early_stopping_params = {"mode": "max", "patience": 20, "delta": 0.0001}
    early_stopper = EarlyStopping(**early_stopping_params)

    start = time()
    for epoch in range(params["n_epochs"]):
        loss, corrects, total = 0, 0, 0
        prog_bar = ProgressBar(len(train_dataloader))
        for i, (input_batch, target_batch,
                lengths) in enumerate(train_dataloader):
            batch_loss, corrects_, total_ = training_step(
                model, optimizer, input_batch, target_batch, lengths)
            loss += batch_loss
            corrects += corrects_
            total += total_
            prog_bar.update()

        loss = loss / len(train_dataloader)
        accuracy = corrects / total * 100

        # Measure loss and accuracy on validation set
        val_loss, val_accuracy = validate(val_dataloader, model)

        # Evaluate the AP on the validation set
        model.eval()
        print("\n --> Evaluating AP")
        write_validation_results(val_dataset, model, helper)
        stats = coco_eval()
        ap = stats[0]
        print("AP: {} \n\n".format(ap))

        if scheduler.step(ap):
            print(" --> Backtracking to best model")
            model.load_state_dict(logger.best_model)

        # Logging and early stopping
        logger.epoch(model, loss, accuracy, val_loss, val_accuracy, ap,
                     optimizer.param_groups[0]["lr"])
        if early_stopper.step(ap):
            print("	--> Early stopping")
            break

    logger.close()
    #visualize_model(helper, params, logger.best_model, val_dataset)
    print(config)
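
# The EarlyStopping helper constructed above is not shown in this snippet. A
# minimal sketch consistent with how it is used (mode='max' means a higher
# metric is better; patience counts non-improving epochs; delta is the
# smallest change that counts as an improvement; step() returns True when
# training should stop):
class EarlyStopping:
    def __init__(self, mode='max', patience=20, delta=0.0):
        self.mode, self.patience, self.delta = mode, patience, delta
        self.best = None
        self.bad_epochs = 0

    def step(self, metric):
        if self.best is None:
            self.best = metric
            return False
        if self.mode == 'max':
            improved = metric > self.best + self.delta
        else:
            improved = metric < self.best - self.delta
        if improved:
            self.best = metric
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs >= self.patience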