Example #1
def main():
    import os
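    # Make only GPU 1 visible; this must be set before the first CUDA call.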
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_args()
    args.out = os.path.join(args.save_path, 'result_test.pkl')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # get dataset
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    # build model
    assert args.gpus == 1
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint)
    model = MMDataParallel(model, device_ids=[0])

    # model_flow = FlowNetCHead()
    # load_checkpoint(model_flow, args.checkpointflow)

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   num_gpus=1,
                                   dist=False,
                                   shuffle=False)
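    # Either reuse a previously dumped result file or run inference now.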
    if args.load_result:
        outputs = mmcv.load(args.out)
    else:
        # test
        outputs = single_test(model,
                              data_loader,
                              cfg,
                              args.show,
                              save_path=args.save_path)

    if args.out:
        if not args.load_result:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if not isinstance(outputs[0], dict):
                result_file = args.out + '.json'
                results2json_videoseg(dataset, outputs, result_file)
                ytvos_eval(result_file, eval_types, dataset.ytvos)
            else:
                raise NotImplementedError
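
All four examples call a parse_args() helper that is not shown. A minimal sketch of the flags Example #1 reads, assuming argparse is used (the flag names come from the code above; defaults and help strings are guesses):

def parse_args():
    # Hypothetical reconstruction; only the attributes accessed in Example #1 are defined.
    import argparse
    parser = argparse.ArgumentParser(description='Test a video segmentation model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--gpus', type=int, default=1,
                        help='number of GPUs (only 1 is supported)')
    parser.add_argument('--save_path', required=True,
                        help='directory where results are written')
    parser.add_argument('--load_result', action='store_true',
                        help='reuse a previously dumped .pkl instead of running inference')
    parser.add_argument('--eval', nargs='+', default=None,
                        help='evaluation types, e.g. segm')
    parser.add_argument('--show', action='store_true',
                        help='visualize results during testing')
    parser.add_argument('--out', default=None,
                        help='output file; Example #1 derives this from --save_path')
    return parser.parse_args()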
Example #2
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only" '
         'or "--show"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # Restrict testing to the video indices selected on the command line.
    vid_idx_list = args.vid_idx
    cfg.data.test.vid_idx_list = vid_idx_list

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # get dataset
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    assert args.gpus == 1
    model = build_detector(
        cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint)
    model = MMDataParallel(model, device_ids=[0])

    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        num_gpus=1,
        dist=False,
        shuffle=False)
    if args.load_result:
        outputs = mmcv.load(args.out)
    else:
        outputs = single_test(model, data_loader, args.show, save_path=args.save_path)

    if args.out:
        if not args.load_result:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if not isinstance(outputs[0], dict):
                result_file = args.out + '.json'
                dataset = obj_from_dict(cfg.data.test_video, datasets,
                                        dict(test_mode=True, vid_idx_list=vid_idx_list))
                results2json_videoseg(dataset, outputs, result_file)
                ytvos_eval(result_file, eval_types, dataset.ytvos,
                           vid_idx_list=vid_idx_list)
            else:
                raise NotImplementedError
Example #3
def main():
    import os

    # Synchronous CUDA launches give clearer error traces; pin inference to GPU 1.
    os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_args()
    args.out = os.path.join(args.save_path, 'result_test.pkl')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # get dataset
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    # build model
    assert args.gpus == 1
    model = build_detector(
        cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

    load_checkpoint(model, args.checkpoint)
    # model.load_flow()
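    # NOTE: this second load overrides the weights just loaded from args.checkpoint.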
    resume = "/home/ubuntu/code/fengda/MaskTrackRCNN/pretrained_models/epoch_12.pth"
    load_checkpoint(model, resume)
    model = MMDataParallel(model, device_ids=[0])

    # device = torch.device("cuda")
    # model.to(device)

    # from mmdet.models.decision_net.ddpg import ActorNet as Net
    # rl_model = Net(512, 4, checkpoint="/home/ubuntu/code/fengda/MaskTrackRCNN/tools/output_ddpg/2020-03-11 21:12:05/epoch0--0.09926817708735841/policy_net.pkl")
    # rl_model = rl_model.cuda()
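    # The RL-based resolution controller is disabled for this run.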
    rl_model = None

    # args.load_result = "/home/ubuntu/datasets/YT-VIS/results/test-flow-trained-29-all/result_test.pkl"
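    # Test_video=False evaluates the whole test split at once; True loops over the videos listed in val.csv.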
    Test_video = False
    if not Test_video:
        if args.load_result:
            outputs = mmcv.load(args.out)
        else:
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                num_gpus=1,
                dist=False,
                shuffle=False)
            outputs, stats = single_test(model, data_loader, args, rl_model=rl_model,
                                         show=args.show, save_path=args.save_path)

        if args.out:
            if not args.load_result:
                print('writing results to {}'.format(args.out))
                mmcv.dump(outputs, args.out)
            eval_types = args.eval
            if eval_types:
                print('Starting to evaluate {}'.format(' and '.join(eval_types)))
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json_videoseg(dataset, outputs, result_file)
                    ytvos_eval(result_file, eval_types, dataset.ytvos, show=True)
                else:
                    raise NotImplementedError
    else:
        import pandas as pd
        from mmdet.models.decision_net.utils import modify_cfg, get_dataloader
        from mmdet.datasets.utils import get_dataset
        # val_videos = list(pd.read_csv('train.csv').video_name)
        val_videos = list(pd.read_csv('val.csv').video_name)
        # val_videos = ['0065b171f9', '01c76f0a82', '4083cfbe15']
        from tqdm import tqdm
        results = {}
        for video_name in tqdm(val_videos):
            # get data loader of the selected video
            cfg_test = modify_cfg(cfg, video_name)
            ann_file = cfg_test.ann_file
            try:
                dataset = obj_from_dict(cfg_test, datasets, dict(test_mode=True))
                print('video name: {}.\t len of dataset: {}.'.format(video_name, len(dataset)))
                data_loader = build_dataloader(dataset, imgs_per_gpu=1, workers_per_gpu=0,
                                               num_gpus=1, dist=False, shuffle=False)
                outputs, stats = single_test(model, data_loader, args, args.show,
                                             save_path=os.path.join(args.save_path, video_name))
                # print(np.mean(stats))
            except Exception:
                traceback.print_exc()
                continue
            if args.out:
                if not args.load_result:
                    print('writing results to {}'.format(args.out))
                    mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if eval_types:
                    print('Starting to evaluate {}'.format(' and '.join(eval_types)))
                    if not isinstance(outputs[0], dict):
                        result_file = args.out + '.json'
                        results2json_videoseg(dataset, outputs, result_file)
                        stats = ytvos_eval(result_file, eval_types, dataset.ytvos, show=True)
                        results[video_name] = stats[0]
                    else:
                        raise NotImplementedError
        # Save the per-video stats dict into a json file.
        import json
        with open(os.path.join(args.save_path, 'test-flow-trained-29.json'), 'w') as json_file:
            json.dump(results, json_file, ensure_ascii=False)
Example #4
def test(model, rl_model, data_loader, args, epoch):
    print("================= Testing.=================")
    model.eval()
    results = []
    scales = []
    stats = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    num = 0
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    last_full = dict()
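    # Candidate down-scale factors the policy can choose from (1 = full resolution).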
    scale_factors = [1, 1 / 2, 1 / 3, 1 / 4]
    for i, data in enumerate(data_loader):
        is_first = data['img_meta'][0].data[0][0]['is_first']

        if is_first:
            # Full resolution if it is the first frame of the video.
            scale_factor = 1
            num = 1

            def state_reset():
                """Reset Features for states."""
                feat_self = get_self_feat(model, data['img'][0])
                feat_diff = torch.zeros_like(feat_self)
                feat_FAR = torch.tensor([0.]).cuda()
                feat_history = torch.zeros([10]).cuda()

                return [feat_diff, feat_self, feat_FAR, feat_history]

            state = state_reset()
        else:
            action = rl_model(state).argmax()
            scale_factor = scale_factors[action]

        if scale_factor == 1:
            with torch.no_grad():
                # data = get_low_data(deepcopy(data), 1/4, size_divisor=32, is_train=False)
                result = model(return_loss=False, rescale=True, **data)
            last_full['img'] = data['img'][0]
            last_full['feat_map_last'] = result[2]
            last_full['feat_self_last'] = get_self_feat(model, data['img'][0])

        else:
            # resize data
            low_data = get_low_data(deepcopy(data), scale_factor, size_divisor=32, is_train=False)
            with torch.no_grad():
                result = model(return_loss=False, rescale=True, key_frame=last_full, **low_data)

        def state_step(s, a):
            """Advance the state features after taking action a (the chosen scale factor)."""
            feat_self = get_self_feat(model, data['img'][0])
            feat_diff = get_diff_feat(last_full['feat_self_last'], feat_self)
            if a == 1:
                feat_FAR = torch.tensor([0.]).cuda()
            else:
                feat_FAR = s[2] + 0.05
                if feat_FAR > 1:
                    feat_FAR = torch.tensor([1.]).cuda()
            # The history feature is kept as a zero placeholder here.
            feat_history = torch.zeros([10]).cuda()
            return [feat_diff, feat_self, feat_FAR, feat_history]

        state = state_step(state, scale_factor)

        scales.append(scale_facotr)
        result = result[0:2]  # keep only the detection/segmentation outputs
        results.append(result)

        batch_size = data['img'][0].size(0)
        for _ in range(batch_size):
            prog_bar.update()

    eval_types = ['segm']
    result_file = 'results.json'
    results2json_videoseg(dataset, results, result_file)
    stat = ytvos_eval(result_file, eval_types, dataset.ytvos, show=False)

    ec = [2.486862653249294, 0.8707167229351835, 0.7147069263013582, 0.6858342799104453, 0.6757044771429019]
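    # ec[1]..ec[4] are per-frame energy costs for scales 1, 1/2, 1/3 and 1/4
    # (presumably measured offline); ec[0] is unused below.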
    print("Count for 1 : ", scales.count(1))
    print("Count for 1/2 : ", scales.count(1/2))
    print("Count for 1/3 : ", scales.count(1/3))
    print("Count for 1/4 : ", scales.count(1/4))

    e = (scales.count(1) * ec[1] + scales.count(1 / 2) * ec[2]
         + scales.count(1 / 3) * ec[3] + scales.count(1 / 4) * ec[4])
    print("Energy Consumption:{}".format(e))

    # Save model
    if args.save_model:
        # now = time.strftime("%Y-%m-%d %X",time.localtime())
        save_dir = os.path.join(args.output_folder,
                                "epoch%d-EC%.3f-MAP%.3f" % (epoch, e, stat[0]))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        # `agent` is assumed to be a module-level RL agent; it is not defined in this function.
        agent.save_model(save_dir)
    # NOTE: stats is never populated in this function; callers receive an empty list.
    return results, stats