Example #1
def main():
    parser = ArgumentParser(description='COCO Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('--ann', help='annotation file path')
    parser.add_argument(
        '--types',
        type=str,
        nargs='+',
        choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
        default=['bbox'],
        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')
    parser.add_argument('--LRPEval',
                        type=int,
                        default=1,
                        help='Whether or not to provide oLRP results')
    parser.add_argument('--LRPtau',
                        type=float,
                        default=0.5,
                        help='True Positive Validation Threshold for LRP')
    args = parser.parse_args()
    coco_eval(args.result, args.types, args.ann, args.max_dets, args.LRPEval,
              args.LRPtau)
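
These example scripts omit their imports and entry-point guard. A minimal header that would make Example #1 runnable, assuming the mmdetection 1.x layout visible in Example #22 (where coco_eval is imported from mmdet.core), could look like this:

from argparse import ArgumentParser

from mmdet.core import coco_eval  # COCO-style evaluation helper in mmdetection 1.x


if __name__ == '__main__':
    main()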
Example #2
def main():
    parser = ArgumentParser(description='COCO Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('--ann', help='annotation file path')
    parser.add_argument('--in_detail',
                        type=int,
                        default=0,
                        help='whether or not use detail evaluation')
    parser.add_argument(
        '--types',
        type=str,
        nargs='+',
        choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
        default=['bbox'],
        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')

    args = parser.parse_args()
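    # map every requested result type to the same result file; this fork's
    # coco_eval evidently accepts a dict of type -> file as well as a single path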
    results = {m: args.result for m in args.types}
    coco_eval(results, args.types, args.ann, args.max_dets, args.in_detail)
Example #3
def main():
    parser = ArgumentParser(description='COCO Evaluation')
    parser.add_argument(
        '--result',
        default="/data/liphone/detcomp/mmdet-v2/tile/baseline_cut_1000x1000/data_mode=test+.bbox.json",
        help='result file path')
    parser.add_argument(
        '--ann',
        default="/home/lifeng/undone-work/dataset/detection/tile/annotations/cut_1000x1000/cut_1000x1000_test.json",
        help='annotation file path')
    parser.add_argument(
        '--types',
        type=str,
        nargs='+',
        choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
        default=['bbox'],
        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')
    parser.add_argument('--classwise',
                        default=True,
                        action='store_true',
                        help='whether eval class wise ap')
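    # NOTE: default=True combined with action='store_true' makes args.classwise
    # always True; the flag cannot be switched off from the command line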
    args = parser.parse_args()
    coco_eval(args.result, args.types, args.ann, args.max_dets, args.classwise)
Example #4
def main():
    parser = ArgumentParser(description='COCO Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('--ann', help='annotation file path')
    parser.add_argument(
        '--types',
        type=str,
        nargs='+',
        choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
        default=['bbox'],
        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')
    parser.add_argument('--classwise',
                        action='store_true',
                        help='whether eval class wise ap')
    parser.add_argument('--gzsi',
                        action='store_true',
                        help='whether eval class with gzsd setting')
    parser.add_argument('--num-seen-classes', type=int, default=48)
    args = parser.parse_args()
    coco_eval(args.result, args.types, args.ann, args.max_dets, args.classwise,
              args.gzsi, args.num_seen_classes)
Example #5
def main():
    parser = ArgumentParser(description="COCO Evaluation")
    parser.add_argument("result", help="result file path")
    parser.add_argument("--ann", help="annotation file path")
    parser.add_argument(
        "--types",
        type=str,
        nargs="+",
        choices=[
            "proposal_fast",
            "proposal",
            "bbox",
            "segm",
            "keypoint",
        ],
        default=["bbox"],
        help="result types",
    )
    parser.add_argument(
        "--max-dets",
        type=int,
        nargs="+",
        default=[100, 300, 1000],
        help="proposal numbers, only used for recall evaluation",
    )
    args = parser.parse_args()
    coco_eval(args.result, args.types, args.ann, args.max_dets)
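
A hypothetical invocation of the script above (the script name and file paths are placeholders, not taken from the original project):

# python eval_coco.py results.bbox.json --ann instances_val2017.json --types bbox segm --max-dets 100 300 1000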
Example #6
def main():
    parser = ArgumentParser(description='COCO Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument(
        '--ann',
        help='annotation file path',
        default='/media/gzzn/Data/Datasets/ObjectDetection/COCO/annotations/instances_val2017.json'
    )
    parser.add_argument(
        '--types',
        type=str,
        nargs='+',
        choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
        default=['bbox'],
        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')
    parser.add_argument('--class_wise',
                        action='store_true',
                        help='whether eval class wise ap')
    args = parser.parse_args()
    coco_eval(args.result, args.types, args.ann, args.max_dets,
              args.class_wise)
Example #7
File: test.py  Project: zzmcdc/mmdetection
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
            else:
                result_file = args.out + '.json'
                results2json(dataset, outputs, result_file)
            coco_eval(result_file, eval_types, dataset.coco)
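
Example #7 and most of the later test scripts end with the same convert-and-evaluate tail. A distilled sketch of that shared pattern, assuming the mmdetection 1.x helpers coco_eval and results2json imported as in Example #22:

from mmdet.core import coco_eval, results2json


def evaluate_outputs(dataset, outputs, out_path, eval_types):
    """Convert raw detector outputs to COCO-format json and run coco_eval."""
    if eval_types == ['proposal_fast']:
        # proposal_fast is evaluated directly on the pickled outputs
        coco_eval(out_path, eval_types, dataset.coco)
    elif not isinstance(outputs[0], dict):
        # single-task outputs: one json file covers all results
        result_file = out_path + '.json'
        results2json(dataset, outputs, result_file)
        coco_eval(result_file, eval_types, dataset.coco)
    else:
        # multi-head outputs: evaluate each named head separately
        for name in outputs[0]:
            print('\nEvaluating {}'.format(name))
            outputs_ = [out[name] for out in outputs]
            result_file = out_path + '.{}.json'.format(name)
            results2json(dataset, outputs_, result_file)
            coco_eval(result_file, eval_types, dataset.coco)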
Example #8
File: test.py  Project: zxduan90/SA-SSD
def main():
    args = parse_args()

    # if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
    #     raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None

    dataset = utils.get_dataset(cfg.data.val)
    class_names = cfg.data.val.class_names
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            1,
            cfg.data.workers_per_gpu,
            num_gpus=1,
            #collate_fn= cfg.data.collate_fn,
            shuffle=False,
            dist=False)
        outputs = single_test(model, data_loader, args.out, class_names)
    else:
        raise NotImplementedError
    # kitti evaluation
    gt_annos = kitti.get_label_annos(dataset.label_prefix, dataset.sample_ids)
    result = get_official_eval_result(gt_annos,
                                      outputs,
                                      current_classes=class_names)
    print(result)
    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #9
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)

    rank, _ = get_dist_info()
    if args.input and rank == 0:
        print('\n results is {}'.format(args.input))
        root_Path = '/'.join(args.input.split('/')[:-1])
        if not os.path.exists(root_Path):
            os.makedirs(root_Path)
        #mmcv.dump(outputs, args.out)
        outputs = mmcv.load(args.input)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if not args.is_coco:
                #  test VisDrone2019
                if args.eval == ['bbox']:
                    print("eval {}".format(args.eval))
                    test_dataset = cfg.data.test
                    eval_visdrone_det(cfg.work_dir, args.out, test_dataset,
                                      args.is_patch, args.show)
            else:
                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_file = args.out + '.json'
                        results2json(dataset, outputs, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}.json'.format(name)
                            results2json(dataset, outputs_, result_file)
                            coco_eval(result_file, eval_types, dataset.coco)
Example #10
File: test.py  Project: rhythm-A/mm
    def start_testing(self):

        if not self.distributed:
            self.model = MMDataParallel(self.model, device_ids=[0])
            outputs = self.__single_gpu_test()
        else:
            self.model = MMDistributedDataParallel(self.model.cuda())
            outputs = self.__multi_gpu_test()

        dir_name = os.path.dirname(self.out_pkl)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

        rank, _ = get_dist_info()
        if self.out_pkl and rank == 0:
            print('\nwriting results to {}'.format(self.out_pkl))
            mmcv.dump(outputs, self.out_pkl)
            eval_types = self.eval_types
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = self.out_pkl
                    coco_eval(result_file, eval_types, self.dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(self.dataset, outputs,
                                                    self.out_pkl)
                        coco_eval(result_files, eval_types, self.dataset.coco)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = self.out_pkl + '.{}'.format(name)
                            result_files = results2json(
                                self.dataset, outputs_, result_file)
                            coco_eval(result_files, eval_types,
                                      self.dataset.coco)

        # Save predictions in the COCO json format
        if self.out_json and rank == 0:
            if not isinstance(outputs[0], dict):
                results2json(self.dataset, outputs, self.out_json)
            else:
                for name in outputs[0]:
                    outputs_ = [out[name] for out in outputs]
                    result_file = self.out_json + '.{}'.format(name)
                    results2json(self.dataset, outputs_, result_file)
Example #11
def main():
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    if args.saved_preds is None:
        outputs = get_outputs(args)
    else:
        outputs = mmcv.load(args.saved_preds)

    dataset = build_dataset(cfg.data.test)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #12
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    outputs = mmcv.load(args.out)
    eval_types = args.eval
    if eval_types:
        print('Starting evaluate {}'.format(' and '.join(eval_types)))
        if eval_types == ['proposal_fast']:
            result_file = args.out
            coco_eval(result_file, eval_types, dataset.coco)
        else:
            if not isinstance(outputs[0], dict):
                result_files = results2json(dataset, outputs, args.out)
                coco_eval(result_files, eval_types, dataset.coco)
            else:
                for name in outputs[0]:
                    print('\nEvaluating {}'.format(name))
                    outputs_ = [out[name] for out in outputs]
                    result_file = args.out + '.{}'.format(name)
                    result_files = results2json(dataset, outputs_, result_file)
                    coco_eval(result_files, eval_types, dataset.coco)
Example #13
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    prefix = './mrcnn_r50_dual'
    # prefix = './mrcnn_r50_ag_cocolt'

    print(prefix)

    set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    set4 = mmcv.load('./{}_set4.pkl'.format(prefix))
    set5 = mmcv.load('./{}_set5.pkl'.format(prefix))
    set6 = mmcv.load('./{}_set6.pkl'.format(prefix))
    set7 = mmcv.load('./{}_set7.pkl'.format(prefix))

    set_combine = set0 + set1 + set2 + set3 + set4 + set5 + set6 + set7
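    # the concatenated list is assumed to follow the dataset's image order so
    # that results2json can match each entry to its image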

    print('start eval')
    if hasattr(dataset, 'coco'):
        result_files = results2json(dataset, set_combine, args.out, dump=False)
        coco_eval(result_files, args.eval, dataset.coco)

    elif hasattr(dataset, 'lvis'):
        result_files = results2json(dataset, set_combine, args.out, dump=False)
        lvis_eval(result_files, args.eval, dataset.lvis)
Example #14
def main():
    args = parse_args()
    search_cfg = mmcv_config.fromfile(args.fb_cfg)
    _space = search_cfg.search_space
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    # cfg = mmcv.Config.fromfile(args.config)
    cfg = mmcv.Config.fromfile(args.model_cfg)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = detection(mmcv_config(cfg['model_cfg']),
                          mmcv_config(cfg['train_cfg']),
                          mmcv_config(cfg['test_cfg']), _space, args.theta_txt)
        # model = build_detector(
        #     cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #15
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(
        (".pkl", ".pickle")
    ):
        raise ValueError("The output file must be a pkl file.")

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(
        cfg.data.test, datasets, dict(test_mode=True)
    )
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg
        )
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False,
        )
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop("type"))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu,
        )

    if args.out:
        print("writing results to {}".format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print(
                "Starting evaluate {}".format(
                    " and ".join(eval_types)
                )
            )
            if eval_types == ["proposal_fast"]:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + ".json"
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print("\nEvaluating {}".format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + ".{}.json".format(
                            name
                        )
                        results2json(dataset, outputs_, result_file)
                        coco_eval(
                            result_file, eval_types, dataset.coco
                        )
Example #16
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    for t in cfg.data.test:
        t.test_mode = True
    cfg.out_path = args.out.split('.pkl')[0] if args.out else None

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the model and load checkpoint
    model = build_detector(
        cfg.model, train_cfg=None, test_cfg=cfg.test_cfg, global_cfg=cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.det_ckpt is not None:
        print('Loading detection models...')
        det_ckpt = load_checkpoint(model, args.det_ckpt, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
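        # NOTE: dataset is not defined until the per-dataset loop below, so this
        # fallback raises a NameError if the checkpoint has no 'CLASSES' meta entry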
        model.CLASSES = dataset.CLASSES

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    if not type(cfg.data.test) == list:
        cfg.data.test = [cfg.data.test]

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(model.cuda())

    outputs = dict()
    for c in cfg.data.test:
        dataset = build_dataset(c)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)

        if not distributed:
            results = single_gpu_test(model, data_loader, args.out, args.show)
            if results is not None:
                outputs.update(results)
        else:
            outputs.update(multi_gpu_test(model, data_loader, args.tmpdir))

    rank, _ = get_dist_info()
    if len(outputs.keys()) > 0 and args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        if not (args.out.endswith('.pkl') or args.out.endswith('.json')):
            args.out += '.pkl'
        if 'track_results' in outputs.keys():
            mmcv.dump(outputs['track_results'], args.out)
        else:
            mmcv.dump(outputs, args.out)
        if 'bbox_results' in outputs.keys():
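            # coco_eval is handed the annotation file path rather than a
            # pycocotools COCO object; the mmdetection 1.x helper accepts either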
            result_files = results2json(dataset, outputs['bbox_results'], args.out)
            coco_eval(result_files, ['bbox', 'segm'], cfg.data.test[0].ann_file)
        if 'segm_results' in outputs.keys():
            result_files = results2json(dataset, [(b, s) for b, s in zip(outputs['bbox_results'], outputs['segm_results'])], args.out)
            coco_eval(result_files, ['segm'], cfg.data.test[0].ann_file)
        # if 'new_bbox_results' in outputs.keys():
        #     # For tracking
        #     result_files = results2json(dataset, outputs['new_bbox_results'],
        #                                 args.out)
        #     coco_eval(result_files, ['bbox'], cfg.data.test[0].ann_file)
        if 'track_results' in outputs.keys():
            print("Evaluating box tracking...")
            mdat_eval(outputs['track_results'], dataset, args.out, cfg)
        if 'segm_track_results' in outputs.keys():
            print("Evaluating segmentation tracking...")
            mdat_eval(outputs['segm_track_results'], dataset, args.out, cfg, with_mask=True)
Example #17
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.ann_file is not None:
        cfg.data.test.ann_file = args.ann_file
    if args.img_prefix is not None:
        cfg.data.test.img_prefix = args.img_prefix
    if args.flip:
        cfg.data.test.flip_ratio = 1

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #18
def main():
    args = parse_args()

    if os.path.isdir(args.checkpoint):
        print(args.checkpoint)
        checkpoints = glob.glob(args.checkpoint + '/epoch_*.pth')
        checkpoints = sorted(
            checkpoints,
            key=lambda x: int(x.split('epoch_')[-1].split('.')[0]))
        print(checkpoints)
        if args.out is not None:
            if not os.path.exists(args.out):
                os.mkdir(args.out)
            elif os.path.isfile(args.out):
                raise ValueError('args.out must be a directory.')
        # Create TensorBoard writer for output checkpoint dir.
        tensorboard_writer = SummaryWriter(args.out)
    else:
        checkpoints = [args.checkpoint]
        tensorboard_writer = None
        if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
            raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    for i, checkpoint in enumerate(checkpoints):
        outpath = args.out
        if os.path.isdir(args.checkpoint):
            outpath = args.out + '/%d_out.pkl' % i

        if not os.path.exists(outpath):
            if args.gpus == 1:
                model = build_detector(cfg.model,
                                       train_cfg=None,
                                       test_cfg=cfg.test_cfg)
                load_checkpoint(model, checkpoint)
                model = MMDataParallel(model, device_ids=[0])

                data_loader = build_dataloader(
                    dataset,
                    imgs_per_gpu=1,
                    workers_per_gpu=cfg.data.workers_per_gpu,
                    num_gpus=1,
                    dist=False,
                    shuffle=False)
                outputs = single_test(model, data_loader, args.show)
            else:
                model_args = cfg.model.copy()
                model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
                model_type = getattr(detectors, model_args.pop('type'))
                outputs = parallel_test(model_type,
                                        model_args,
                                        checkpoint,
                                        dataset,
                                        _data_func,
                                        range(args.gpus),
                                        workers_per_gpu=args.proc_per_gpu)

        # TODO: Currently assume test set is same size as training set.
        num_iters = (i + 1) * len(dataset)
        if outpath:
            if os.path.exists(outpath):
                print('reading results from {}'.format(outpath))
                outputs = mmcv.load(outpath)
            else:
                print('writing results to {}'.format(outpath))
                mmcv.dump(outputs, outpath)
            eval_types = args.eval
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = outpath
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_file = outpath + '.json'
                        results2json(dataset, outputs, result_file)
                        results_dict = coco_eval(result_file, eval_types,
                                                 dataset.coco)
                        if tensorboard_writer:
                            for eval_type in eval_types:
                                out = capture_stdout(lambda: results_dict[
                                    eval_type].summarize())
                                for line in out.split('\n')[:-1]:
                                    parts = line.split('=')
                                    name, score = '='.join(parts[:-1]), float(
                                        parts[-1])
                                    tensorboard_writer.add_scalar(
                                        'eval/' + name, score, num_iters)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = outpath + '.{}.json'.format(name)
                            results2json(dataset, outputs_, result_file)
                            results_dict = coco_eval(result_file, eval_types,
                                                     dataset.coco)
                            if tensorboard_writer:
                                for eval_type in eval_types:
                                    out = capture_stdout(lambda: results_dict[
                                        eval_type].summarize())
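
Example #18 relies on a capture_stdout helper that is not shown in the snippet. A minimal standard-library sketch of what such a helper could look like (an assumption, not the project's actual implementation):

import io
from contextlib import redirect_stdout


def capture_stdout(fn):
    """Call fn() and return everything it printed to stdout as a string."""
    buf = io.StringIO()
    with redirect_stdout(buf):
        fn()
    return buf.getvalue()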
Example #19
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # VOCDataset(ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
    #           img_prefix=data_root + 'VOC2007/',
    #           img_scale=(300, 300),
    #           img_norm_cfg=img_norm_cfg,
    #           size_divisor=None,
    #           flip_ratio=0,
    #           with_mask=False,
    #           with_label=False,
    #           test_mode=True,
    #           resize_keep_ratio=False)
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        # build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
        # SingleStageDetector(pretrained=..., backbone=..., neck=..., bbox_head=...,
        #                     train_cfg=None, test_cfg=...)

        # First, the registries BACKBONES, NECKS, ROI_EXTRACTORS, HEADS and DETECTORS are created.
        # Then the classes are registered: BACKBONES.register_module(class SSDVGG), @HEADS.register_module(class AnchorHead),
        #     @HEADS.register_module(class SSDHead), @DETECTORS.register_module(class SingleStageDetector).
        # Finally, build_detector() is equivalent to SingleStageDetector(**args).
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #20
def main():
    # import os
    # os.environ['CUDA_VISIBLE_DEVICES'] = '2'

    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    global work_dirs
    work_dirs = os.path.dirname(args.checkpoint)
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        import torch.distributed as dist
        dist.init_process_group('gloo',
                                init_method='file:///tmp/somefile',
                                rank=0,
                                world_size=1)
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        # pass
        model = MMDataParallel(model, device_ids=[0])
        # model = MMDataParallel(model, device_ids=[3])
        outputs = single_gpu_test(model, data_loader, args.show, args.log_dir)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:

        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        # import pickle
        # F=open(r'/disk2/zzr/work_dirs/PANet_r50_isaid/epoch_12_test_on_val_opencv_remap_maxdets_1000.pkl','rb')
        # outputs = pickle.load(F)
        outputs = tran2obb_results(outputs)
        # outputs = tran2mix_results(outputs)
        # outputs = trans2ms_results(outputs)
        # outputs = trans2mix_results(outputs)
        # outputs = trans2mask_results(outputs)
        # outputs = trans2hbb_results(outputs)
        # outputs = trans2mask_score(outputs)
        # outputs = trans2mask_results_V2(outputs)
        # outputs = assembel_mask(outputs)
        # outputs = assembel_mask_V2(outputs)

        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #21
File: test.py  Project: SIAnalytics/roas
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    imgs_per_gpu = cfg.data.test.pop('imgs_per_gpu', 1)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=imgs_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.log_dir)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if args.csv:
            csv_path = (args.out).replace('.pkl', '.csv')
            print('\nwriting results as csv to {}'.format(csv_path))
            convert_output_to_csv(dataset, outputs, csv_path)

        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #22
import mmcv
from mmdet.core import coco_eval, results2json
from mmdet.datasets import build_dataset
import os

os.chdir('..')  # change dir to mmdetection root

# eval_types = ['bbox']
# eval_types = ['proposal']
eval_types = ['proposal', 'bbox']

config = 'configs/faster_rcnn_r50_fpn_1x_train.py'

result_files = 'checkpoints'
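# note: the 'checkpoints' value above is a leftover and is overwritten below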
# result_files = 'results/2019-09-15_train/eval_epoch2.pkl.bbox.json'
result_files = 'results/2019-09-15_train/eval_epoch8.pkl.bbox.json'

# outputs = mmcv.load(result_files)[0]

cfg = mmcv.Config.fromfile(config)

dataset = build_dataset(cfg.data.test)

# result_files = results2json(dataset, outputs, args.out)
coco_eval(result_files, eval_types, dataset.coco)

print('Done!')
Example #23
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    print('load model from {}'.format(cfg.load_from))
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # set0 = mmcv.load('../liyu_mmdet/set0.pkl')
    # set1 = mmcv.load('../liyu_mmdet/set1.pkl')
    # set2 = mmcv.load('../liyu_mmdet/set2.pkl')
    # set3 = mmcv.load('../liyu_mmdet/set3.pkl')
    # set4 = mmcv.load('../liyu_mmdet/set4.pkl')
    # set5 = mmcv.load('../liyu_mmdet/set5.pkl')
    # set6 = mmcv.load('../liyu_mmdet/set6.pkl')
    # set7 = mmcv.load('../liyu_mmdet/set7.pkl')
    # set0 = mmcv.load('./set0.pkl')
    # set1 = mmcv.load('./set1.pkl')
    # set2 = mmcv.load('./set2.pkl')
    # set3 = mmcv.load('./set3.pkl')
    # set4 = mmcv.load('./set4.pkl')
    # set5 = mmcv.load('./set5.pkl')
    # set6 = mmcv.load('./set6.pkl')
    # set7 = mmcv.load('./set7.pkl')
    # set_combine = set0+set1+set2+set3+set4+set5+set6+set7
    # prefix = 'mrcnnr50_14.3_clshead'
    # set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    # set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    # set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    # set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    # set_combine = set0+set1+set2+set3

    # prefix = '/mrcnnr50_ag_coco_clshead'
    prefix = 'mrcnnr50_ag_3fc_ft_cocolongtail_cat400_epoch_2'
    prefix = 'mrcnn_r50_ag_cocolt'
    print(prefix)

    set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    set4 = mmcv.load('./{}_set4.pkl'.format(prefix))
    set5 = mmcv.load('./{}_set5.pkl'.format(prefix))
    set6 = mmcv.load('./{}_set6.pkl'.format(prefix))
    set7 = mmcv.load('./{}_set7.pkl'.format(prefix))

    # set0 = mmcv.load('./set0.pkl')
    # set1 = mmcv.load('./set1.pkl')
    # set2 = mmcv.load('./set2.pkl')
    # set3 = mmcv.load('./set3.pkl')
    # set4 = mmcv.load('./set4.pkl')
    # set5 = mmcv.load('./set5.pkl')
    # set6 = mmcv.load('./set6.pkl')
    # set7 = mmcv.load('./set7.pkl')
    set_combine = set0 + set1 + set2 + set3 + set4 + set5 + set6 + set7

    # set_liyu = mmcv.load('../mmdet_ensemble/results319.pkl')

    # mmcv.dump(set_combine, args.out)
    # result_files = results2json(dataset, set_combine,
    #                             args.out)
    print('pkl result dumped, start eval')
    # result_files = results2json(dataset, set_combine,
    #                             args.out, dump=False)
    #
    # lvis_eval(result_files, args.eval, dataset.lvis)

    result_files = results2json(dataset, set_combine, args.out, dump=False)
    coco_eval(result_files, args.eval, dataset.coco)
Example #24
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    while not osp.isfile(args.checkpoint):
        print('Waiting for {} to exist...'.format(args.checkpoint))
        time.sleep(60)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # assert not distributed
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    if dataset.ann_file == 'data/coco/annotations/image_info_test-dev2017.json':
                        result_files = results2json_segm(dataset,
                                                         outputs,
                                                         args.out,
                                                         dump=True)
                    else:
                        result_files = results2json_segm(dataset,
                                                         outputs,
                                                         args.out,
                                                         dump=False)
                    if 'lvis' in dataset.ann_file:  ## an ugly fix to make it compatible with coco eval
                        from lvis import LVISEval
                        lvisEval = LVISEval(cfg.data.test.ann_file,
                                            result_files, 'segm')
                        lvisEval.run()
                        lvisEval.print_results()
                        # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                        lvisEval.params.iou_thrs[8] = 0.9
                        for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                            print('AP at iou {}: {}'.format(
                                iou, lvisEval._summarize('ap', iou_thr=iou)))
                    else:
                        coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset,
                                                    outputs_,
                                                    result_file,
                                                    dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

        ## eval on lvis-77 ##
        cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
        cfg.data.test.img_prefix = 'data/lvis/val2017/'
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig=model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset,
                                                     outputs,
                                                     'xxx',
                                                     dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval(
                        'data/lvis/lvis_v0.5_val_cocofied.json', result_files,
                        'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(
                            iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset,
                                                    outputs_,
                                                    result_file,
                                                    dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
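
coco_eval and results2json above are mmdetection helpers. As a rough sketch of what the bbox evaluation boils down to, the same numbers can be obtained with pycocotools directly; the annotation and result file paths below are placeholders.

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO('instances_val2017.json')        # ground-truth annotations (placeholder path)
coco_dt = coco_gt.loadRes('results.bbox.json')  # detections in COCO json format (placeholder path)
evaluator = COCOeval(coco_gt, coco_dt, iouType='bbox')
evaluator.evaluate()
evaluator.accumulate()
evaluator.summarize()  # prints the standard AP/AR table
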
Example #25
0
def _non_dist_train(model,
                    dataset,
                    cfg,
                    validate=False,
                    logger=None,
                    timestamp=None):
    if validate:
        raise NotImplementedError('Built-in validation is not implemented '
                                  'yet for non-distributed training. Use '
                                  'distributed training, or the test.py and '
                                  '*eval.py scripts instead.')
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False) for ds in dataset
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # # if model.module.bbox_head.freeze_solov2_and_train_combonly:
    # if model.module.bbox_head.optimize_list is not None:
    #     for (key, param) in model.named_parameters():
    #         # if 'kernel_convs_convcomb' not in key and 'context_fusion_convs' not in key and 'learned_weight' not in key:
    #         if not any(s in key for s in model.module.bbox_head.optimize_list):
    #             param.requires_grad=False
    #         else:
    #             # print('optimize {}'.format(key))
    #             logger.info('optimize {}'.format(key))

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(
        model, batch_processor, optimizer, cfg.work_dir, logger=logger)
    # an ugly workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=False)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)

    ## add test after training
    if cfg.data.test.ann_file != 'data/lvis/lvis_v0.5_val_lvis_freqset.json': # if val set is lvis freq, only eval on lvis-freq val set
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        model_orig = model.module
        # re-wrap the underlying module on a single GPU for testing
        model = MMDataParallel(model_orig, device_ids=[0]).cuda()
        # quick check: evaluate only the first 100 images
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

        ## eval on lvis-77 ##
        cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
        cfg.data.test.img_prefix = 'data/lvis/val2017/'
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig=model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval('data/lvis/lvis_v0.5_val_cocofied.json', result_files, 'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)
    else:
        ## eval on lvis-freq ##
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig=model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval(cfg.data.test.ann_file, result_files, 'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)
Example #26
0
def main():
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset_type = 'OIDSegDataset'
    data_root = 'gs://oid2019/data/'
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True)
    dataset = get_dataset(
        dict(type=dataset_type,
             ann_file='/home/bo_liu/' + args.ann_file,
             img_prefix=data_root +
             ('val/'
              if args.ann_file == 'seg_val_2844_ann.pkl' else 'OD_test/'),
             img_scale=(1333, 800),
             img_norm_cfg=img_norm_cfg,
             size_divisor=32,
             flip_ratio=0,
             with_mask=True,
             with_label=False,
             test_mode=True))

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)
    # build the model and load checkpoint
    test_cfg = mmcv.ConfigDict(
        dict(
            rpn=dict(nms_across_levels=False,
                     nms_pre=1000,
                     nms_post=1000,
                     max_num=1000,
                     nms_thr=0.7,
                     min_bbox_size=0),
            rcnn=dict(
                score_thr=args.thres,
                # score_thr=0.0,
                nms=dict(type=args.nms_type, iou_thr=0.5),
                max_per_img=args.max_per_img,
                mask_thr_binary=0.5),
            keep_all_stages=False))
    model = build_detector(cfg.model, train_cfg=None, test_cfg=test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('Evaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
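
results2json converts mmdetection-style outputs into COCO json records. Below is a minimal sketch of that conversion for plain bbox results, assuming results is a list (one entry per image) of per-class (N, 5) arrays holding [x1, y1, x2, y2, score], and that img_ids/cat_ids come from the dataset as in the snippets above; the function name is illustrative.

import mmcv


def det_results_to_coco_json(results, img_ids, cat_ids, out_file):
    """Convert per-image, per-class [x1, y1, x2, y2, score] arrays to COCO json."""
    json_results = []
    for img_id, per_class in zip(img_ids, results):
        for label, bboxes in enumerate(per_class):
            for x1, y1, x2, y2, score in bboxes:
                json_results.append(dict(
                    image_id=img_id,
                    category_id=cat_ids[label],
                    # COCO uses [x, y, width, height]
                    bbox=[float(x1), float(y1), float(x2 - x1), float(y2 - y1)],
                    score=float(score)))
    mmcv.dump(json_results, out_file)  # out_file should end with .json
    return json_results
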
Example #27
0
def main(args):
    if args.model.endswith('.onnx'):
        backend = 'onnx'
    elif args.model.endswith('.xml'):
        backend = 'openvino'
    else:
        raise ValueError('Unknown model type.')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    if backend == 'openvino':
        assert cfg.data.test.pipeline[1]['type'] == 'MultiScaleFlipAug'
        # The exported OpenVINO model is assumed to apply input normalization
        # itself, so neutralize the Normalize transform in the test pipeline.
        normalize_idx = [
            i for i, v in enumerate(cfg.data.test.pipeline[1]['transforms'])
            if v['type'] == 'Normalize'
        ][0]
        cfg.data.test.pipeline[1]['transforms'][normalize_idx]['mean'] = [0.0, 0.0, 0.0]
        cfg.data.test.pipeline[1]['transforms'][normalize_idx]['std'] = [1.0, 1.0, 1.0]
        cfg.data.test.pipeline[1]['transforms'][normalize_idx]['to_rgb'] = False
        print(cfg.data.test)

    if args.video is not None and args.show:
        dataset = VideoDataset(int(args.video), cfg.data)
        data_loader = iter(dataset)
        wait_key = 1
    else:
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        wait_key = -1

    # Valid classes + background.
    classes_num = len(dataset.CLASSES) + 1

    if backend == 'openvino':
        from mmdet.utils.deployment import DetectorOpenVINO
        model = DetectorOpenVINO(args.model,
                                 args.model[:-3] + 'bin',
                                 mapping_file_path=args.model[:-3] + 'mapping',
                                 cfg=cfg,
                                 classes=dataset.CLASSES)
    else:
        from mmdet.utils.deployment import ModelONNXRuntime
        model = ModelONNXRuntime(args.model, cfg=cfg, classes=dataset.CLASSES)

    results = []
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        im_data = data['img'][0].cpu().numpy()
        try:
            result = model(im_data)
            result = postprocess(
                result,
                data['img_meta'][0].data[0],
                num_classes=classes_num,
                rescale=not args.show)
        except Exception as ex:
            print('\nException raised while processing item {}:'.format(i))
            print(ex)
            result = empty_result(
                num_classes=classes_num,
                with_mask=model.pt_model.with_mask)
        results.append(result)

        if args.show:
            model.show(data, result, score_thr=args.score_thr, wait_time=wait_key)

        batch_size = data['img'][0].size(0)
        for _ in range(batch_size):
            prog_bar.update()

    print('')
    print('Writing results to {}'.format(args.out))
    mmcv.dump(results, args.out)

    eval_types = args.eval
    if eval_types:
        print('Starting to evaluate {}'.format(' and '.join(eval_types)))
        if eval_types == ['proposal_fast']:
            result_file = args.out
            coco_eval(result_file, eval_types, dataset.coco)
        else:
            if not isinstance(results[0], dict):
                result_files = results2json(dataset, results, args.out)
                coco_eval(result_files, eval_types, dataset.coco)
            else:
                for name in results[0]:
                    print('\nEvaluating {}'.format(name))
                    outputs_ = [out[name] for out in results]
                    result_file = args.out + '.{}'.format(name)
                    result_files = results2json(dataset, outputs_,
                                                result_file)
                    coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out:
        if not isinstance(results[0], dict):
            results2json(dataset, results, args.json_out)
        else:
            for name in results[0]:
                outputs_ = [out[name] for out in results]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
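
ModelONNXRuntime and DetectorOpenVINO above are project-specific wrappers. For orientation, bare ONNX Runtime inference on one preprocessed image tensor looks roughly like this; the model path and dummy input shape are placeholders, and the wrapper is assumed to add mmdet-specific decoding on top of the raw outputs.

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('detector.onnx', providers=['CPUExecutionProvider'])  # placeholder path
input_name = session.get_inputs()[0].name
im_data = np.zeros((1, 3, 800, 1333), dtype=np.float32)   # dummy NCHW input tensor
raw_outputs = session.run(None, {input_name: im_data})    # list of raw output arrays
# detector-specific postprocessing (boxes, labels, masks) would follow this step
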
Example #28
0
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)
    
    # run evaluation
    import zipfile
    from mmdet.core.evaluation.icdar_evaluation import icdar_eval
    import os

    pt_zip_dir = os.path.join('../output', 'pt.zip')
    output_pt_dir = os.path.join('../output', 'pt/')
    z = zipfile.ZipFile(pt_zip_dir, 'w', zipfile.ZIP_DEFLATED)

    for dirpath, dirnames, filenames in os.walk(output_pt_dir):
        for filename in filenames:
            z.write(os.path.join(dirpath, filename), filename)
    z.close()

    # use the ICDAR evaluation protocol
    if args.dataset == 'icdar2015':
        gt_zip_dir = './work_dirs/gt_ic15.zip'
    elif args.dataset == 'icdar2013':
        gt_zip_dir = './work_dirs/gt_ic13.zip'
    elif args.dataset == 'td500':
        gt_zip_dir = './work_dirs/gt_td500.zip'
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    param_dict = dict(
        g=gt_zip_dir,  # ground-truth zip file path
        s=pt_zip_dir,  # prediction zip file path
    )
    result_dict = icdar_eval(param_dict)
    
    print(result_dict)
    for i in range(6):
        print('')

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
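
The zipping step above writes every prediction file into the archive root under its bare filename. A small sketch of the same step with a context manager and relative arcnames, in case nested prediction folders ever need to be preserved; paths are placeholders.

import os
import zipfile


def zip_predictions(src_dir, zip_path):
    """Zip every file under src_dir, keeping paths relative to src_dir."""
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for dirpath, _, filenames in os.walk(src_dir):
            for filename in filenames:
                full_path = os.path.join(dirpath, filename)
                zf.write(full_path, arcname=os.path.relpath(full_path, src_dir))


# hypothetical usage:
# zip_predictions('../output/pt/', '../output/pt.zip')
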
Example #29
0
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]
    
    # print('args.config:', args.config)  # must point to the config file
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #30
0
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## evaluate on only the first 20 images for a quick check; comment out to use the full set
    dataset.img_infos = dataset.img_infos[:20]

    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


    def load_ckpt(ncm_model, cal_head):
        print('load cls head {}'.format('{}/{}.pth'.format(cfg.work_dir, cal_head)))
        # epoch = torch.load('{}/{}_epoch.pth'.format(cfg.work_dir, cal_head))
        load_checkpoint(ncm_model, '{}/{}.pth'.format(cfg.work_dir, cal_head))
        # return epoch

    print('use {}'.format(args.cal_head))
    if len(dataset.CLASSES) == 1230:  # LVIS
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc().cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc().cuda()
        else:
            raise ValueError('Unknown calibration head: {}'.format(args.cal_head))
    elif len(dataset.CLASSES) == 80:  # COCO
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc(num_classes=81).cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc(num_classes=81).cuda()
        else:
            raise ValueError('Unknown calibration head: {}'.format(args.cal_head))
    else:
        raise ValueError('Unsupported number of classes: {}'.format(len(dataset.CLASSES)))
    # epoch = load_ckpt(calibrated_head, args.head_ckpt)
    load_ckpt(calibrated_head, args.head_ckpt)
    calibrated_head.eval()


    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, calibrated_head, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        calibrated_head = MMDistributedDataParallel(calibrated_head.cuda())
        outputs = multi_gpu_test(model, data_loader, calibrated_head, args.show, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if len(dataset.CLASSES) == 1230:
            if eval_types:

                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    lvis_eval(result_file, eval_types, dataset.lvis)
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, args.out, dump=False)
                        print('Starting to evaluate {}'.format(' and '.join(eval_types)))
                        lvis_eval(result_files, eval_types, dataset.lvis)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}'.format(name)
                            result_files = results2json(dataset, outputs_,
                                                        result_file)
                            lvis_eval(result_files, eval_types, dataset.lvis)
        elif len(dataset.CLASSES) == 80:
            result_files = results2json(dataset, outputs, args.out, dump=False)
            coco_eval(result_files, args.eval, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
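
The class-count branching above (1230 classes for LVIS v0.5, 80 for COCO) can be pulled into one small helper. This is a sketch only, assuming coco_eval, lvis_eval and results2json are importable from the project's evaluation utilities as in the snippets above, and that the dataset exposes .lvis or .coco accordingly.

def evaluate_by_dataset(dataset, outputs, out_prefix, eval_types):
    """Dispatch to the LVIS or COCO evaluator based on the number of classes."""
    result_files = results2json(dataset, outputs, out_prefix, dump=False)
    if len(dataset.CLASSES) == 1230:    # LVIS v0.5
        lvis_eval(result_files, eval_types, dataset.lvis)
    elif len(dataset.CLASSES) == 80:    # COCO
        coco_eval(result_files, eval_types, dataset.coco)
    else:
        raise ValueError(
            'No evaluator configured for {} classes'.format(len(dataset.CLASSES)))
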