Example #1
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
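All of the examples below read the same attributes from args (config, checkpoint, gpus, proc_per_gpu, out, eval, show). A minimal parse_args() sketch consistent with those attribute names is given here; the exact flags and help strings vary per repository, so treat this as an assumption rather than the parser any one example actually uses.

import argparse


def parse_args():
    # Hedged sketch: flag names are inferred from the attributes the examples read.
    parser = argparse.ArgumentParser(description='Test a model and optionally evaluate the results')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--gpus', type=int, default=1, help='number of GPUs to use')
    parser.add_argument('--proc_per_gpu', type=int, default=1, help='worker processes per GPU')
    parser.add_argument('--out', help='output result file (.pkl)')
    parser.add_argument('--eval', type=str, nargs='+', help='evaluation types, e.g. bbox segm proposal_fast')
    parser.add_argument('--show', action='store_true', help='visualize results during testing')
    return parser.parse_args()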
Example #2
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    mmcv.mkdir_or_exist(os.path.join(cfg.work_dir, 'results'))

    print(args)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    if args.val:
        print('now is val mode, with label')
        imgs_path = 'data/visdrone2019/visdrone2019_val_data'
        dataset = obj_from_dict(cfg.data.val, datasets, dict(test_mode=True))
    else:
        print('now is test mode, no label')
        imgs_path = 'data/visdrone2019/visdrone2019-test_data'
        dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        write_result_txt(args.out, outputs, imgs_path)
        if args.val:  # eval
            visdrone_eval(args.out, dataset.gtPath, dataset.ann_file)
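write_result_txt and visdrone_eval are project-specific helpers that are not shown in this example. Assuming the standard VisDrone detection submission format (one txt file per image, rows of <bbox_left>,<bbox_top>,<bbox_width>,<bbox_height>,<score>,<object_category>,<truncation>,<occlusion>), a writer could look roughly like the sketch below; the signature mirrors the call above, everything else is an assumption.

import os


def write_result_txt(out_dir, outputs, imgs_path):
    # Hypothetical sketch: one result file per image, rows in VisDrone order
    # x,y,w,h,score,category,truncation,occlusion (the last two left as -1).
    os.makedirs(out_dir, exist_ok=True)
    img_names = sorted(os.listdir(imgs_path))
    for img_name, dets_per_class in zip(img_names, outputs):
        txt_path = os.path.join(out_dir, os.path.splitext(img_name)[0] + '.txt')
        with open(txt_path, 'w') as f:
            for cls_id, dets in enumerate(dets_per_class, start=1):
                for x1, y1, x2, y2, score in dets:
                    f.write('{:.1f},{:.1f},{:.1f},{:.1f},{:.4f},{},-1,-1\n'.format(
                        x1, y1, x2 - x1, y2 - y1, score, cls_id))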
Example #3
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    eval_type = args.eval
    if eval_type:
        print('Starting evaluate {}'.format(eval_type))

        result_file = osp.join(args.out + '.csv')
        results2csv(dataset, outputs, result_file)

        ava_eval(result_file, eval_type, args.label_file, args.ann_file,
                 args.exclude_file)
Example #4
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        print(outputs)
    """
Example #5
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        cfg = mmcv.Config.fromfile(args.config)
        test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
        voc_eval(args.out, test_dataset, args.iou_thr)
Example #6
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # change nms_iou
    if args.nms_iou != 0:
        cfg.test_cfg['rcnn']['nms']['iou_thr'] = args.nms_iou
    if args.max_per_img != 0:
        cfg.test_cfg['rcnn']['max_per_img'] = args.max_per_img

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.gtdir, args.outdir,
                              cfg.label_vocab,
                              cfg.kitti_ap_hook_cfg['eval_cpg_path'])
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)
Example #7
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)
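Every multi-GPU branch in these examples passes a _data_func callback into parallel_test without showing it. In older mmdetection/mmaction test scripts this helper typically collates a single sample and moves it to the target GPU, along the lines of the sketch below; the exact keyword arguments depend on the model's test interface, so this is only the common convention, not necessarily the definition used by each example.

from mmcv.parallel import collate, scatter


def _data_func(data, device_id):
    # Collate one sample, scatter it to the given GPU, and pack the kwargs
    # expected by forward(return_loss=False, rescale=True, ...).
    data = scatter(collate([data], samples_per_gpu=1), [device_id])[0]
    return dict(return_loss=False, rescale=True, **data)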
Example #8
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[1])

        params = list(model.parameters())
        weight_softmax = np.squeeze(
            params[-2].data.cpu().numpy())  # fully connected layer weights, already converted to numpy

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs, inputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    #print(len(features_blobs))
    #print(features_blobs[0].size())

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    num_videos = len(outputs)
    class_name = 'YoYo'
    os.mkdir('data/CAM_imgs/' + class_name)

    for k in range(0, num_videos):
        os.mkdir('data/CAM_imgs/' + class_name + '/CAMs_{:02d}'.format(k))
        idx = get_top_5_index("tools/results.pkl", k)  # change the dir of results.pkl to tools/
        conv_feat = pickle.load(
            open("tools/hook_features/feat_{:02d}.pkl".format(k), 'rb'),
            encoding='utf-8')
        conv_feat = conv_feat.cpu().numpy()
        CAMs = returnCAM(conv_feat, weight_softmax,
                         [idx[0]])  # generate class activation mapping for the top-1 prediction
        single_input = inputs[k].numpy()
        writeCAMs(class_name, CAMs, single_input, k)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Example #9
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)
    
    # run evaluation
    import zipfile
    from mmdet.core.evaluation.icdar_evaluation import icdar_eval
    import os

    pt_zip_dir = os.path.join('../output', 'pt.zip')
    output_pt_dir = os.path.join('../output', 'pt/')
    z = zipfile.ZipFile(pt_zip_dir, 'w', zipfile.ZIP_DEFLATED)

    for dirpath, dirnames, filenames in os.walk(output_pt_dir):
        for filename in filenames:
            z.write(os.path.join(dirpath, filename), filename)
    z.close()

    # 3. use icdar eval
    if args.dataset == 'icdar2015':
        gt_zip_dir = './work_dirs/gt_ic15.zip'
    elif args.dataset == 'icdar2013':
        gt_zip_dir = './work_dirs/gt_ic13.zip'
    elif args.dataset == 'td500':
        gt_zip_dir = './work_dirs/gt_td500.zip'
    param_dict = dict(
        g=gt_zip_dir,  # gt zip file path
        s=pt_zip_dir,  # prediction zip file path
    )
    result_dict = icdar_eval(param_dict)
    
    print(result_dict)
    for i in range(6):
        print('')

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #10
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # VOCDataset(ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
    #           img_prefix=data_root + 'VOC2007/',
    #           img_scale=(300, 300),
    #           img_norm_cfg=img_norm_cfg,
    #           size_divisor=None,
    #           flip_ratio=0,
    #           with_mask=False,
    #           with_label=False,
    #           test_mode=True,
    #           resize_keep_ratio=False)
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        # build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
        # SingleStageDetector(pretrained=..., backbone=..., neck=..., bbox_head=...,
        #                     train_cfg=None, test_cfg=...)

        # First the BACKBONES, NECKS, ROI_EXTRACTORS, HEADS and DETECTORS registries are set up,
        # then modules register themselves: BACKBONES.register_module(class SSDVGG), @HEADS.register_module(class AnchorHead),
        #     @HEADS.register_module(class SSDHead), @DETECTORS.register_module(class SingleStageDetector).
        # Finally build_detector() is roughly equivalent to SingleStageDetector(**args).
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
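The translated comments in the single-GPU branch above describe mmdetection's registry pattern: classes such as SSDVGG, SSDHead and SingleStageDetector register themselves into module registries, and build_detector() then looks up cfg.model.type and instantiates that class with the remaining config fields plus train_cfg/test_cfg as keyword arguments. A stripped-down sketch of that mechanism (not the actual mmcv implementation) looks like this:

class Registry:
    # Minimal stand-in for mmcv's registry: maps a class name to the class object.
    def __init__(self, name):
        self.name = name
        self._module_dict = {}

    def register_module(self, cls):
        self._module_dict[cls.__name__] = cls
        return cls

    def get(self, name):
        return self._module_dict[name]


DETECTORS = Registry('detector')


def build_from_cfg(cfg, registry, default_args=None):
    # cfg holds a 'type' key naming the registered class; everything else,
    # plus default_args such as train_cfg/test_cfg, becomes constructor kwargs.
    args = dict(cfg)
    cls = registry.get(args.pop('type'))
    if default_args:
        args.update(default_args)
    return cls(**args)


@DETECTORS.register_module
class SingleStageDetector:
    def __init__(self, backbone, train_cfg=None, test_cfg=None):
        self.backbone = backbone


# build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) then amounts to:
model = build_from_cfg(dict(type='SingleStageDetector', backbone='vgg16'),
                       DETECTORS, dict(train_cfg=None, test_cfg=None))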
Example #11
def main():
    args = parse_args()

    assert args.out, ('Please specify the output path for results')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    if cfg.model.get('necks', None) is not None:
        cfg.model.necks.aux_head_config = None

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8
    if args.fcn_testing:
        cfg.model['cls_head'].update({'fcn_testing': True})
        cfg.model.update({'fcn_testing': True})
    if args.flip:
        cfg.model.update({'flip': True})

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.ignore_cache and args.out is not None:
        if not distributed:
            if args.gpus == 1:
                model = build_recognizer(cfg.model,
                                         train_cfg=None,
                                         test_cfg=cfg.test_cfg)
                load_checkpoint(model,
                                args.checkpoint,
                                strict=True,
                                map_location='cpu')
                model = MMDataParallel(model, device_ids=[0])

                data_loader = build_dataloader(
                    dataset,
                    imgs_per_gpu=1,
                    workers_per_gpu=cfg.data.workers_per_gpu,
                    num_gpus=1,
                    dist=False,
                    shuffle=False)
                outputs = single_test(model, data_loader)
            else:
                model_args = cfg.model.copy()
                model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
                model_type = getattr(recognizers, model_args.pop('type'))

                outputs = parallel_test(model_type,
                                        model_args,
                                        args.checkpoint,
                                        dataset,
                                        _data_func,
                                        range(args.gpus),
                                        workers_per_gpu=args.proc_per_gpu)
        else:
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                dist=distributed,
                shuffle=False)
            model = build_recognizer(cfg.model,
                                     train_cfg=None,
                                     test_cfg=cfg.test_cfg)
            load_checkpoint(model,
                            args.checkpoint,
                            strict=True,
                            map_location='cpu')
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    else:
        try:
            if distributed:
                rank, _ = get_dist_info()
                if rank == 0:
                    outputs = mmcv.load(args.out)
            else:
                outputs = mmcv.load(args.out)
        except:
            raise FileNotFoundError

    rank, _ = get_dist_info()
    if args.out:
        if rank == 0:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
            gt_labels = []
            for i in range(len(dataset)):
                ann = dataset.get_ann_info(i)
                gt_labels.append(ann['label'])

            results = []
            for res in outputs:
                res_list = [res[i] for i in range(res.shape[0])]
                results += res_list
            results = results[:len(gt_labels)]
            print('results_length', len(results))
            top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
            mean_acc = mean_class_accuracy(results, gt_labels)
            non_mean_acc = non_mean_class_accuracy(results, gt_labels)
            if args.log:
                f = open(args.log, 'w')
                f.write(f'Testing ckpt from {args.checkpoint}\n')
                f.write(f'Testing config from {args.config}\n')
                f.write("Mean Class Accuracy = {:.04f}\n".format(mean_acc *
                                                                 100))
                f.write("Top-1 Accuracy = {:.04f}\n".format(top1 * 100))
                f.write("Top-5 Accuracy = {:.04f}\n".format(top5 * 100))
                f.close()
            else:
                print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
                print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
                print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
                print("Non mean Class Accuracy", non_mean_acc)
                print('saving non_mean acc')
Example #12
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.num_classes is not None and args.num_classes > 0:
        cfg.num_test_classes = args.num_classes
    if args.data_dir is not None:
        cfg = update_data_paths(cfg, args.data_dir)

    assert args.mode in cfg.data
    data_cfg = getattr(cfg.data, args.mode)
    data_cfg.test_mode = True

    dataset = obj_from_dict(data_cfg, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=False)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    results = np.array([res.cpu().numpy().mean(axis=0) for res in outputs],
                       dtype=np.float32)

    if cfg.data.num_test_classes is not None and cfg.data.num_test_classes > 0:
        results = results[:, :cfg.data.num_test_classes]

    top1_value = mean_top_k_accuracy(results, gt_labels, k=1)
    top5_value = mean_top_k_accuracy(results, gt_labels, k=5)

    print("\nMean Top-1 Accuracy = {:.03f}%".format(top1_value * 100))
    print("Mean Top-5 Accuracy = {:.03f}%".format(top5_value * 100))

    map_value = mean_average_precision(results, gt_labels)
    print("mAP = {:.03f}%".format(map_value * 100))

    invalid_ids = invalid_filtered(results, gt_labels)
    print('\nNum invalid classes: {} / {}'.format(len(invalid_ids),
                                                  cfg.data.num_test_classes))

    num_invalid_samples = sum([len(ids) for ids in invalid_ids.values()])
    print('Num invalid samples: {} / {}'.format(num_invalid_samples,
                                                len(gt_labels)))
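mean_top_k_accuracy above is the class-balanced variant of top-k accuracy: the top-k hit rate is computed per class and then averaged over classes. A small numpy sketch, assuming results is an (N, num_classes) score array and gt_labels a length-N sequence of class indices:

import numpy as np


def mean_top_k_accuracy(scores, labels, k=1):
    # For each class, the fraction of its samples whose true label is among
    # the k highest-scoring predictions; the per-class values are then averaged.
    scores = np.asarray(scores)
    labels = np.asarray(labels)
    topk = np.argsort(scores, axis=1)[:, -k:]
    hits = np.any(topk == labels[:, None], axis=1)
    return float(np.mean([hits[labels == c].mean() for c in np.unique(labels)]))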
Example #13
def main():
    """针对faster rcnn在voc的评估做微调
    1. args parse用直接输入替代
    2. 
    """
    config_path = './config/cfg_ssd300_vgg16_voc.py'   # 注意:cfg和模型需要匹配,因为不同数据集类别数不一样,  
    checkpoint_path = './weights/myssd/epoch_24.pth'   
    cfg = mmcv.Config.fromfile(config_path)
    out_file = 'dataset_eval_result/results.pkl'  # 注意这里要选择pkl而不能选择json,因为outputs里边包含array,用json无法保存
    eval_type = ['bbox']      # proposal_fast是mmdetection自己的实现
#    eval_type = ['proposal','bbox']   # 这几种是coco api的实现包括['proposal','bbox','segm','keypoints'],已跑通
                                    
    show_result = False   # 这里可以设置show=True从而按顺序显示每张图的测试结果(对于少量图片的数据集可以这么玩)
    
    if not out_file.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
#    cfg.model.pretrained = None

#    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    dataset = get_dataset(cfg.data.test, CocoDataset)
    
    cfg.gpus = 1
    
    if cfg.gpus == 1:
#        model = build_detector(
#            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        model = OneStageDetector(cfg)
        
        load_checkpoint(model, checkpoint_path)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, show=show_result)
        # outputs structure: [img1, ..., imgn], len=5000, i.e. every image in coco val
        # each img: [cls1, ..., clsn], len=80, the predicted bboxes for every class
        # each cls: ndarray(n, 5), the n bboxes predicted for that class (n>0 if the class has predictions, n=0 otherwise); the last column is the confidence score
        # Note: the innermost data structure is an ndarray, which cannot be written to a json file directly; it has to be converted with data.tolist()
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            checkpoint_path,
            dataset,
            _data_func,
            range(cfg.gpus),
            workers_per_gpu=cfg.proc_per_gpu)
    # debug
    
    if out_file:  
        print('writing results to {}'.format(out_file))  
        mmcv.dump(outputs, out_file)  # dump the model's test results to a file first; the file is created if it does not exist
        eval_types = eval_type
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = out_file
#                coco_eval(result_file, eval_types, dataset.coco)  # result_file is passed into coco_eval()
                """Use the hand-written evaluation() instead"""
                evaluation(result_file, dataset.coco, eval_types=eval_types)
            
            else:
                if not isinstance(outputs[0], dict):
                    result_file = out_file + '.json'
                    results2json(dataset, outputs, result_file)
#                    coco_eval(result_file, eval_types, dataset.coco)
                    """用自己写的evaluation()"""
                    evaluation(result_file, dataset.coco, eval_types=eval_types)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = out_file + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
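The comments above point out that the innermost per-class ndarrays in outputs cannot be written to a json file directly. If a plain JSON dump of the raw detections is all that is needed (rather than the COCO-format files produced by results2json), a conversion along these lines works; the helper name is hypothetical.

import json


def dump_outputs_as_json(outputs, json_path):
    # outputs: list over images, each a list over classes of (n, 5) ndarrays
    # [x1, y1, x2, y2, score]; nested lists are JSON-serializable, ndarrays are not.
    serializable = [[cls_dets.tolist() for cls_dets in per_img]
                    for per_img in outputs]
    with open(json_path, 'w') as f:
        json.dump(serializable, f)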
Example #14
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        result_root = os.path.dirname(args.out)
        if not os.path.exists(result_root):
            os.mkdir(result_root)

        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            elif 'panoptic' in eval_types:
                category_json_path = '../panopticapi/panoptic_coco_categories.json'
                cat_data = mmcv.load(category_json_path)
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file,
                                 cat_data)  # dingguo
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file,
                                     cat_data)  # dingguo
                        coco_eval(result_file, eval_types, dataset.coco)

                # evaluate panoptic
                logger = logger_init(result_root)
                combine_predictions(
                    os.path.join(result_root,
                                 'result_siting_seg.json'), result_file,
                    cfg.data.test['ann_file'].replace('instances', 'panoptic'),
                    category_json_path, os.path.join(result_root,
                                                     'seg_result'),
                    os.path.join(result_root, 'panoptic_result.json'),
                    cfg.data.test['ann_pan_file'].split('_semantic')[0],
                    args.confidence_thr, args.overlap_thr,
                    args.stuff_area_limit, logger)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #15
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # reorganize stpp
    num_classes = (cfg.model.cls_head.num_classes -
                   1 if cfg.model.cls_head.with_bg else
                   cfg.model.cls_head.num_classes)
    stpp_feat_multiplier = 0
    for stpp_subcfg in cfg.model.segmental_consensus.stpp_cfg:
        _, mult = parse_stage_config(stpp_subcfg)
        stpp_feat_multiplier += mult
    cfg.model.segmental_consensus = dict(
        type="STPPReorganized",
        standalong_classifier=cfg.model.segmental_consensus.
        standalong_classifier,
        feat_dim=num_classes + 1 + num_classes * 3 * stpp_feat_multiplier,
        act_score_len=num_classes + 1,
        comp_score_len=num_classes,
        reg_score_len=num_classes * 2,
        stpp_cfg=cfg.model.segmental_consensus.stpp_cfg)

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.gpus == 1:
        model = build_localizer(cfg.model,
                                train_cfg=None,
                                test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(localizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    eval_type = args.eval
    if eval_type:
        print('Starting evaluate {}'.format(eval_type))

        detections = results2det(dataset, outputs,
                                 **cfg.test_cfg.ssn.evaluater)

        if not args.no_regression:
            print("Performing location regression")
            for cls in range(len(detections)):
                detections[cls] = {
                    k: perform_regression(v)
                    for k, v in detections[cls].items()
                }
            print("Regression finished")

        print("Performing NMS")
        for cls in range(len(detections)):
            detections[cls] = {
                k: temporal_nms(v, cfg.test_cfg.ssn.evaluater.nms)
                for k, v in detections[cls].items()
            }
        print("NMS finished")

        if eval_type == 'activitynet':
            iou_range = np.arange(0.5, 1.0, 0.05)
        elif eval_type in ['thumos14', 'coin']:
            iou_range = np.arange(0.1, 1.0, .1)

        # get gt
        all_gt = pd.DataFrame(dataset.get_all_gt(),
                              columns=['video-id', 'cls', 't-start', 't-end'])
        gt_by_cls = [
            all_gt[all_gt.cls == cls].reset_index(drop=True).drop('cls', 1)
            for cls in range(len(detections))
        ]
        plain_detections = [
            det2df(detections, cls) for cls in range(len(detections))
        ]
        ap_values = eval_ap_parallel(plain_detections, gt_by_cls, iou_range)
        map_iou = ap_values.mean(axis=0)
        print("Evaluation finished")

        # display
        display_title = 'Temporal detection performance ({})'.format(args.eval)
        display_data = [['IoU thresh'], ['mean AP']]

        for i in range(len(iou_range)):
            display_data[0].append('{:.02f}'.format(iou_range[i]))
            display_data[1].append('{:.04f}'.format(map_iou[i]))
        table = AsciiTable(display_data, display_title)
        table.justify_columns[-1] = 'right'
        table.inner_footing_row_border = True
        print(table.table)
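temporal_nms above performs non-maximum suppression over temporal segments rather than boxes. A sketch of that 1D NMS, assuming each detection row starts with [t_start, t_end, score, ...] (the column layout is an assumption):

import numpy as np


def temporal_nms(detections, thresh):
    # Keep the highest-scoring segment, drop others whose temporal IoU with it
    # exceeds thresh, and repeat on the remainder.
    t1, t2, scores = detections[:, 0], detections[:, 1], detections[:, 2]
    durations = t2 - t1
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        tt1 = np.maximum(t1[i], t1[order[1:]])
        tt2 = np.minimum(t2[i], t2[order[1:]])
        inter = np.maximum(0.0, tt2 - tt1)
        iou = inter / (durations[i] + durations[order[1:]] - inter)
        order = order[1:][iou <= thresh]
    return detections[keep]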
Example #16
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Example #17
def main():
    args = parse_args()

    if os.path.isdir(args.checkpoint):
        print(args.checkpoint)
        checkpoints = glob.glob(args.checkpoint + '/epoch_*.pth')
        checkpoints = sorted(
            checkpoints,
            key=lambda x: int(x.split('epoch_')[-1].split('.')[0]))
        print(checkpoints)
        if args.out is not None:
            if not os.path.exists(args.out):
                os.mkdir(args.out)
            elif os.path.isfile(args.out):
                raise ValueError('args.out must be a directory.')
        # Create TensorBoard writer for output checkpoint dir.
        tensorboard_writer = SummaryWriter(args.out)
    else:
        checkpoints = [args.checkpoint]
        tensorboard_writer = None
        if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
            raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    for i, checkpoint in enumerate(checkpoints):
        outpath = args.out
        if os.path.isdir(args.checkpoint):
            outpath = args.out + '/%d_out.pkl' % i

        if not os.path.exists(outpath):
            if args.gpus == 1:
                model = build_detector(cfg.model,
                                       train_cfg=None,
                                       test_cfg=cfg.test_cfg)
                load_checkpoint(model, checkpoint)
                model = MMDataParallel(model, device_ids=[0])

                data_loader = build_dataloader(
                    dataset,
                    imgs_per_gpu=1,
                    workers_per_gpu=cfg.data.workers_per_gpu,
                    num_gpus=1,
                    dist=False,
                    shuffle=False)
                outputs = single_test(model, data_loader, args.show)
            else:
                model_args = cfg.model.copy()
                model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
                model_type = getattr(detectors, model_args.pop('type'))
                outputs = parallel_test(model_type,
                                        model_args,
                                        checkpoint,
                                        dataset,
                                        _data_func,
                                        range(args.gpus),
                                        workers_per_gpu=args.proc_per_gpu)

        # TODO: Currently assume test set is same size as training set.
        num_iters = (i + 1) * len(dataset)
        if outpath:
            if os.path.exists(outpath):
                print('reading results from {}'.format(outpath))
                outputs = mmcv.load(outpath)
            else:
                print('writing results to {}'.format(outpath))
                mmcv.dump(outputs, outpath)
            eval_types = args.eval
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = outpath
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_file = outpath + '.json'
                        results2json(dataset, outputs, result_file)
                        results_dict = coco_eval(result_file, eval_types,
                                                 dataset.coco)
                        if tensorboard_writer:
                            for eval_type in eval_types:
                                out = capture_stdout(lambda: results_dict[
                                    eval_type].summarize())
                                for line in out.split('\n')[:-1]:
                                    parts = line.split('=')
                                    name, score = '='.join(parts[:-1]), float(
                                        parts[-1])
                                    tensorboard_writer.add_scalar(
                                        'eval/' + name, score, num_iters)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = outpath + '.{}.json'.format(name)
                            results2json(dataset, outputs_, result_file)
                            results_dict = coco_eval(result_file, eval_types,
                                                     dataset.coco)
                            if tensorboard_writer:
                                for eval_type in eval_types:
                                    out = capture_stdout(lambda: results_dict[
                                        eval_type].summarize())
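capture_stdout above wraps a callable and returns whatever it printed, which is how the script recovers the metric lines that COCOeval.summarize() only writes to stdout. A sketch of such a helper using the standard library:

import contextlib
import io


def capture_stdout(fn):
    # Run fn() with stdout redirected into a buffer and return the captured text.
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        fn()
    return buf.getvalue()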
Example #18
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        if hasattr(cfg, 'data2'):
            dataset2 = obj_from_dict(cfg.data2.test, datasets, dict(test_mode=True))
            data_loader2 = build_dataloader(
                dataset2,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data2.workers_per_gpu,
                num_gpus=1,
                dist=False,
                shuffle=False)
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                num_gpus=1,
                dist=False,
                shuffle=False)
            outputs = double_test(model, data_loader, data_loader2, cfg.test_cfg2, args.show)
        else:
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                num_gpus=1,
                dist=False,
                shuffle=False)
            outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        # print('writing results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    # 3D
                    # load full volume and get full volume's image IDs
                    if hasattr(cfg.data.test, 'ann_file_volume'):
                        coco_full_gt = COCO(cfg.data.test.ann_file_volume)
                    else:
                        coco_full_gt = COCO(cfg.data.test.ann_file)

                    if str(type(dataset)) == "<class 'mmdet.datasets.coco_3d.Coco3DDataset'>" or \
                        str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_2scales.Coco3D2ScalesDataset'>" or \
                        str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_3scales.Coco3D3ScalesDataset'>" or \
                        str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_parcel.Coco3DParcelDataset'>":
                        full_filename_to_id = dict()
                        for img_id in coco_full_gt.getImgIds():
                            full_filename_to_id[coco_full_gt.loadImgs([img_id])[0]['file_name']] = img_id

                        if cfg.data.test.with_mask:
                            if hasattr(cfg, 'data2') and hasattr(cfg.data2, 'test'):
                                result = results2json3DMulti(dataset, dataset2, outputs, result_file, full_filename_to_id)
                            else:
                                result = results2json3D(dataset, outputs, result_file, full_filename_to_id)
                            coco_eval(result, eval_types, coco_full_gt, is3D=True, hasMask=True, full_filename_to_id=full_filename_to_id)
                        else:
                            if hasattr(cfg, 'data2') and hasattr(cfg.data2, 'test'):
                                results2json3DMulti(dataset, dataset2, outputs, result_file, full_filename_to_id)
                                coco_eval(result_file, eval_types, coco_full_gt, is3D=True, full_filename_to_id=full_filename_to_id)
                            elif str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_parcel.Coco3DParcelDataset'>":
                                results2json3DParcel(dataset, outputs, result_file, full_filename_to_id)
                                coco_eval(result_file, eval_types, coco_full_gt, is3D=True, full_filename_to_id=full_filename_to_id, isParcellized=True)
                            else:
                                results2json3D(dataset, outputs, result_file, full_filename_to_id)
                                coco_eval(result_file, eval_types, coco_full_gt, is3D=True, full_filename_to_id=full_filename_to_id)
                    else:
                        # default
                        results2json(dataset, outputs, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Ejemplo n.º 19
0
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])
        a = dataset[1]  # unused; appears to be a leftover debug access to a single sample

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

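    # Gather the ground-truth label of every sample in the test dataset.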
    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

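    # Average the per-clip scores (optionally after a softmax) into one prediction per video.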
    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]

    import datetime
    currentDT = datetime.datetime.now()  # timestamp used to name the result file below

    with open('data/nturgbd/nturgbd_val_split_generalization_rawframes.txt'
              ) as f:
        video_names = [l.strip().split(' ')[0] for l in f.readlines()]

    result_path = args.checkpoint + '.result_%s.pkl' % currentDT.strftime(
        "%Y-%m-%d_%H:%M:%S")
    with open(result_path, 'wb') as f:
        pickle.dump([results, gt_labels, video_names], f)
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Ejemplo n.º 20
0
def main():
    args = parse_args()

    checkpoints = args.checkpoint.split(',')
    if os.path.isdir(checkpoints[0]):
        configs = [
            mmcv.Config.fromfile(checkpoints[i] + '/' + args.config)
            for i in range(len(checkpoints))
        ]
        cfg = configs[0]
    else:
        cfg = mmcv.Config.fromfile(args.config)
        configs = [cfg]
    checkpoint = args.checkpoint

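    # Evaluate each checkpoint on the validation set, then ensemble the scores by averaging.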
    val_dataset = deep_recursive_obj_from_dict(cfg.data.val)
    per_model_outputs = []
    for i, (checkpoint, curr_cfg) in enumerate(zip(checkpoints, configs)):
        # build model
        model_cls = eval_mmcv_str(curr_cfg.model['type'])
        model_args = curr_cfg.model
        model_args.pop('type')

        # Need higher ulimit for data loaders.
        import resource
        rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, (16384, rlimit[1]))

        if os.path.isdir(checkpoints[0]):
            checkpoint_path = checkpoint + '/latest.pth'
            pkl_path = checkpoint + '/' + args.out
        else:
            checkpoint_path, pkl_path = checkpoint, args.out

        # Run model if results don't already exist.
        if os.path.exists(pkl_path):
            with open(pkl_path, 'rb') as f:
                outputs = pickle.load(f)
            targets = torch.LongTensor(
                [val_dataset[i][1] for i in range(len(val_dataset))])
        elif args.gpus == 1:
            num_workers = curr_cfg.data_workers * len(cfg.gpus)
            val_loader = DataLoader(
                val_dataset,
                batch_size=1,
                shuffle=False,
                # sampler=val_sampler,
                num_workers=num_workers)
            # Build the model, load the checkpoint weights, then wrap it for single-node GPU inference.
            model = model_cls(**model_args)
            load_checkpoint(model, checkpoint_path)
            model = DataParallel(model, device_ids=range(args.gpus)).cuda()
            outputs = single_test(model, val_loader, args.show)
            targets = torch.LongTensor([x[1] for x in outputs]).cuda()
            outputs = torch.cat([x[0] for x in outputs])
            with open(pkl_path, 'wb') as f:
                pickle.dump(outputs, f)
        else:
            # NOTE: Parallel inference requires the data to be explicitly swapped to
            #       cpu (add a .cpu() call to the result in parallel_test.py).
            # model_args = cfg.model.copy()
            # model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
            def model_fn(**kwargs):  # unused; parallel_test builds the model from model_cls and model_args
                return model

            outputs = parallel_test(model_cls,
                                    model_args,
                                    checkpoint_path,
                                    val_dataset,
                                    _data_func,
                                    range(args.gpus),
                                    workers_per_gpu=args.proc_per_gpu)
            targets = torch.LongTensor(
                [val_dataset[i][1] for i in range(len(val_dataset))])
            outputs = torch.cat(outputs).cpu()
            with open(pkl_path, 'wb') as f:
                pickle.dump(outputs, f)
        print(checkpoint, accuracy(outputs, targets, topk=(1, )))
        per_model_outputs.append(outputs)

    # Naive averaging.
    avg = torch.mean(torch.stack(per_model_outputs), 0)
    print("Naive Averaging", accuracy(avg, targets, topk=(1, )))
    with open('avg.pkl', 'wb') as f:
        pickle.dump(avg, f)