Example #1
def main():
    args = parse_args()
    src_txt_dir = args.anno_test
    src_img_dir = args.anno_img
    src_test_file = args.anno_test_txt

    src_xml_dir = "./data/VOCdevkit/VOC2007/Annotations"
    des_test_file = "./data/VOCdevkit/VOC2007/ImageSets/Main/test.txt"

    des_img_dir = "./data/VOCdevkit/VOC2007/JPEGImages"

    # convert the txt annotations to XML
    txt_list = list(sorted(os.listdir(src_txt_dir)))
    change_to_xml(txt_list, src_txt_dir, src_img_dir, src_xml_dir)

    # create a symlink to the image directory
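    # (os.symlink raises FileExistsError if the destination link already exists, e.g. on a re-run)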
    os.symlink(os.path.abspath(src_img_dir), os.path.abspath(des_img_dir))

    # copy test.txt to the target path
    shutil.copyfile(src_test_file, des_test_file)

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)

    # generate the two per-class result txt files
    results = pickle.load(open('./eval/result.pkl', 'rb'), encoding='utf-8')

    # test_txt = '../core_coreless_test.txt'

    if not os.path.exists('../predicted_file'):
        os.makedirs('../predicted_file')

    core_save_txt = '../predicted_file/det_test_带电芯充电宝.txt'
    coreless_save_txt = '../predicted_file/det_test_不带电芯充电宝.txt'

    with open(src_test_file, 'r') as f:
        names = f.readlines()

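    # note: the result txt files are opened in append mode below, so lines from previous runs are kept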
    for name, result in zip(names, results):
        for core_result in result[0]:
            with open(core_save_txt, 'a+') as f:
                f.write('{} {} {} {} {} {}\n'.format(
                    name.replace('\n', ''), core_result[4], core_result[0],
                    core_result[1], core_result[2], core_result[3]))
        for coreless_result in result[1]:
            with open(coreless_save_txt, 'a+') as f:
                f.write('{} {} {} {} {} {}\n'.format(name.replace('\n', ''),
                                                     coreless_result[4],
                                                     coreless_result[0],
                                                     coreless_result[1],
                                                     coreless_result[2],
                                                     coreless_result[3]))
Example #2
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show or args.show_dir or args.json_out, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format-only", '
         '"--show", "--show-dir" or "--json_out"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None

    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' with 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   samples_per_gpu=samples_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        #outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr)
        #outputs = single_gpu_test_crop_img(model, data_loader, args.show, args.show_dir, args.show_score_thr)  # clw modify
        ###### outputs = single_gpu_test_rotate_rect_img(model, data_loader, args.show, args.show_dir, args.show_score_thr)  # clw modify
        outputs = single_gpu_test_processed_rect_img(
            model, data_loader, args.show, args.show_dir,
            args.show_score_thr)  # clw modify
        ####### outputs = single_gpu_test_processed_rect_crop_img(model, data_loader, args.show, args.show_dir, args.show_score_thr)  # clw modify

    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)  # here
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #3
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #4
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    for t in cfg.data.test:
        t.test_mode = True
    cfg.out_path = args.out.split('.pkl')[0] if args.out else None

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the model and load checkpoint
    model = build_detector(
        cfg.model, train_cfg=None, test_cfg=cfg.test_cfg, global_cfg=cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.det_ckpt is not None:
        print('Loading detection models...')
        det_ckpt = load_checkpoint(model, args.det_ckpt, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
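        # note: 'dataset' has not been built yet at this point (it is only created in the
        # per-dataset loop below), so falling into this branch would raise a NameError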
        model.CLASSES = dataset.CLASSES

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    if not type(cfg.data.test) == list:
        cfg.data.test = [cfg.data.test]

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
    else:
        model = MMDistributedDataParallel(model.cuda())

    outputs = dict()
    for c in cfg.data.test:
        dataset = build_dataset(c)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)

        if not distributed:
            results = single_gpu_test(model, data_loader, args.out, args.show)
            if results is not None:
                outputs.update(results)
        else:
            outputs.update(multi_gpu_test(model, data_loader, args.tmpdir))

    rank, _ = get_dist_info()
    if len(outputs.keys()) > 0 and args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        if not (args.out.endswith('.pkl') or args.out.endswith('.json')):
            args.out += '.pkl'
        if 'track_results' in outputs.keys():
            mmcv.dump(outputs['track_results'], args.out)
        else:
            mmcv.dump(outputs, args.out)
        if 'bbox_results' in outputs.keys():
            result_files = results2json(dataset, outputs['bbox_results'], args.out)
            coco_eval(result_files, ['bbox', 'segm'], cfg.data.test[0].ann_file)
        if 'segm_results' in outputs.keys():
            result_files = results2json(dataset, [(b, s) for b, s in zip(outputs['bbox_results'], outputs['segm_results'])], args.out)
            coco_eval(result_files, ['segm'], cfg.data.test[0].ann_file)
        # if 'new_bbox_results' in outputs.keys():
        #     # For tracking
        #     result_files = results2json(dataset, outputs['new_bbox_results'],
        #                                 args.out)
        #     coco_eval(result_files, ['bbox'], cfg.data.test[0].ann_file)
        if 'track_results' in outputs.keys():
            print("Evaluating box tracking...")
            mdat_eval(outputs['track_results'], dataset, args.out, cfg)
        if 'segm_track_results' in outputs.keys():
            print("Evaluating segmentation tracking...")
            mdat_eval(outputs['segm_track_results'], dataset, args.out, cfg, with_mask=True)
Example #5
def main():
    args = parse_args()

    # assert args.show or args.json_out, \
    #     ('Please specify at least one operation (save or show the results) '
    #      'with the argument "--out" or "--show" or "--json_out"')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)

    checkpoint_file = args.checkpoint
    if not checkpoint_file:

        def _epoch_num(name):
            return int(
                re.findall('epoch_[0-9]*.pth',
                           name)[0].replace('epoch_', '').replace('.pth', ''))

        pths = sorted(glob.glob(os.path.join(cfg.work_dir, 'epoch_*.pth')),
                      key=_epoch_num)
        if len(pths) > 0:
            print("Found {}, use it as checkpoint by default.".format(
                pths[-1]))
            checkpoint_file = pths[-1]
    if not checkpoint_file:
        raise ValueError("Checkpoints not found, check work_dir non empty.")
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=args.shuffle)  # TODO: hack shuffle True

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, checkpoint_file, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    num_evals = args.num_evals
    if num_evals < 0:
        num_evals = len(data_loader)
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, num_evals, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, num_evals, args.tmpdir)

    rank, _ = get_dist_info()
    if rank == 0:
        gt_bboxes, gt_labels, gt_ignore, dataset_name = get_seq_gts(dataset)
        print('\nStarting evaluate {}'.format(dataset_name))
        eval_map(outputs,
                 gt_bboxes,
                 gt_labels,
                 gt_ignore,
                 scale_ranges=None,
                 iou_thr=0.5,
                 dataset=dataset_name,
                 print_summary=True)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #6
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    prefix = './mrcnn_r50_dual'
    # prefix = './mrcnn_r50_ag_cocolt'

    print(prefix)

    set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    set4 = mmcv.load('./{}_set4.pkl'.format(prefix))
    set5 = mmcv.load('./{}_set5.pkl'.format(prefix))
    set6 = mmcv.load('./{}_set6.pkl'.format(prefix))
    set7 = mmcv.load('./{}_set7.pkl'.format(prefix))

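    # concatenate the eight per-shard result lists into one combined result list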
    set_combine = set0 + set1 + set2 + set3 + set4 + set5 + set6 + set7

    print('start eval')
    if hasattr(dataset, 'coco'):
        result_files = results2json(dataset, set_combine, args.out, dump=False)
        coco_eval(result_files, args.eval, dataset.coco)

    elif hasattr(dataset, 'lvis'):
        result_files = results2json(dataset, set_combine, args.out, dump=False)
        lvis_eval(result_files, args.eval, dataset.lvis)
Example #7
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # VOCDataset(ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
    #           img_prefix=data_root + 'VOC2007/',
    #           img_scale=(300, 300),
    #           img_norm_cfg=img_norm_cfg,
    #           size_divisor=None,
    #           flip_ratio=0,
    #           with_mask=False,
    #           with_label=False,
    #           test_mode=True,
    #           resize_keep_ratio=False)
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        # build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
        # SingleStageDetector(pretrained=..., backbone=..., neck=..., bbox_head=...,
        #                     train_cfg=None, test_cfg=...)

        # First the BACKBONES, NECKS, ROI_EXTRACTORS, HEADS and DETECTORS registries are created,
        # then the classes are registered, e.g. BACKBONES.register_module(SSDVGG),
        # @HEADS.register_module (AnchorHead), @HEADS.register_module (SSDHead),
        # @DETECTORS.register_module (SingleStageDetector).
        # Finally, build_detector() is equivalent to calling SingleStageDetector(**args).
        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #8
def main():
    args = parse_args()
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset_type = 'OIDSegDataset'
    data_root = 'gs://oid2019/data/'
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True)
    dataset = get_dataset(
        dict(type=dataset_type,
             ann_file='/home/bo_liu/' + args.ann_file,
             img_prefix=data_root +
             ('val/'
              if args.ann_file == 'seg_val_2844_ann.pkl' else 'OD_test/'),
             img_scale=(1333, 800),
             img_norm_cfg=img_norm_cfg,
             size_divisor=32,
             flip_ratio=0,
             with_mask=True,
             with_label=False,
             test_mode=True))

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)
    # build the model and load checkpoint
    test_cfg = mmcv.ConfigDict(
        dict(
            rpn=dict(nms_across_levels=False,
                     nms_pre=1000,
                     nms_post=1000,
                     max_num=1000,
                     nms_thr=0.7,
                     min_bbox_size=0),
            rcnn=dict(
                score_thr=args.thres,
                # score_thr=0.0,
                nms=dict(type=args.nms_type, iou_thr=0.5),
                max_per_img=args.max_per_img,
                mask_thr_binary=0.5),
            keep_all_stages=False))
    model = build_detector(cfg.model, train_cfg=None, test_cfg=test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('Evaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #9
def evaluate_model(model_name, paper_arxiv_id, weights_url, weights_name,
                   paper_results, config):
    print('---')
    print('Now Evaluating %s' % model_name)

    evaluator = COCOEvaluator(root='./.data/vision/coco',
                              model_name=model_name,
                              paper_arxiv_id=paper_arxiv_id,
                              paper_results=paper_results)

    out = 'results.pkl'
    launcher = 'none'

    if out is not None and not out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(config)
    cfg.data.test[
        'ann_file'] = './.data/vision/coco/annotations/instances_val2017.json'
    cfg.data.test['img_prefix'] = './.data/vision/coco/val2017/'

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    local_checkpoint, _ = urllib.request.urlretrieve(
        weights_url, '%s/.cache/torch/%s' % (str(Path.home()), weights_name))
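    # (urlretrieve writes into ~/.cache/torch/, which must already exist)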

    print(local_checkpoint)

    # '/home/ubuntu/GCNet/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth'
    checkpoint = load_checkpoint(model, local_checkpoint, map_location='cpu')

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    evaluator.reset_time()

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, cache_exists = single_gpu_test(model, data_loader, False,
                                                evaluator)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
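        # note: 'args' is never defined in evaluate_model and 'cache_exists' is only set in the
        # single-GPU branch above, so this distributed path would fail as written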

    if cache_exists:
        print('Cache exists: %s' % (evaluator.batch_hash))
        evaluator.save()

    else:
        from mmdet.core import results2json

        rank, _ = get_dist_info()
        if out and rank == 0:
            print('\nwriting results to {}'.format(out))
            mmcv.dump(outputs, out)
            eval_types = ['bbox']
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = out
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = out + '.{}'.format(name)
                            result_files = results2json(
                                dataset, outputs_, result_file)
        anns = json.load(open(result_files['bbox']))
        evaluator.detections = []
        evaluator.add(anns)
        evaluator.save()
Example #10
def main():
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.data_index % 2)
    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(
        cfg.data.test)  # original - test | changed to test_with_train_data
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=0,  # cfg.data.workers_per_gpu
        dist=distributed,
        shuffle=False)

    # save gt boxes and labels for learning nms
    # for i, data in enumerate(data_loader):
    #     img_id = dataset.img_infos[i]['id']
    #     gt = dataset.get_ann_info(i)
    #     gt_boxes = gt['bboxes']
    #     gt_labels = gt['labels']
    #     filename = f'test_logits/learning_nms_data/{i}/gt_boxes.p'  # file name for new directory
    #     os.makedirs(os.path.dirname(filename), exist_ok=True)
    #     with open(f'test_logits/learning_nms_data/{i}/gt_boxes.p', 'wb') as outfile:  # possible to include img_id
    #         pickle.dump(gt_boxes, outfile)
    #     with open(f'test_logits/learning_nms_data/{i}/gt_labels.p', 'wb') as outfile:
    #         pickle.dump(gt_boxes, outfile)
    #
    #     # filename = dataset.img_infos[i]['filename']
    #     # with open(f'test_gt/{filename}.p', 'wb') as outfile:
    #     #     pickle.dump(gt_labels, outfile)

    # save gt instances per class
    # instances_list = np.zeros(1231)
    # for i, data in enumerate(data_loader):  # original script in test_lvis_tnorm.py
    #     gt = dataset.get_ann_info(i)
    #     print(i)
    #     for label in gt['labels']:
    #         instances_list[label] += 1
    # with open('train_instances_list.p', 'wb') as outfile:
    #     pickle.dump(instances_list, outfile)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    model = reweight_cls(model, args.tau)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, logits = single_gpu_test(model, data_loader, args.show, cfg,
                                          args.data_index)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
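        # note: 'logits' is only returned by single_gpu_test above, so the distributed path
        # would hit a NameError at logits_process below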

    # save outputs as csv:
    # pd.DataFrame(outputs).to_csv("original_outputs_full.csv")
    # preprocess logits and save them on json file
    # otp = np.asarray(outputs)  # temp
    # df = pd.DataFrame(otp)
    # df.to_csv('otp.csv', index=False)

    bboxes_mat, labels_mat, logits_mat, proposal_num = logits_process(logits)

    # save labels, boxes and logits
    # with open('test_logits/dragon_test_bboxes_mat.p', 'wb') as outfile:
    #     pickle.dump(bboxes_mat, outfile)
    # with open('test_logits/dragon_labels_mat.p', 'wb') as outfile:
    #     pickle.dump(labels_mat, outfile)
    # with open('logits_mat1.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[:1000], outfile)
    # with open('logits_mat2.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[1000:2000], outfile)
    # with open('logits_mat3.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[2000:3000], outfile)
    # with open('logits_mat4.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[3000:4000], outfile)
    # with open('logits_mat5.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[4000:], outfile)

    # filter detections by iou with gt (for dragon training)
    gt_list = []
    results_per_image = []
    for i, data in enumerate(
            data_loader):  # original script in test_lvis_tnorm.py
        # if i < TEMP_DATASET_SIZE*args.data_index:
        #     continue
        if i >= TEMP_DATASET_SIZE:  # temporary condition for testing
            break
        print(i)
        img_id = dataset.img_infos[i]['id']
        gt = dataset.get_ann_info(i)
        gt_dict = dict()
        gt_dict['id'] = img_id
        gt_dict['bboxes'] = gt['bboxes']
        gt_dict['labels'] = gt['labels']
        gt_list.append(gt_dict)
        # filter logits according to equivalent ground truth.
        # after filtering, for each image we get a list in length of classes and detections belongs to this class.
        results = filter_logits_by_gt(bboxes_mat[i], logits_mat[i], gt_list[i],
                                      proposal_num[i], i)
        results_per_image.append(results)
    with open('dragon_bboxes_logits_map24.p', 'wb') as outfile:
        pickle.dump(results_per_image, outfile)
    print('saved')

    # evaluation:
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                lvis_eval(result_file, eval_types, dataset.lvis)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out,
                                                args.data_index)
                    lvis_eval(result_files,
                              eval_types,
                              dataset.lvis,
                              max_dets=300)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        lvis_eval(result_files, eval_types, dataset.lvis)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #11
def _non_dist_train(model,
                    dataset,
                    cfg,
                    validate=False,
                    logger=None,
                    timestamp=None):
    if validate:
        raise NotImplementedError('Built-in validation is not implemented '
                                  'yet in not-distributed training. Use '
                                  'distributed training or test.py and '
                                  '*eval.py scripts instead.')
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.imgs_per_gpu,
            cfg.data.workers_per_gpu,
            cfg.gpus,
            dist=False) for ds in dataset
    ]
    # put model on gpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # # if model.module.bbox_head.freeze_solov2_and_train_combonly:
    # if model.module.bbox_head.optimize_list is not None:
    #     for (key, param) in model.named_parameters():
    #         # if 'kernel_convs_convcomb' not in key and 'context_fusion_convs' not in key and 'learned_weight' not in key:
    #         if not any(s in key for s in model.module.bbox_head.optimize_list):
    #             param.requires_grad=False
    #         else:
    #             # print('optimize {}'.format(key))
    #             logger.info('optimize {}'.format(key))

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(
        model, batch_processor, optimizer, cfg.work_dir, logger=logger)
    # an ugly workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=False)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)

    ## add test after training
    if cfg.data.test.ann_file != 'data/lvis/lvis_v0.5_val_lvis_freqset.json': # if val set is lvis freq, only eval on lvis-freq val set
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        model_orig = model.module
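        # note: 'model' is already an MMDataParallel wrapper from the training setup above;
        # the next line wraps that wrapper a second time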
        model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

        ##eval on lvis-77######
        cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
        cfg.data.test.img_prefix = 'data/lvis/val2017/'
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig=model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval('data/lvis/lvis_v0.5_val_cocofied.json', result_files, 'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)
    else:
        ##eval on lvis-freq######
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig=model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        data_loader.dataset.img_infos = data_loader.dataset.img_infos[:100]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs, 'xxx', dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval(cfg.data.test.ann_file, result_files, 'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file, dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)
Example #12
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        if hasattr(cfg, 'data2'):
            dataset2 = obj_from_dict(cfg.data2.test, datasets, dict(test_mode=True))
            data_loader2 = build_dataloader(
                dataset2,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data2.workers_per_gpu,
                num_gpus=1,
                dist=False,
                shuffle=False)
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                num_gpus=1,
                dist=False,
                shuffle=False)
            outputs = double_test(model, data_loader, data_loader2, cfg.test_cfg2, args.show)
        else:
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                num_gpus=1,
                dist=False,
                shuffle=False)
            outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        # print('writing results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    # 3D
                    # load full volume and get full volume's image IDs
                    if hasattr(cfg.data.test, 'ann_file_volume'):
                        coco_full_gt = COCO(cfg.data.test.ann_file_volume)
                    else:
                        coco_full_gt = COCO(cfg.data.test.ann_file)

                    if str(type(dataset)) == "<class 'mmdet.datasets.coco_3d.Coco3DDataset'>" or \
                        str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_2scales.Coco3D2ScalesDataset'>" or \
                        str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_3scales.Coco3D3ScalesDataset'>" or \
                        str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_parcel.Coco3DParcelDataset'>":
                        full_filename_to_id = dict()
                        for img_id in coco_full_gt.getImgIds():
                            full_filename_to_id[coco_full_gt.loadImgs([img_id])[0]['file_name']] = img_id

                        if cfg.data.test.with_mask:
                            if hasattr(cfg, 'data2') and hasattr(cfg.data2, 'test'):
                                result = results2json3DMulti(dataset, dataset2, outputs, result_file, full_filename_to_id)
                            else:
                                result = results2json3D(dataset, outputs, result_file, full_filename_to_id)
                            coco_eval(result, eval_types, coco_full_gt, is3D=True, hasMask=True, full_filename_to_id=full_filename_to_id)
                        else:
                            if hasattr(cfg, 'data2') and hasattr(cfg.data2, 'test'):
                                results2json3DMulti(dataset, dataset2, outputs, result_file, full_filename_to_id)
                                coco_eval(result_file, eval_types, coco_full_gt, is3D=True, full_filename_to_id=full_filename_to_id)
                            elif str(type(dataset)) == "<class 'mmdet.datasets.coco_3d_parcel.Coco3DParcelDataset'>":
                                results2json3DParcel(dataset, outputs, result_file, full_filename_to_id)
                                coco_eval(result_file, eval_types, coco_full_gt, is3D=True, full_filename_to_id=full_filename_to_id, isParcellized=True)
                            else:
                                results2json3D(dataset, outputs, result_file, full_filename_to_id)
                                coco_eval(result_file, eval_types, coco_full_gt, is3D=True, full_filename_to_id=full_filename_to_id)
                    else:
                        # default
                        results2json(dataset, outputs, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #13
def main():
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)

    annType = 'bbox'  # specify type here
    print('Running demo for *%s* results.' % (annType))
    annFile = 'eval_scripts/val_gt.json'
    res_file = open("eval_scripts/results.txt", "w")
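    # Each id_setup selects a different evaluation setting; this relies on a
    # pedestrian-style fork of COCOeval whose evaluate()/summarize() accept a
    # setup index (assumption based on the non-standard signatures used here).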
    for id_setup in range(0, 2):
        cocoGt = COCO(annFile)
        cocoDt = cocoGt.loadRes(args.out + '.json')
        imgIds = sorted(cocoGt.getImgIds())
        cocoEval = COCOeval(cocoGt, cocoDt, annType)
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate(id_setup)
        cocoEval.accumulate()
        cocoEval.summarize(id_setup, res_file)
    res_file.close()
Example #14
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    if args.out is None:
        dataset_name = dataset.name if hasattr(dataset, 'name') else 'val'
        if hasattr(cfg.data.test, 'task'):
            dataset_name = dataset_name + '_' + cfg.data.test.task
        model_name = os.path.basename(args.checkpoint).split('.')[0]
        model_dir = os.path.dirname(args.checkpoint)
        args.out = os.path.join(model_dir, 'raw_results',
                                dataset_name + '_' + model_name + '.pkl')
    elif not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    mmcv.mkdir_or_exist(os.path.dirname(args.out))

    rank, _ = get_dist_info()
    eval_types = args.eval
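    # Run inference only when the raw-result pickle does not exist yet;
    # otherwise the cached outputs are reloaded further below.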
    if not os.path.isfile(args.out):
        # build the model and load checkpoint
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        checkpoint = load_checkpoint(
            model, args.checkpoint, map_location='cpu')
        # old versions did not save class info in checkpoints, this workaround is
        # for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show)
        else:
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)

        if rank == 0:
            if hasattr(dataset, 'raw_annotations'):
                filenames = [
                    dataset.raw_annotations[dataset.ids[i]]['filename']
                    for i in range(len(dataset))
                ]
            else:
                filenames = [
                    img_info['filename'] for img_info in dataset.img_infos
                ]

            print('\nwriting results to {}'.format(args.out))
            results = {
                'file_names': filenames,
                'outputs': outputs,
            }
            mmcv.dump(results, args.out, protocol=2)
    elif rank == 0:
        results = mmcv.load(args.out, encoding='latin1')
        outputs = results['outputs']

    if eval_types and rank == 0:
        print('Starting evaluate {}'.format(' and '.join(eval_types)))
        if not hasattr(dataset, 'coco'):
            if hasattr(dataset, 'raw_annotations'):
                gt_bboxes = [
                    dataset.raw_annotations[dataset.ids[i]]['ann']['bboxes']
                    for i in range(len(dataset))
                ]
                gt_labels = [
                    dataset.raw_annotations[dataset.ids[i]]['ann']['classes']
                    for i in range(len(dataset))
                ]

                if cfg.data.test.with_ignore:
                    gt_ignores = [l <= 0 for l in gt_labels]
                else:
                    gt_ignores = [l == 0 for l in gt_labels]
                gt_labels = [np.abs(l) for l in gt_labels]
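                # 'corners' evaluation scores the predicted corners/poses against
                # the raw annotations, then also reports bbox mAP via eval_map.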
                if 'corners' in eval_types:
                    gt_corners = [
                        dataset.raw_annotations[dataset.ids[i]]['ann']
                        ['corners'] for i in range(len(dataset))
                    ]
                    gt_poses = [
                        dataset.raw_annotations[dataset.ids[i]]['ann']['poses']
                        for i in range(len(dataset))
                    ]
                    eval_corners(
                        outputs,
                        gt_corners,
                        gt_poses,
                        gt_labels,
                        gt_ignores,
                        gt_bboxes=gt_bboxes,
                        display=True)
                    det_bboxes = corners2bboxes(outputs,
                                                len(dataset.CLASSES) - 1)
                    eval_map(
                        det_bboxes, gt_bboxes, gt_labels, gt_ignore=gt_ignores)
                else:
                    eval_map(
                        outputs, gt_bboxes, gt_labels, gt_ignore=gt_ignores)
            else:
                gt_bboxes = [
                    img_info['ann']['bboxes'] for img_info in dataset.img_infos
                ]
                gt_labels = [
                    img_info['ann']['labels'] for img_info in dataset.img_infos
                ]
                if len(outputs[0]) == 5:
                    outputs = corners2bboxes(outputs, len(dataset.CLASSES) - 1)
                eval_map(outputs, gt_bboxes, gt_labels, iou_thr=0.4)
        else:
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco, CLASSES=dataset.CLASSES)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco, CLASSES=dataset.CLASSES, show=True)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco, CLASSES=dataset.CLASSES, show=True)
Example #15
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    while not osp.isfile(args.checkpoint):
        print('Waiting for {} to exist...'.format(args.checkpoint))
        time.sleep(60)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # assert not distributed
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
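                    # Dump the segm json only for the COCO test-dev annotation
                    # file (assumption: kept on disk for test-server submission).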
                    if dataset.ann_file == 'data/coco/annotations/image_info_test-dev2017.json':
                        result_files = results2json_segm(dataset,
                                                         outputs,
                                                         args.out,
                                                         dump=True)
                    else:
                        result_files = results2json_segm(dataset,
                                                         outputs,
                                                         args.out,
                                                         dump=False)
                    if 'lvis' in dataset.ann_file:  ## an ugly fix to make it compatible with coco eval
                        from lvis import LVISEval
                        lvisEval = LVISEval(cfg.data.test.ann_file,
                                            result_files, 'segm')
                        lvisEval.run()
                        lvisEval.print_results()
                        #fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                        lvisEval.params.iou_thrs[8] = 0.9
                        for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                            print('AP at iou {}: {}'.format(
                                iou, lvisEval._summarize('ap', iou_thr=iou)))
                    else:
                        coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset,
                                                    outputs_,
                                                    result_file,
                                                    dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

        # eval on lvis-77 (lvis_v0.5_val_cocofied)
        cfg.data.test.ann_file = 'data/lvis/lvis_v0.5_val_cocofied.json'
        cfg.data.test.img_prefix = 'data/lvis/val2017/'
        cfg.data.test.test_mode = True
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        # model_orig=model.module
        # model = MMDataParallel(model, device_ids=[0]).cuda()
        # data_loader.dataset.img_infos = data_loader.dataset.img_infos[:10]
        outputs = single_gpu_test(model, data_loader)

        print('\nwriting results to {}'.format('xxx'))
        # mmcv.dump(outputs, 'xxx')
        eval_types = ['segm']
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = 'xxx'
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset,
                                                     outputs,
                                                     'xxx',
                                                     dump=False)
                    from lvis import LVISEval
                    lvisEval = LVISEval(
                        'data/lvis/lvis_v0.5_val_cocofied.json', result_files,
                        'segm')
                    lvisEval.run()
                    lvisEval.print_results()
                    # fix lvis api eval iou_thr error, should be 0.9 but was 0.8999
                    lvisEval.params.iou_thrs[8] = 0.9
                    for iou in [0.5, 0.6, 0.7, 0.8, 0.9]:
                        print('AP at iou {}: {}'.format(
                            iou, lvisEval._summarize('ap', iou_thr=iou)))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = 'xxx' + '.{}'.format(name)
                        result_files = results2json(dataset,
                                                    outputs_,
                                                    result_file,
                                                    dump=False)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #16
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # NOTE: only evaluates on the first 20 images; remove this line to use the full test set
    dataset.img_infos = dataset.img_infos[:20]

    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


    def load_ckpt(ncm_model, cal_head):
        print('load cls head {}'.format('{}/{}.pth'.format(cfg.work_dir, cal_head)))
        # epoch = torch.load('{}/{}_epoch.pth'.format(cfg.work_dir, cal_head))
        load_checkpoint(ncm_model, '{}/{}.pth'.format(cfg.work_dir, cal_head))
        # return epoch

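    # Build the calibration head used to rescore the detector's classification
    # outputs (assumption: simple2fc/simple3fc are small MLP heads defined in
    # this repo); note that an unrecognised --cal_head would leave
    # calibrated_head undefined.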
    print('use {}'.format(args.cal_head))
    if len(dataset.CLASSES) == 1230:  # lvis
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc().cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc().cuda()
    elif len(dataset.CLASSES) == 80:  # coco
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc(num_classes=81).cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc(num_classes=81).cuda()
    else:
        raise ValueError(
            'Unsupported dataset with {} classes'.format(len(dataset.CLASSES)))
    # epoch = load_ckpt(calibrated_head, args.head_ckpt)
    load_ckpt(calibrated_head, args.head_ckpt)
    calibrated_head.eval()


    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, calibrated_head, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        calibrated_head = MMDistributedDataParallel(calibrated_head.cuda())
        outputs = multi_gpu_test(model, data_loader, calibrated_head, args.show, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if len(dataset.CLASSES) == 1230:
            if eval_types:

                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    lvis_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, args.out, dump=False)
                        print('Starting evaluate {}'.format(' and '.join(eval_types)))
                        lvis_eval(result_files, eval_types, dataset.lvis)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}'.format(name)
                            result_files = results2json(dataset, outputs_,
                                                        result_file)
                            lvis_eval(result_files, eval_types, dataset.coco)
        elif len(dataset.CLASSES) == 80:
            result_files = results2json(dataset, outputs, args.out, dump=False)
            coco_eval(result_files, args.eval, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #17
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # (optional) slice dataset.img_infos here to evaluate on only a subset of images

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    print('load model from {}'.format(cfg.load_from))
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    # set0 = mmcv.load('../liyu_mmdet/set0.pkl')
    # set1 = mmcv.load('../liyu_mmdet/set1.pkl')
    # set2 = mmcv.load('../liyu_mmdet/set2.pkl')
    # set3 = mmcv.load('../liyu_mmdet/set3.pkl')
    # set4 = mmcv.load('../liyu_mmdet/set4.pkl')
    # set5 = mmcv.load('../liyu_mmdet/set5.pkl')
    # set6 = mmcv.load('../liyu_mmdet/set6.pkl')
    # set7 = mmcv.load('../liyu_mmdet/set7.pkl')
    # set0 = mmcv.load('./set0.pkl')
    # set1 = mmcv.load('./set1.pkl')
    # set2 = mmcv.load('./set2.pkl')
    # set3 = mmcv.load('./set3.pkl')
    # set4 = mmcv.load('./set4.pkl')
    # set5 = mmcv.load('./set5.pkl')
    # set6 = mmcv.load('./set6.pkl')
    # set7 = mmcv.load('./set7.pkl')
    # set_combine = set0+set1+set2+set3+set4+set5+set6+set7
    # prefix = 'mrcnnr50_14.3_clshead'
    # set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    # set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    # set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    # set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    # set_combine = set0+set1+set2+set3

    # prefix = '/mrcnnr50_ag_coco_clshead'
    prefix = 'mrcnnr50_ag_3fc_ft_cocolongtail_cat400_epoch_2'
    prefix = 'mrcnn_r50_ag_cocolt'
    print(prefix)

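    # Merge the per-shard result pickles (assumption: the test set was split
    # into 8 subsets evaluated separately) into a single list of detections.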
    set0 = mmcv.load('./{}_set0.pkl'.format(prefix))
    set1 = mmcv.load('./{}_set1.pkl'.format(prefix))
    set2 = mmcv.load('./{}_set2.pkl'.format(prefix))
    set3 = mmcv.load('./{}_set3.pkl'.format(prefix))
    set4 = mmcv.load('./{}_set4.pkl'.format(prefix))
    set5 = mmcv.load('./{}_set5.pkl'.format(prefix))
    set6 = mmcv.load('./{}_set6.pkl'.format(prefix))
    set7 = mmcv.load('./{}_set7.pkl'.format(prefix))

    # set0 = mmcv.load('./set0.pkl')
    # set1 = mmcv.load('./set1.pkl')
    # set2 = mmcv.load('./set2.pkl')
    # set3 = mmcv.load('./set3.pkl')
    # set4 = mmcv.load('./set4.pkl')
    # set5 = mmcv.load('./set5.pkl')
    # set6 = mmcv.load('./set6.pkl')
    # set7 = mmcv.load('./set7.pkl')
    set_combine = set0 + set1 + set2 + set3 + set4 + set5 + set6 + set7

    # set_liyu = mmcv.load('../mmdet_ensemble/results319.pkl')

    # mmcv.dump(set_combine, args.out)
    # result_files = results2json(dataset, set_combine,
    #                             args.out)
    print('merged result sets ready, start eval')
    # result_files = results2json(dataset, set_combine,
    #                             args.out, dump=False)
    #
    # lvis_eval(result_files, args.eval, dataset.lvis)

    result_files = results2json(dataset, set_combine, args.out, dump=False)
    coco_eval(result_files, args.eval, dataset.coco)
Example #18
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

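    # Allow the test config to override how many images are fed to each GPU
    # (defaults to 1 when the key is absent).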
    imgs_per_gpu = cfg.data.test.pop('imgs_per_gpu', 1)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=imgs_per_gpu,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.log_dir)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if args.csv:
            csv_path = (args.out).replace('.pkl', '.csv')
            print('\nwriting results as csv to {}'.format(csv_path))
            convert_output_to_csv(dataset, outputs, csv_path)

        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #19
def main():
    args = parse_args()

    if os.path.isdir(args.checkpoint):
        print(args.checkpoint)
        checkpoints = glob.glob(args.checkpoint + '/epoch_*.pth')
        checkpoints = sorted(
            checkpoints,
            key=lambda x: int(x.split('epoch_')[-1].split('.')[0]))
        print(checkpoints)
        if args.out is not None:
            if not os.path.exists(args.out):
                os.mkdir(args.out)
            elif os.path.isfile(args.out):
                raise ValueError('args.out must be a directory.')
        # Create TensorBoard writer for output checkpoint dir.
        tensorboard_writer = SummaryWriter(args.out)
    else:
        checkpoints = [args.checkpoint]
        tensorboard_writer = None
        if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
            raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
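    # Evaluate every checkpoint in order; per-checkpoint results are cached as
    # <out>/<i>_out.pkl and metrics can be logged to TensorBoard.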
    for i, checkpoint in enumerate(checkpoints):
        outpath = args.out
        if os.path.isdir(args.checkpoint):
            outpath = args.out + '/%d_out.pkl' % i

        if not os.path.exists(outpath):
            if args.gpus == 1:
                model = build_detector(cfg.model,
                                       train_cfg=None,
                                       test_cfg=cfg.test_cfg)
                load_checkpoint(model, checkpoint)
                model = MMDataParallel(model, device_ids=[0])

                data_loader = build_dataloader(
                    dataset,
                    imgs_per_gpu=1,
                    workers_per_gpu=cfg.data.workers_per_gpu,
                    num_gpus=1,
                    dist=False,
                    shuffle=False)
                outputs = single_test(model, data_loader, args.show)
            else:
                model_args = cfg.model.copy()
                model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
                model_type = getattr(detectors, model_args.pop('type'))
                outputs = parallel_test(model_type,
                                        model_args,
                                        checkpoint,
                                        dataset,
                                        _data_func,
                                        range(args.gpus),
                                        workers_per_gpu=args.proc_per_gpu)

        # TODO: Currently assume test set is same size as training set.
        num_iters = (i + 1) * len(dataset)
        if outpath:
            if os.path.exists(outpath):
                print('reading results from {}'.format(outpath))
                outputs = mmcv.load(outpath)
            else:
                print('writing results to {}'.format(outpath))
                mmcv.dump(outputs, outpath)
            eval_types = args.eval
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = outpath
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_file = outpath + '.json'
                        results2json(dataset, outputs, result_file)
                        results_dict = coco_eval(result_file, eval_types,
                                                 dataset.coco)
                        if tensorboard_writer:
                            for eval_type in eval_types:
                                out = capture_stdout(lambda: results_dict[
                                    eval_type].summarize())
                                for line in out.split('\n')[:-1]:
                                    parts = line.split('=')
                                    name, score = '='.join(parts[:-1]), float(
                                        parts[-1])
                                    tensorboard_writer.add_scalar(
                                        'eval/' + name, score, num_iters)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = outpath + '.{}.json'.format(name)
                            results2json(dataset, outputs_, result_file)
                            results_dict = coco_eval(result_file, eval_types,
                                                     dataset.coco)
                            if tensorboard_writer:
                                for eval_type in eval_types:
                                    out = capture_stdout(lambda: results_dict[
                                        eval_type].summarize())
                                    # log each summarized metric as a scalar,
                                    # as in the single-output branch above
                                    for line in out.split('\n')[:-1]:
                                        parts = line.split('=')
                                        metric, score = '='.join(
                                            parts[:-1]), float(parts[-1])
                                        tensorboard_writer.add_scalar(
                                            'eval/' + metric, score, num_iters)
Example #20
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)
    
    # run evaluation
    import zipfile
    from mmdet.core.evaluation.icdar_evaluation import icdar_eval
    import os

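    # Zip the per-image prediction files so the ICDAR evaluation script can
    # score them (assumption: single_test/parallel_test already wrote them to
    # ../output/pt/).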
    pt_zip_dir = os.path.join('../output', 'pt.zip')
    output_pt_dir = os.path.join('../output', 'pt/')
    z = zipfile.ZipFile(pt_zip_dir, 'w', zipfile.ZIP_DEFLATED)

    for dirpath, dirnames, filenames in os.walk(output_pt_dir):
        for filename in filenames:
            z.write(os.path.join(dirpath, filename), filename)
    z.close()

    # use the ICDAR evaluation protocol
    if args.dataset == 'icdar2015':
        gt_zip_dir = './work_dirs/gt_ic15.zip'
    elif args.dataset == 'icdar2013':
        gt_zip_dir = './work_dirs/gt_ic13.zip'
    elif args.dataset == 'td500':
        gt_zip_dir = './work_dirs/gt_td500.zip'
    else:
        raise ValueError('Unsupported dataset: {}'.format(args.dataset))
    param_dict = dict(
        # gt zip file path
        g = gt_zip_dir,
        # prediction zip file path
        s = pt_zip_dir,
    )
    result_dict = icdar_eval(param_dict)
    
    print(result_dict)
    for i in range(6):
        print('')

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #21
def main():  # noqa: C901
    """Start test."""
    args = parse_args()
    if args.work_dir is not None:
        mmcv.mkdir_or_exist(args.work_dir)
        if args.tmpdir is None:
            args.tmpdir = osp.join(args.work_dir, 'tmp_dir')
            mmcv.mkdir_or_exist(args.tmpdir)
        if args.out is None:
            args.out = osp.join(args.work_dir, 'result.pkl')
        if args.checkpoint is None:
            args.checkpoint = osp.join(args.work_dir, 'latest.pth')
        fps_file = osp.join(args.work_dir, 'fps.pkl')
        mAP_file = osp.join(args.work_dir, 'mAP.pkl')
    else:
        mAP_file, fps_file = None, None
        if args.checkpoint is None:
            raise ValueError('Checkpoint file cannot be empty.')

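    # Work around config loading for .json configs by temporarily swapping
    # mmcv.load for a JSON loader (json_to_dict is assumed to be defined
    # elsewhere in this script).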
    if args.config.endswith(".json"):
        load_method = mmcv.load
        mmcv.load = json_to_dict
        cfg = mmcv.Config.fromfile(args.config)
        mmcv.load = load_method
    else:
        cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.dist:
        init_dist('pytorch', **cfg.dist_params)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=True,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.CLASSES = dataset.CLASSES
    if args.dist:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    else:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, fps_file)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            assert not isinstance(outputs[0], dict)
            result_files = results2json(dataset, outputs, args.out)
            coco_eval(result_files,
                      eval_types,
                      dataset.coco,
                      dump_file=mAP_file)
Example #22
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # per_set_img_num = int(len(dataset.img_infos)/args.total_set_num)
    # this_set_start = per_set_img_num*args.set
    # if args.set < args.total_set_num-1:
    #     dataset.img_infos = dataset.img_infos[this_set_start: this_set_start+per_set_img_num]
    # else:
    #     dataset.img_infos = dataset.img_infos[this_set_start:]
    # dataset.img_infos = dataset.img_infos[:100]

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    # print('load from {}'.format(args.checkpoint))
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


## load longtail classifier

# def load_ncm_ckpt(ncm_model):
#     if not os.path.exists('./simple3fc.pth'):
#         print('start training from 0 epoch')
#         return 0
#     else:
#         epoch = torch.load('./simple3fc_epoch.pth')
#         load_checkpoint(ncm_model, './simple3fc.pth')
#         return epoch

# def load_ncm_ckpt(ncm_model):
#     if not os.path.exists('./simple3fc.pth'):
#         print('start training from 0 epoch')
#         return 0
#     else:
#         epoch = torch.load('./finetune_simple3fc_epoch.pth')
#         load_checkpoint(ncm_model, './finetune_simple3fc.pth')
#         return epoch

    def load_ncm_ckpt(ncm_model):
        if not os.path.exists(
                './exp_randominit_negpossame_finetune_simple3fc_stage2_epoch.pth'
        ):
            print('start training from 0 epoch')
            return 0
        else:
            epoch = torch.load(
                './exp_randominit_negpossame_finetune_simple3fc_stage2_epoch.pth'
            )
            load_checkpoint(
                ncm_model,
                'exp_randominit_negpossame_finetune_simple3fc_stage2.pth')
            return epoch

    # def load_simple2fc_stage0_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage0_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage0.pth')
    #     return epoch
    #
    # def load_simple2fc_stage1_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage1_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage1.pth')
    #     return epoch
    #
    # def load_simple2fc_stage2_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage2_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage2.pth')
    #     return epoch
    #
    #
    # olongtail_model_stage0 = simple2fc().cuda()
    # epoch = load_simple2fc_stage0_ckpt(olongtail_model_stage0)
    # print('load model epoch {}'.format(epoch))
    # olongtail_model_stage0.eval()
    #
    # olongtail_model_stage1 = simple2fc().cuda()
    # epoch = load_simple2fc_stage1_ckpt(olongtail_model_stage1)
    # olongtail_model_stage1.eval()
    #
    # olongtail_model_stage2 = simple2fc().cuda()
    # epoch = load_simple2fc_stage2_ckpt(olongtail_model_stage2)
    # olongtail_model_stage2.eval()
    #
    # olongtail_model_all_stage = [olongtail_model_stage0, olongtail_model_stage1, olongtail_model_stage2]

    prefix = '3fc_ft'

    def load_stage0_ckpt(ncm_model):

        # epoch = torch.load('./work_dirs/htc/{}_stage0_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage0.pth'.format(prefix))
        # return epoch

    def load_stage1_ckpt(ncm_model):

        # epoch = torch.load('./work_dirs/htc/{}_stage1_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage1.pth'.format(prefix))
        # return epoch

    def load_stage2_ckpt(ncm_model):

        # epoch = torch.load('./work_dirs/htc/{}_stage2_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage2.pth'.format(prefix))
        # return epoch

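    # Build one calibration head per cascade stage and load its fine-tuned
    # weights (assumption: simple3fc is a small MLP head defined in this repo).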
    olongtail_model_stage0 = simple3fc().cuda()
    epoch = load_stage0_ckpt(olongtail_model_stage0)
    # print('load model epoch {}'.format(epoch))
    olongtail_model_stage0.eval()

    olongtail_model_stage1 = simple3fc().cuda()
    epoch = load_stage1_ckpt(olongtail_model_stage1)
    olongtail_model_stage1.eval()

    olongtail_model_stage2 = simple3fc().cuda()
    epoch = load_stage2_ckpt(olongtail_model_stage2)
    olongtail_model_stage2.eval()

    olongtail_model_all_stage = [
        olongtail_model_stage0, olongtail_model_stage1, olongtail_model_stage2
    ]

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader,
                                  olongtail_model_all_stage, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, olongtail_model_all_stage,
                                 args.show, args.tmpdir)

    # mmcv.dump(outputs, args.out)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:

            if eval_types == ['proposal_fast']:
                result_file = args.out
                lvis_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset,
                                                outputs,
                                                args.out,
                                                dump=False)
                    print('Starting evaluate {}'.format(
                        ' and '.join(eval_types)))
                    lvis_eval(result_files, eval_types, dataset.lvis)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        lvis_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #23
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
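    # Optional command-line overrides: annotation file, image prefix, and
    # forcing every test image to be flipped (flip_ratio=1).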
    if args.ann_file is not None:
        cfg.data.test.ann_file = args.ann_file
    if args.img_prefix is not None:
        cfg.data.test.img_prefix = args.img_prefix
    if args.flip:
        cfg.data.test.flip_ratio = 1

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = get_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #24
def main(args):
    if args.model.endswith('.onnx'):
        backend = 'onnx'
    elif args.model.endswith('.xml'):
        backend = 'openvino'
    else:
        raise ValueError('Unknown model type.')

    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

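    # For OpenVINO IR models the input normalization is assumed to be folded
    # into the exported network, so the pipeline's Normalize step is turned
    # into a no-op here.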
    if backend == 'openvino':
        assert cfg.data.test.pipeline[1]['type'] == 'MultiScaleFlipAug'
        normalize_idx = [
            i for i, v in enumerate(cfg.data.test.pipeline[1]['transforms'])
            if v['type'] == 'Normalize'
        ][0]
        cfg.data.test.pipeline[1]['transforms'][normalize_idx]['mean'] = [0.0, 0.0, 0.0]
        cfg.data.test.pipeline[1]['transforms'][normalize_idx]['std'] = [1.0, 1.0, 1.0]
        cfg.data.test.pipeline[1]['transforms'][normalize_idx]['to_rgb'] = False
        print(cfg.data.test)

    if args.video is not None and args.show:
        dataset = VideoDataset(int(args.video), cfg.data)
        data_loader = iter(dataset)
        wait_key = 1
    else:
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=False,
            shuffle=False)
        wait_key = -1

    # Valid classes + background.
    classes_num = len(dataset.CLASSES) + 1

    if backend == 'openvino':
        from mmdet.utils.deployment import DetectorOpenVINO
        model = DetectorOpenVINO(args.model,
                                 args.model[:-3] + 'bin',
                                 mapping_file_path=args.model[:-3] + 'mapping',
                                 cfg=cfg,
                                 classes=dataset.CLASSES)
    else:
        from mmdet.utils.deployment import ModelONNXRuntime
        model = ModelONNXRuntime(args.model, cfg=cfg, classes=dataset.CLASSES)

    results = []
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        im_data = data['img'][0].cpu().numpy()
        try:
            result = model(im_data)
            result = postprocess(
                result,
                data['img_meta'][0].data[0],
                num_classes=classes_num,
                rescale=not args.show)
        except Exception as ex:
            print('\nException raised while processing item {}:'.format(i))
            print(ex)
            result = empty_result(
                num_classes=classes_num,
                with_mask=model.pt_model.with_mask)
        results.append(result)

        if args.show:
            model.show(data, result, score_thr=args.score_thr, wait_time=wait_key)

        batch_size = data['img'][0].size(0)
        for _ in range(batch_size):
            prog_bar.update()

    print('')
    print('Writing results to {}'.format(args.out))
    mmcv.dump(results, args.out)

    eval_types = args.eval
    if eval_types:
        print('Starting evaluate {}'.format(' and '.join(eval_types)))
        if eval_types == ['proposal_fast']:
            result_file = args.out
            coco_eval(result_file, eval_types, dataset.coco)
        else:
            if not isinstance(results[0], dict):
                result_files = results2json(dataset, results, args.out)
                coco_eval(result_files, eval_types, dataset.coco)
            else:
                for name in results[0]:
                    print('\nEvaluating {}'.format(name))
                    outputs_ = [out[name] for out in results]
                    result_file = args.out + '.{}'.format(name)
                    result_files = results2json(dataset, outputs_,
                                                result_file)
                    coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out:
        if not isinstance(results[0], dict):
            results2json(dataset, results, args.json_out)
        else:
            for name in results[0]:
                outputs_ = [out[name] for out in results]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #25
def main():
    args = parse_args()

    assert args.out or args.show, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)

    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    aggregated_results = {}
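    # aggregated_results[corruption][severity] holds the evaluation dict for
    # that (corruption, severity) pair.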
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(type='Corrupt',
                                        corruption=corruption,
                                        severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)

            # print info
            print('\nTesting {} at severity {}'.format(corruption,
                                                       corruption_severity))

            # build the dataloader
            # TODO: support multiple images per gpu
            #       (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(dataset,
                                           imgs_per_gpu=1,
                                           workers_per_gpu=args.workers,
                                           dist=distributed,
                                           shuffle=False)

            # build the model and load checkpoint
            model = build_detector(cfg.model,
                                   train_cfg=None,
                                   test_cfg=cfg.test_cfg)
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(model,
                                         args.checkpoint,
                                         map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                outputs = single_gpu_test(model, data_loader, args.show)
            else:
                model = MMDistributedDataParallel(model.cuda())
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            rank, _ = get_dist_info()
            if args.out and rank == 0:
                eval_results_filename = (osp.splitext(args.out)[0] +
                                         '_results' +
                                         osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation '
                                      'is supported for pascal voc')
                else:
                    if eval_types:
                        print('Starting evaluate {}'.format(
                            ' and '.join(eval_types)))
                        if eval_types == ['proposal_fast']:
                            result_files = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = results2json(
                                    dataset, outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print('\nEvaluating {}'.format(name))
                                    outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + '.{}'.format(name)
                                    result_files = results2json(
                                        dataset, outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')

            # save results after each evaluation
            if args.out and rank == 0:
                mmcv.dump(aggregated_results, eval_results_filename)

    # print final results
    print('\nAggregated results:')
    prints = args.final_prints
    aggregate = args.final_prints_aggregate

    if cfg.dataset_type == 'VOCDataset':
        get_results(eval_results_filename,
                    dataset='voc',
                    prints=prints,
                    aggregate=aggregate)
    else:
        get_results(eval_results_filename,
                    dataset='coco',
                    prints=prints,
                    aggregate=aggregate)
Example #26
def main():
    args = parse_args()
    assert isinstance(args.input, list)
    N = len(args.input)
    for ii in range(N):
        if args.input[ii] is not None and not args.input[ii].endswith(
            ('.pkl', '.pickle')):
            raise ValueError('The input file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)

    rank, _ = get_dist_info()
    if args.input and rank == 0:
        print('\nInput result files: {}'.format(args.input))
        root_path = os.path.dirname(args.out)
        if root_path and not os.path.exists(root_path):
            os.makedirs(root_path)
        #mmcv.dump(outputs, args.out)
        outputs = list()
        print("{} models ".format(N))
        for jj in range(N):
            input = mmcv.load(args.input[jj])
            print("{} images".format(len(input)))
            for zz in range(len(input)):
                if jj == 0:
                    outputs.append(input[zz])
                else:
                    assert len(outputs[zz]) == len(input[zz])
                    outputs[zz].extend(input[zz])
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if not args.is_coco:
                #  test VisDrone2019
                if args.eval == ['bbox']:
                    print("eval {}".format(args.eval))
                    test_dataset = cfg.data.test
                    eval_visdrone_det(cfg.work_dir, args.out, test_dataset,
                                      args.is_patch, args.show)
            else:
                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_file = args.out + '.json'
                        results2json(dataset, outputs, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}.json'.format(name)
                            results2json(dataset, outputs_, result_file)
                            coco_eval(result_file, eval_types, dataset.coco)
Example #27
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs
    # dataset.img_infos = dataset.img_infos[:100]
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # # old versions did not save class info in checkpoints, this walkaround is
    # # for backward compatibility
    # if 'CLASSES' in checkpoint['meta']:
    #     model.CLASSES = checkpoint['meta']['CLASSES']
    # else:
    #     model.CLASSES = dataset.CLASSES

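    # If result files from a previous run already exist, evaluate them directly
    # and exit instead of running inference again.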
    if args.existing_out:
        result_files = {
            'bbox': './{}.bbox.json'.format(args.existing_json),
            'segm': './{}.segm.json'.format(args.existing_json),
            'proposal': './{}.proposal.json'.format(args.existing_json),
            'full': './{}'.format(args.existing_json)
        }
        eval_types = args.eval
        if eval_types == ['proposal_fast'] or eval_types == ['proposal_fast_percat']:
            result_files = mmcv.load(result_files['full'])
            result_files = [item[2] for item in result_files]
            if eval_types == ['proposal_fast_percat']:
                lvis_eval(result_files, eval_types, dataset.lvis, existing_json=args.existing_json)
            else:
                lvis_eval(result_files, eval_types, dataset.lvis)

        else:
            lvis_eval(result_files, eval_types, dataset.lvis)
        exit()

    if os.path.isdir(args.checkpoint):

        ckpts = glob.glob(os.path.join(args.checkpoint, 'epoch*'))
        ckpts.sort(key=os.path.getmtime)
        for ckpt in ckpts:
            print('eval {}'.format(ckpt))
            checkpoint = load_checkpoint(model, ckpt, map_location='cpu')
            # old versions did not save class info in checkpoints, this workaround is
            # for backward compatibility
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                outputs = single_gpu_test(model, data_loader, args.show)
            else:
                model = MMDistributedDataParallel(model.cuda())
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            rank, _ = get_dist_info()
            if args.out and rank == 0:
                print('\nwriting results to {}'.format(args.out))
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if eval_types:
                    print('Starting evaluate {}'.format(' and '.join(eval_types)))
                    if eval_types == ['proposal_fast']:
                        result_file = args.out
                        lvis_eval(result_file, eval_types, dataset.lvis)
                    else:
                        if not isinstance(outputs[0], dict):
                            result_files = results2json(dataset, outputs, args.out)
                            lvis_eval(result_files, eval_types, dataset.lvis)
                        else:
                            for name in outputs[0]:
                                print('\nEvaluating {}'.format(name))
                                outputs_ = [out[name] for out in outputs]
                                result_file = args.out + '.{}'.format(name)
                                result_files = results2json(dataset, outputs_,
                                                            result_file)
                                lvis_eval(result_files, eval_types, dataset.lvis)

            # Save predictions in the COCO json format
            if args.json_out and rank == 0:
                if not isinstance(outputs[0], dict):
                    results2json(dataset, outputs, args.json_out)
                else:
                    for name in outputs[0]:
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.json_out + '.{}'.format(name)
                        results2json(dataset, outputs_, result_file)

    else:
        print('eval {}'.format(args.checkpoint))
        checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
        # old versions did not save class info in checkpoints, this workaround is
        # for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show)
        else:
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)

        rank, _ = get_dist_info()
        if args.out and rank == 0:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
            ## combine results:
            # load_outputs = mmcv.load('lvis_maskrcnn_r50fpn_clsag_boxmask.pkl')

            eval_types = args.eval
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    lvis_eval(result_file, eval_types, dataset.lvis)
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, args.out)
                        lvis_eval(result_files, eval_types, dataset.lvis)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}'.format(name)
                            result_files = results2json(dataset, outputs_,
                                                        result_file)
                            lvis_eval(result_files, eval_types, dataset.lvis)

        # Save predictions in the COCO json format
        if args.json_out and rank == 0:
            if not isinstance(outputs[0], dict):
                results2json(dataset, outputs, args.json_out)
            else:
                for name in outputs[0]:
                    outputs_ = [out[name] for out in outputs]
                    result_file = args.json_out + '.{}'.format(name)
                    results2json(dataset, outputs_, result_file)
Example #28
def main():
    args = parse_args()

    assert args.out or args.eval or args.format_only or args.show or args.json_out, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", "--format_only", '
         '"--show" or "--json_out"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    # if 'CLASSES' in checkpoint['meta']:
    #     model.CLASSES = checkpoint['meta']['CLASSES']
    # else:
    #     model.CLASSES = dataset.CLASSES
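    # NOTE: class ids are hard-coded here instead of being read from the
    # checkpoint meta or the dataset (see the commented-out logic above).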
    model.CLASSES = [0, 1, 2, 3]

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        # model = MMDistributedDataParallel(
        #     model.cuda(),
        #     device_ids=[torch.cuda.current_device()],
        #     broadcast_buffers=False)
        # outputs = multi_gpu_test(model, data_loader, args.tmpdir,
        #                          args.gpu_collect)
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.options is None else args.options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)

    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #29
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_detector(
            cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader, args.show)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
        model_type = getattr(detectors, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_file = args.out + '.json'
                    results2json(dataset, outputs, result_file)
                    coco_eval(result_file, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}.json'.format(name)
                        results2json(dataset, outputs_, result_file)
                        coco_eval(result_file, eval_types, dataset.coco)
Example #30
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out or args.vdo_out_folder, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    while not osp.isfile(args.checkpoint):
        print('Waiting for {} to exist...'.format(args.checkpoint))
        time.sleep(60)

    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json_segm(dataset, outputs,
                                                     args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)

    # Save predictions in RLE format for VDO
    '''
    if args.vdo_out_folder and rank == 0:
        if not osp.exists(args.vdo_out_folder):
            os.mkdir(args.vdo_out_folder)
        for i in range(len(dataset)):
            img_id = dataset.img_infos[i]['id']
            file_name = dataset.img_infos[i]['file_name']
            width = dataset.img_infos[i]['width']
            height = dataset.img_infos[i]['height']
            results = outputs[i]
            lines = ['{} {}\n'.format(width, height).encode()]
            for class_id in range(len(results)):
                for segm in results[class_id]:
                    lines.append('{} '.format(class_id).encode())
                    lines.append(segm[0]['counts'])
                    lines.append('\n'.encode())
            out_file_name = '.'.join(file_name.split('.')[:-1] + ['txt'])
            with open(osp.join(args.vdo_out_folder, out_file_name), 'wb') as f:
                f.writelines(lines)
    '''

    # Save predictions in default format for VDO
    if args.vdo_out_folder and rank == 0:
        if not osp.exists(args.vdo_out_folder):
            os.mkdir(args.vdo_out_folder)
        for i in tqdm(range(len(dataset))):
            file_name = dataset.img_infos[i]['file_name']
            width = dataset.img_infos[i]['width']
            height = dataset.img_infos[i]['height']
            results = outputs[i]
            mask = np.zeros((height, width), dtype=np.uint8)

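            # Decode each instance's RLE mask and paint it into a single label
            # map, giving every instance a unique integer id; later instances
            # overwrite overlapping pixels.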
            obj_id = 1
            for class_id in range(len(results)):
                for segm in results[class_id]:
                    m = mask_util.decode(segm[0])
                    m = m * obj_id
                    mask[m > 0] = m[m > 0]
                    obj_id += 1

            lines = []
            for y in range(mask.shape[0]):
                # keep the original format: every id followed by a space, rows
                # separated by newlines (no newline after the last row)
                line = ''.join('{} '.format(v) for v in mask[y])
                if y != mask.shape[0] - 1:
                    line += '\n'
                lines.append(line)

            out_file_name = '.'.join(file_name.split('.')[:-1] + ['txt'])
            with open(osp.join(args.vdo_out_folder, out_file_name), 'w') as f:
                f.writelines(lines)