Example #1
def main():
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    dataset = build_dataset(cfg.data.test)
    eval_types = args.eval
    # load filtered detections (filtered by IoU)
    # logits = pickle.load(open('filtered_logits.p', 'rb'))
    # targets = pickle.load(open('filtered_targets.p', 'rb'))
    # imgs = pickle.load(open('filtered_imgs.p', 'rb'))
    # boxes = pickle.load(open('filtered_boxes.p', 'rb'))
    # outputs = logits2output(logits, boxes, imgs, targets)

    # load unfiltered files (300 detections for each image)
    logits = [
        pickle.load(open(f'test_logits/smDragon/dragon_logits_mat{i}.p', 'rb'))
        for i in range(1, 6)
    ]
    logits = np.asarray(logits)
    logits = logits.reshape(-1, logits.shape[-2],
                            logits.shape[-1])  # reshape to (5000, 300, 1231)
    boxes = pickle.load(open('test_logits/bboxes_mat.p', 'rb'))
    labels = pickle.load(open('test_logits/labels_mat.p', 'rb'))
    outputs = logits2output_unfiltered(logits, boxes, labels)
    # otp2 = np.asarray(outputs[:4])  # temp
    # df = pd.DataFrame(otp2)
    # df.to_csv('dragon_otp.csv', index=False)
    # pd.DataFrame(outputs[:50]).to_csv("dragon2map_outputs.csv")
    result_files = results2json(dataset, outputs, args.out)
    lvis_eval(result_files, eval_types, dataset.lvis)
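
Example #1 relies on a logits2output_unfiltered helper that is not shown (the real helper also receives precomputed labels). Whatever it does internally, its outputs go straight into results2json, which in mmdetection-1.x-style code expects a list with one entry per image, each entry being a per-class list of (n, 5) arrays of [x1, y1, x2, y2, score]. The sketch below only illustrates that target format; the softmax/argmax decoding and the background column at index 0 are assumptions, not the actual helper.

import numpy as np

def logits2output_unfiltered_sketch(logits, boxes, num_classes=1230):
    """Hedged sketch of the expected output format only; not the real helper."""
    outputs = []
    for img_logits, img_boxes in zip(logits, boxes):   # (300, 1231), (300, 4)
        scores = np.exp(img_logits - img_logits.max(axis=1, keepdims=True))
        scores /= scores.sum(axis=1, keepdims=True)     # softmax over classes
        labels = scores[:, 1:].argmax(axis=1)           # assume column 0 is background
        confs = scores[np.arange(len(labels)), labels + 1]
        dets = [np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes)]
        for box, label, conf in zip(img_boxes, labels, confs):
            dets[label] = np.vstack([dets[label], np.append(box, conf)])
        outputs.append(dets)
    return outputs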
Example #2
def main():
    parser = ArgumentParser(description='LVIS Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('--cfg', help='config file path')
    parser.add_argument('--auto-dir',
                        action='store_true',
                        help='auto generate result dir based on config file')
    parser.add_argument('--types',
                        type=str,
                        nargs='+',
                        choices=['proposal_fast', 'proposal', 'bbox', 'segm'],
                        default=['bbox'],
                        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')
    args = parser.parse_args()

    cfg = mmcv.Config.fromfile(args.cfg)
    if args.auto_dir:
        work_dir = cfg.work_dir
        args.result = osp.join(work_dir, args.result)

    dataset = build_dataset(cfg.data.test)
    eval_types = args.types
    if eval_types:
        print('Starting evaluate {}'.format(' and '.join(eval_types)))
        results = mmcv.load(args.result)
        if not isinstance(results[0], dict):
            result_files = results2json(dataset,
                                        results,
                                        args.result,
                                        dump_json=False)
            lvis_eval(result_files, eval_types, dataset.lvis, args.max_dets)
        else:
            for name in results[0]:
                print('\nEvaluating {}'.format(name))
                outputs_ = [out[name] for out in results]
                result_file = args.result + '.{}'.format(name)
                result_files = results2json(dataset,
                                            outputs_,
                                            result_file,
                                            dump_json=False)
                lvis_eval(result_files, eval_types, dataset.lvis,
                          args.max_dets)
Example #3
def main():
    args = parse_args()
    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    dataset_type = DATASETS.get(cfg.data.test.type)
                    if issubclass(dataset_type, datasets.CocoDataset):
                        coco_eval(result_files, eval_types, dataset.coco)
                    elif issubclass(dataset_type, datasets.LVISDataset):
                        max_dets = cfg.test_cfg['rcnn']['max_per_img']
                        lvis_eval(result_files,
                                  eval_types,
                                  dataset.lvis,
                                  max_dets=max_dets)
                    else:
                        raise ValueError(
                            '{} is not supported type for evaluation'.format(
                                dataset_type))
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
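
Examples #1, #3 and most of the later scripts call a parse_args() helper that is not shown. A minimal sketch consistent with the flags they read (args.config, args.checkpoint, args.out, args.json_out, args.eval, args.show, args.tmpdir, args.launcher) is given below; the argument names come from the usage above, but the help strings, choices and defaults are assumptions.

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Test a detector and evaluate results')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file (.pkl/.pickle)')
    parser.add_argument('--json_out',
                        type=str,
                        help='output result file name without extension')
    parser.add_argument('--eval',
                        type=str,
                        nargs='+',
                        choices=['proposal', 'proposal_fast', 'bbox', 'segm'],
                        help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none',
                        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    return parser.parse_args()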
Example #4
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs
    # dataset.img_infos = dataset.img_infos[:100]
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # # old versions did not save class info in checkpoints, this workaround is
    # # for backward compatibility
    # if 'CLASSES' in checkpoint['meta']:
    #     model.CLASSES = checkpoint['meta']['CLASSES']
    # else:
    #     model.CLASSES = dataset.CLASSES

    if args.existing_out:
        result_files = {
            'bbox': './{}.bbox.json'.format(args.existing_json),
            'segm': './{}.segm.json'.format(args.existing_json),
            'proposal': './{}.proposal.json'.format(args.existing_json),
            'full': './{}'.format(args.existing_json)
        }
        eval_types = args.eval
        if (eval_types == ['proposal_fast']
                or eval_types == ['proposal_fast_percat']):
            result_files = mmcv.load(result_files['full'])
            result_files = [item[2] for item in result_files]
            if eval_types == ['proposal_fast_percat']:
                lvis_eval(result_files, eval_types, dataset.lvis,
                          existing_json=args.existing_json)
            else:
                lvis_eval(result_files, eval_types, dataset.lvis)

        else:
            lvis_eval(result_files, eval_types, dataset.lvis)
        exit()

    if os.path.isdir(args.checkpoint):

        ckpts = glob.glob(
            '/home/wangtao/prj/mmdetection/work_dirs/mask_rcnn_r50_fpn_1x_lr0.01_class_ag_boxmask_finetune/epoch*')
        ckpts.sort(key=os.path.getmtime)
        for ckpt in ckpts:
            print('eval {}'.format(ckpt))
            checkpoint = load_checkpoint(model, ckpt, map_location='cpu')
            # old versions did not save class info in checkpoints, this workaround is
            # for backward compatibility
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                outputs = single_gpu_test(model, data_loader, args.show)
            else:
                model = MMDistributedDataParallel(model.cuda())
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            rank, _ = get_dist_info()
            if args.out and rank == 0:
                print('\nwriting results to {}'.format(args.out))
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if eval_types:
                    print('Starting evaluate {}'.format(' and '.join(eval_types)))
                    if eval_types == ['proposal_fast']:
                        result_file = args.out
                        lvis_eval(result_file, eval_types, dataset.coco)
                    else:
                        if not isinstance(outputs[0], dict):
                            result_files = results2json(dataset, outputs, args.out)
                            lvis_eval(result_files, eval_types, dataset.lvis)
                        else:
                            for name in outputs[0]:
                                print('\nEvaluating {}'.format(name))
                                outputs_ = [out[name] for out in outputs]
                                result_file = args.out + '.{}'.format(name)
                                result_files = results2json(dataset, outputs_,
                                                            result_file)
                                lvis_eval(result_files, eval_types, dataset.coco)

            # Save predictions in the COCO json format
            if args.json_out and rank == 0:
                if not isinstance(outputs[0], dict):
                    results2json(dataset, outputs, args.json_out)
                else:
                    for name in outputs[0]:
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.json_out + '.{}'.format(name)
                        results2json(dataset, outputs_, result_file)

    else:
        print('eval {}'.format(args.checkpoint))
        checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
        # old versions did not save class info in checkpoints, this workaround is
        # for backward compatibility
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES

        if not distributed:
            model = MMDataParallel(model, device_ids=[0])
            outputs = single_gpu_test(model, data_loader, args.show)
        else:
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)

        rank, _ = get_dist_info()
        if args.out and rank == 0:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
            ## combine results:
            # load_outputs = mmcv.load('lvis_maskrcnn_r50fpn_clsag_boxmask.pkl')

            eval_types = args.eval
            if eval_types:
                print('Starting evaluate {}'.format(' and '.join(eval_types)))
                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    lvis_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, args.out)
                        lvis_eval(result_files, eval_types, dataset.lvis)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}'.format(name)
                            result_files = results2json(dataset, outputs_,
                                                        result_file)
                            lvis_eval(result_files, eval_types, dataset.coco)

        # Save predictions in the COCO json format
        if args.json_out and rank == 0:
            if not isinstance(outputs[0], dict):
                results2json(dataset, outputs, args.json_out)
            else:
                for name in outputs[0]:
                    outputs_ = [out[name] for out in outputs]
                    result_file = args.json_out + '.{}'.format(name)
                    results2json(dataset, outputs_, result_file)
Example #5
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    ## uncomment to only eval on first 100 imgs

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)

    prefix = './mrcnn_r50_dual'
    # prefix = './mrcnn_r50_ag_cocolt'

    print(prefix)

    set_combine = []
    for i in range(8):
        set_combine += mmcv.load('./{}_set{}.pkl'.format(prefix, i))

    print('start eval')
    if hasattr(dataset, 'coco'):
        result_files = results2json(dataset, set_combine, args.out, dump=False)
        coco_eval(result_files, args.eval, dataset.coco)

    elif hasattr(dataset, 'lvis'):
        result_files = results2json(dataset, set_combine, args.out, dump=False)
        lvis_eval(result_files, args.eval, dataset.lvis)
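
Example #5 only merges the eight per-shard result files (*_set0.pkl … *_set7.pkl) and evaluates them. The shards are presumably produced by running the test script on slices of the dataset; a cleaned-up sketch of that slicing, adapted from the code commented out in Example #6, is shown below. args.set, args.total_set_num and the final dump are assumptions based on that comment.

# inside main(), after dataset = build_dataset(cfg.data.test):
per_set_img_num = int(len(dataset.img_infos) / args.total_set_num)
this_set_start = per_set_img_num * args.set
if args.set < args.total_set_num - 1:
    dataset.img_infos = dataset.img_infos[this_set_start:
                                          this_set_start + per_set_img_num]
else:
    dataset.img_infos = dataset.img_infos[this_set_start:]
# ... run single_gpu_test / multi_gpu_test on the sliced dataset, then e.g.:
# mmcv.dump(outputs, '{}_set{}.pkl'.format(prefix, args.set))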
Example #6
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # per_set_img_num = int(len(dataset.img_infos)/args.total_set_num)
    # this_set_start = per_set_img_num*args.set
    # if args.set < args.total_set_num-1:
    #     dataset.img_infos = dataset.img_infos[this_set_start: this_set_start+per_set_img_num]
    # else:
    #     dataset.img_infos = dataset.img_infos[this_set_start:]
    # dataset.img_infos = dataset.img_infos[:100]

    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=cfg.data.workers_per_gpu,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    # print('load from {}'.format(args.checkpoint))
    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


    ## load longtail classifier

    # def load_ncm_ckpt(ncm_model):
    #     if not os.path.exists('./simple3fc.pth'):
    #         print('start training from 0 epoch')
    #         return 0
    #     else:
    #         epoch = torch.load('./simple3fc_epoch.pth')
    #         load_checkpoint(ncm_model, './simple3fc.pth')
    #         return epoch

    # def load_ncm_ckpt(ncm_model):
    #     if not os.path.exists('./simple3fc.pth'):
    #         print('start training from 0 epoch')
    #         return 0
    #     else:
    #         epoch = torch.load('./finetune_simple3fc_epoch.pth')
    #         load_checkpoint(ncm_model, './finetune_simple3fc.pth')
    #         return epoch

    def load_ncm_ckpt(ncm_model):
        if not os.path.exists(
                './exp_randominit_negpossame_finetune_simple3fc_stage2_epoch.pth'
        ):
            print('start training from 0 epoch')
            return 0
        else:
            epoch = torch.load(
                './exp_randominit_negpossame_finetune_simple3fc_stage2_epoch.pth'
            )
            load_checkpoint(
                ncm_model,
                'exp_randominit_negpossame_finetune_simple3fc_stage2.pth')
            return epoch

    # def load_simple2fc_stage0_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage0_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage0.pth')
    #     return epoch
    #
    # def load_simple2fc_stage1_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage1_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage1.pth')
    #     return epoch
    #
    # def load_simple2fc_stage2_ckpt(ncm_model):
    #
    #     epoch = torch.load('./finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage2_epoch.pth')
    #     load_checkpoint(ncm_model, './finetune2fc_10epoch/exp_randominit_finetune_simple2fc_stage2.pth')
    #     return epoch
    #
    #
    # olongtail_model_stage0 = simple2fc().cuda()
    # epoch = load_simple2fc_stage0_ckpt(olongtail_model_stage0)
    # print('load model epoch {}'.format(epoch))
    # olongtail_model_stage0.eval()
    #
    # olongtail_model_stage1 = simple2fc().cuda()
    # epoch = load_simple2fc_stage1_ckpt(olongtail_model_stage1)
    # olongtail_model_stage1.eval()
    #
    # olongtail_model_stage2 = simple2fc().cuda()
    # epoch = load_simple2fc_stage2_ckpt(olongtail_model_stage2)
    # olongtail_model_stage2.eval()
    #
    # olongtail_model_all_stage = [olongtail_model_stage0, olongtail_model_stage1, olongtail_model_stage2]

    prefix = '3fc_ft'

    def load_stage0_ckpt(ncm_model):

        # epoch = torch.load('./work_dirs/htc/{}_stage0_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage0.pth'.format(prefix))
        # return epoch

    def load_stage1_ckpt(ncm_model):

        # epoch = torch.load('./work_dirs/htc/{}_stage1_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage1.pth'.format(prefix))
        # return epoch

    def load_stage2_ckpt(ncm_model):

        # epoch = torch.load('./work_dirs/htc/{}_stage2_epoch.pth'.format(prefix))
        load_checkpoint(ncm_model,
                        './work_dirs/htc/{}_stage2.pth'.format(prefix))
        # return epoch

    olongtail_model_stage0 = simple3fc().cuda()
    epoch = load_stage0_ckpt(olongtail_model_stage0)
    # print('load model epoch {}'.format(epoch))
    olongtail_model_stage0.eval()

    olongtail_model_stage1 = simple3fc().cuda()
    epoch = load_stage1_ckpt(olongtail_model_stage1)
    olongtail_model_stage1.eval()

    olongtail_model_stage2 = simple3fc().cuda()
    epoch = load_stage2_ckpt(olongtail_model_stage2)
    olongtail_model_stage2.eval()

    olongtail_model_all_stage = [
        olongtail_model_stage0, olongtail_model_stage1, olongtail_model_stage2
    ]

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader,
                                  olongtail_model_all_stage, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, olongtail_model_all_stage,
                                 args.show, args.tmpdir)

    # mmcv.dump(outputs, args.out)
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:

            if eval_types == ['proposal_fast']:
                result_file = args.out
                lvis_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset,
                                                outputs,
                                                args.out,
                                                dump=False)
                    print('Starting evaluate {}'.format(
                        ' and '.join(eval_types)))
                    lvis_eval(result_files, eval_types, dataset.lvis)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        lvis_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
Example #7
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # cfg.data.test.test_mode = False

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    # NOTE: evaluation is restricted to the first 20 imgs here (debug setting)
    dataset.img_infos = dataset.img_infos[:20]

    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    print('load model from {}'.format(cfg.load_from))
    checkpoint = load_checkpoint(model, cfg.load_from, map_location='cpu')

    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES


    def load_ckpt(ncm_model, cal_head):
        print('load cls head {}'.format('{}/{}.pth'.format(cfg.work_dir, cal_head)))
        # epoch = torch.load('{}/{}_epoch.pth'.format(cfg.work_dir, cal_head))
        load_checkpoint(ncm_model, '{}/{}.pth'.format(cfg.work_dir, cal_head))
        # return epoch

    print('use {}'.format(args.cal_head))
    if len(dataset.CLASSES) == 1230:  # lvis
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc().cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc().cuda()
    elif len(dataset.CLASSES) == 80:  # coco
        if '2fc_rand' in args.cal_head:
            calibrated_head = simple2fc(num_classes=81).cuda()
        elif '3fc_rand' in args.cal_head or '3fc_ft' in args.cal_head:
            calibrated_head = simple3fc(num_classes=81).cuda()
    # epoch = load_ckpt(calibrated_head, args.head_ckpt)
    load_ckpt(calibrated_head, args.head_ckpt)
    calibrated_head.eval()


    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, calibrated_head, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda())
        calibrated_head = MMDistributedDataParallel(calibrated_head.cuda())
        outputs = multi_gpu_test(model, data_loader, calibrated_head, args.show, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if len(dataset.CLASSES) == 1230:
            if eval_types:

                if eval_types == ['proposal_fast']:
                    result_file = args.out
                    lvis_eval(result_file, eval_types, dataset.coco)
                else:
                    if not isinstance(outputs[0], dict):
                        result_files = results2json(dataset, outputs, args.out, dump=False)
                        print('Starting evaluate {}'.format(' and '.join(eval_types)))
                        lvis_eval(result_files, eval_types, dataset.lvis)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            result_file = args.out + '.{}'.format(name)
                            result_files = results2json(dataset, outputs_,
                                                        result_file)
                            lvis_eval(result_files, eval_types, dataset.coco)
        elif len(dataset.CLASSES) == 80:
            result_files = results2json(dataset, outputs, args.out, dump=False)
            coco_eval(result_files, args.eval, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
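
The calibration heads simple2fc and simple3fc used in Examples #6 and #7 are project-specific modules that are not shown. From their usage (constructed with an optional num_classes, 1231 for LVIS with background and 81 for COCO, moved to GPU, loaded with load_checkpoint and switched to eval()), a purely hypothetical sketch is given below; in_dim, the hidden width and the layer layout are assumptions, only the constructor signature is taken from the calls above.

import torch.nn as nn

class simple3fc(nn.Module):
    """Hypothetical sketch of the 3-fc calibration head; layer sizes are assumptions."""

    def __init__(self, num_classes=1231, in_dim=1024, hidden_dim=1024):
        super(simple3fc, self).__init__()
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, num_classes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)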
Example #8
def main():
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.data_index % 2)
    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(
        cfg.data.test)  # original - test | changed to test_with_train_data
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=0,  # cfg.data.workers_per_gpu
        dist=distributed,
        shuffle=False)

    # save gt boxes and labels for learning nms
    # for i, data in enumerate(data_loader):
    #     img_id = dataset.img_infos[i]['id']
    #     gt = dataset.get_ann_info(i)
    #     gt_boxes = gt['bboxes']
    #     gt_labels = gt['labels']
    #     filename = f'test_logits/learning_nms_data/{i}/gt_boxes.p'  # file name for new directory
    #     os.makedirs(os.path.dirname(filename), exist_ok=True)
    #     with open(f'test_logits/learning_nms_data/{i}/gt_boxes.p', 'wb') as outfile:  # possible to include img_id
    #         pickle.dump(gt_boxes, outfile)
    #     with open(f'test_logits/learning_nms_data/{i}/gt_labels.p', 'wb') as outfile:
    #         pickle.dump(gt_labels, outfile)
    #
    #     # filename = dataset.img_infos[i]['filename']
    #     # with open(f'test_gt/{filename}.p', 'wb') as outfile:
    #     #     pickle.dump(gt_labels, outfile)

    # save gt instances per class
    # instances_list = np.zeros(1231)
    # for i, data in enumerate(data_loader):  # original script in test_lvis_tnorm.py
    #     gt = dataset.get_ann_info(i)
    #     print(i)
    #     for label in gt['labels']:
    #         instances_list[label] += 1
    # with open('train_instances_list.p', 'wb') as outfile:
    #     pickle.dump(instances_list, outfile)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    model = reweight_cls(model, args.tau)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, logits = single_gpu_test(model, data_loader, args.show, cfg,
                                          args.data_index)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    # save outputs as csv:
    # pd.DataFrame(outputs).to_csv("original_outputs_full.csv")
    # preprocess logits and save them on json file
    # otp = np.asarray(outputs)  # temp
    # df = pd.DataFrame(otp)
    # df.to_csv('otp.csv', index=False)

    bboxes_mat, labels_mat, logits_mat, proposal_num = logits_process(logits)

    # save labels, boxes and logits
    # with open('test_logits/dragon_test_bboxes_mat.p', 'wb') as outfile:
    #     pickle.dump(bboxes_mat, outfile)
    # with open('test_logits/dragon_labels_mat.p', 'wb') as outfile:
    #     pickle.dump(labels_mat, outfile)
    # with open('logits_mat1.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[:1000], outfile)
    # with open('logits_mat2.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[1000:2000], outfile)
    # with open('logits_mat3.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[2000:3000], outfile)
    # with open('logits_mat4.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[3000:4000], outfile)
    # with open('logits_mat5.p', 'wb') as outfile:
    #     pickle.dump(logits_mat[4000:], outfile)

    # filter detections by iou with gt (for dragon training)
    gt_list = []
    results_per_image = []
    for i, data in enumerate(
            data_loader):  # original script in test_lvis_tnorm.py
        # if i < TEMP_DATASET_SIZE*args.data_index:
        #     continue
        if i >= TEMP_DATASET_SIZE:  # temporary condition for testing
            break
        print(i)
        img_id = dataset.img_infos[i]['id']
        gt = dataset.get_ann_info(i)
        gt_dict = dict()
        gt_dict['id'] = img_id
        gt_dict['bboxes'] = gt['bboxes']
        gt_dict['labels'] = gt['labels']
        gt_list.append(gt_dict)
        # filter logits according to equivalent ground truth.
        # after filtering, for each image we get a list with one entry per class
        # holding the detections belonging to that class.
        results = filter_logits_by_gt(bboxes_mat[i], logits_mat[i], gt_list[i],
                                      proposal_num[i], i)
        results_per_image.append(results)
    with open('dragon_bboxes_logits_map24.p', 'wb') as outfile:
        pickle.dump(results_per_image, outfile)
    print('saved')

    # evaluation:
    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                lvis_eval(result_file, eval_types, dataset.lvis)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out,
                                                args.data_index)
                    lvis_eval(result_files,
                              eval_types,
                              dataset.lvis,
                              max_dets=300)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        lvis_eval(result_files, eval_types, dataset.lvis)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)
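
reweight_cls(model, args.tau) in Example #8 is not defined in these listings. Given its name and the tau argument, it is presumably a tau-normalisation of the classifier weights in the spirit of decoupled long-tail classification; the sketch below is an assumption about that behaviour, with the model.bbox_head.fc_cls attribute path borrowed from the analyze code in Example #9.

import torch

def reweight_cls_sketch(model, tau):
    """Hedged sketch: rescale each class weight vector by 1 / ||w_c||^tau."""
    with torch.no_grad():
        w = model.bbox_head.fc_cls.weight.data          # (num_classes + 1, feat_dim)
        norms = w.norm(p=2, dim=1, keepdim=True).clamp(min=1e-12)
        model.bbox_head.fc_cls.weight.data = w / norms.pow(tau)
    return model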
Example #9
def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.out is not None and not args.auto_dir:
        assert osp.exists(osp.dirname(
            args.out)), 'output file directory does not exist!!'

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    if args.auto_dir:
        work_dir = cfg.work_dir
        args.checkpoint = osp.join(work_dir, args.checkpoint)
        if args.out:
            args.out = osp.join(work_dir, args.out)
        if args.json_out:
            args.json_out = osp.join(work_dir, args.json_out)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # if cfg.dataset_type == 'LvisDataSet':
    #     cfg.test_cfg.rcnn.max_per_img = 300
    #     cfg.test_cfg.rcnn.score_thr = 0.01

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    # use the val dataset to visualize ground truth bboxes
    dataset = build_dataset(cfg.data.val if args.show_gt else cfg.data.test)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=1,
                                   dist=distributed,
                                   shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    if args.analyze:
        result = dict()
        result['fc_weight'] = model.bbox_head.fc_cls.weight.data
        samples_per_cls_file = cfg.data.train.samples_per_cls_file
        if osp.exists(samples_per_cls_file):  # add samples_per_cls_file
            with open(samples_per_cls_file, 'r') as f:
                samples_per_cls = torch.Tensor(
                    [int(line.strip()) for line in f.readlines()])
        analyze(result, samples_per_cls)
        exit()

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model,
                                  data_loader,
                                  args.show,
                                  show_gt=args.show_gt,
                                  work_dir=cfg.work_dir)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting evaluate {}'.format(' and '.join(eval_types)))
            if not isinstance(outputs[0], dict):
                result_files = results2json(dataset,
                                            outputs,
                                            args.out,
                                            dump_json=False)
                lvis_eval(result_files, eval_types, dataset.lvis)
            else:
                for name in outputs[0]:
                    print('\nEvaluating {}'.format(name))
                    outputs_ = [out[name] for out in outputs]
                    result_file = args.out + '.{}'.format(name)
                    result_files = results2json(dataset,
                                                outputs_,
                                                result_file,
                                                dump_json=False)
                    lvis_eval(result_files, eval_types, dataset.lvis)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out, dump_json=True)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file, dump_json=True)