Example #1
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps, all_bodys = \
            multi_gpu_test_net_on_dataset(
                weights_file, dataset_name, proposal_file,
                num_images, output_dir
            )
    else:
        all_boxes, all_segms, all_keyps, all_bodys = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, all_bodys, output_dir
    )
    return results
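For context, a call like the following would typically drive this function after the Detectron config has been loaded. This is a hypothetical, minimal invocation: the config path, weights file, dataset name, and output directory are placeholders, not values taken from the examples above.

from detectron.core.config import assert_and_infer_cfg, cfg, merge_cfg_from_file

# Placeholder paths/names; substitute your own config, weights and dataset.
merge_cfg_from_file('/path/to/config.yaml')
assert_and_infer_cfg()

results = test_net_on_dataset(
    weights_file='/path/to/model_final.pkl',
    dataset_name='coco_2014_minival',   # any dataset known to JsonDataset
    proposal_file=None,                 # only needed for proposal-based models
    output_dir='/tmp/detectron-output',
    multi_gpu=cfg.NUM_GPUS > 1,
)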
Example #2
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results
Example #3
def do_reval(dataset_name, output_dir, args):
    dataset = JsonDataset(dataset_name)
    dets = load_object(os.path.join(output_dir, 'detections.pkl'))

    # Override config with the one saved in the detections file
    if args.cfg_file is not None:
        core_config.merge_cfg_from_cfg(core_config.load_cfg(dets['cfg']))
    else:
        core_config._merge_a_into_b(core_config.load_cfg(dets['cfg']), cfg)

    # Re-filter detections on the score threshold
    dets['all_boxes'] = [
        [
            im[im[:, 4] > cfg.TEST.SCORE_THRESH, :] if len(im) != 0 else []
            for im in cls
        ]
        for cls in dets['all_boxes']
    ]

    results = task_evaluation.evaluate_all(dataset,
                                           dets['all_boxes'],
                                           dets['all_segms'],
                                           dets['all_keyps'],
                                           output_dir,
                                           use_matlab=args.matlab_eval)
    task_evaluation.log_copy_paste_friendly_results(results)
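The re-filtering step above keeps only detections whose score (column 4 of each (N, 5) box array) exceeds cfg.TEST.SCORE_THRESH. Below is a minimal, self-contained NumPy sketch of the same pattern, with dummy data and a hard-coded threshold standing in for the config value.

import numpy as np

SCORE_THRESH = 0.5  # stand-in for cfg.TEST.SCORE_THRESH

# all_boxes[cls][img] is an (N, 5) array of [x1, y1, x2, y2, score] rows
all_boxes = [
    [np.array([[0, 0, 10, 10, 0.9], [1, 1, 5, 5, 0.2]]), []],            # class 0
    [np.array([[2, 2, 8, 8, 0.7]]), np.array([[0, 0, 3, 3, 0.1]])],      # class 1
]

filtered = [
    [im[im[:, 4] > SCORE_THRESH, :] if len(im) != 0 else [] for im in cls]
    for cls in all_boxes
]

# Only the 0.9 and 0.7 detections survive the threshold
print([[len(im) for im in cls] for cls in filtered])  # [[1, 0], [1, 0]]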
Example #4
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    model = ''
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps, model = test_net(weights_file,
                                                          dataset_name,
                                                          proposal_file,
                                                          output_dir,
                                                          gpu_id=gpu_id)

    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir)

    roc_data = metrics.calculate_roc(all_boxes, dataset, cfg.TEST.IOU)
    froc_data = metrics.calculate_froc(all_boxes, dataset, cfg.TEST.IOU)
    auc_score = {
        dataset.name: {
            u'box': {
                u'AUC': auc(roc_data[0], roc_data[1])
            }
        }
    }
    afroc_score = np.trapz(froc_data[0], froc_data[2])
    afroc = {dataset.name: {u'box': {u'AFROC': afroc_score}}}
    print('Afroc score: {:.4f}'.format(afroc_score))

    plot.plot_roc(roc_data, auc_score[dataset.name][u'box'][u'AUC'], dataset,
                  model, output_dir)
    plot.plot_froc(froc_data, dataset, model, output_dir)
    plot.plot_afroc(froc_data, dataset, model, output_dir)

    save.np_save(np.stack(roc_data), 'roc', dataset, model, output_dir)
    save.np_save(np.stack(froc_data), 'froc', dataset, model, output_dir)

    results[dataset_name][u'box'].update(auc_score[dataset.name][u'box'])
    results[dataset_name][u'box'].update(afroc[dataset.name][u'box'])
    return results, auc_score, afroc_score
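The AUC and AFROC values above are areas under the ROC and FROC curves obtained by trapezoidal integration; the `metrics`, `plot`, and `save` helpers are project-specific and not shown here. A toy illustration of the np.trapz call used for the AFROC score, with made-up curve points:

import numpy as np

# Toy curve: x could be a false-positive rate, y a sensitivity / TPR.
x = np.array([0.0, 0.1, 0.3, 1.0])
y = np.array([0.0, 0.6, 0.8, 1.0])

# np.trapz(y, x) integrates y over x with the trapezoidal rule.
area = np.trapz(y, x)
print('Area under curve: {:.4f}'.format(area))  # 0.8000 for this toy curve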
Example #5
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    ################################################################
    import pickle
    res_file = os.path.join(output_dir,
                            'bbox_' + dataset_name + '_results.json')
    print("res_file = {}==========================".format(res_file))
    if os.path.exists(res_file):
        import detectron.datasets.json_dataset_evaluator as json_dataset_evaluator
        print("res_file = {} exists! Loading res_file".format(res_file))
        coco_eval = json_dataset_evaluator._do_detection_eval(
            dataset, res_file, output_dir)
        box_results = task_evaluation._coco_eval_to_box_results(coco_eval)
        results = OrderedDict([(dataset.name, box_results)])
        return results
    ################################################################
    det_name = "detections.pkl"
    det_file = os.path.join(output_dir, det_name)
    print("det_file = {}==========================".format(det_file))
    if os.path.exists(det_file):
        print("{} exists! Loading detection results".format(det_file))
        # Open the pickle in binary mode
        with open(det_file, 'rb') as f:
            res = pickle.load(f)
        all_boxes = res['all_boxes']
        all_segms = res['all_segms']
        all_keyps = res['all_keyps']
    ################################################################
    elif multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(weights_file,
                                                   dataset_name,
                                                   proposal_file,
                                                   output_dir,
                                                   gpu_id=gpu_id)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, output_dir)
    return results
Example #6
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0,
                        subset_pointer=None):
    """Run inference on a dataset."""
    if dataset_name[:5] != 'live_':
        dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file,
            dataset_name,
            proposal_file,
            output_dir,
            gpu_id=gpu_id,
            subset_pointer=subset_pointer)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        test_timer.average_time))

    if cfg.TEST.COCO_TO_VOC:
        all_boxes = coco_detects_to_voc(all_boxes)

    if dataset_name[:5] == 'live_':
        return None

    results = task_evaluation.evaluate_all(dataset,
                                           all_boxes,
                                           all_segms,
                                           all_keyps,
                                           output_dir,
                                           subset_pointer=subset_pointer)

    if subset_pointer is not None:
        # prune the subset for the following datasets:
        subset_pointer.subset = subset_pointer.subset[len(dataset.get_roidb()):]
        # Should have 0 remaining for the last set, voc_2012_train.
        print('remains', len(subset_pointer.subset))

    return results
Example #7
def do_reval(dataset_name, output_dir, args):
    dataset = JsonDataset(dataset_name)
    dets = load_object(os.path.join(output_dir, 'detections.pkl'))

    # Override config with the one saved in the detections file
    if args.cfg_file is not None:
        core_config.merge_cfg_from_cfg(core_config.load_cfg(dets['cfg']))
    else:
        core_config._merge_a_into_b(core_config.load_cfg(dets['cfg']), cfg)
    results = task_evaluation.evaluate_all(dataset,
                                           dets['all_boxes'],
                                           dets['all_segms'],
                                           dets['all_keyps'],
                                           output_dir,
                                           use_matlab=args.matlab_eval)
    task_evaluation.log_copy_paste_friendly_results(results)
Example #8
def do_reval(dataset_name, output_dir, args):
    dataset = JsonDataset(dataset_name)
    dets = load_object(os.path.join(output_dir, 'detections.pkl'))

    # Override config with the one saved in the detections file
    if args.cfg_file is not None:
        core_config.merge_cfg_from_cfg(core_config.load_cfg(dets['cfg']))
    else:
        core_config._merge_a_into_b(core_config.load_cfg(dets['cfg']), cfg)
    results = task_evaluation.evaluate_all(
        dataset,
        dets['all_boxes'],
        dets['all_segms'],
        dets['all_keyps'],
        output_dir,
        use_matlab=args.matlab_eval
    )
    task_evaluation.log_copy_paste_friendly_results(results)
Example #9
def do_reval(dataset_name, output_dir, args):
    dataset = JsonDataset(dataset_name)
    with open(os.path.join(output_dir, 'detections.pkl'), 'rb') as f:
        dets = pickle.load(f)
    # Override config with the one saved in the detections file
    if args.cfg_file is not None:
        # Merging dets['cfg'] here would only restore the config saved with the
        # detections; merge the config file passed on the command line instead.
        # core_config.merge_cfg_from_cfg(core_config.load_cfg(dets['cfg']))
        core_config.merge_cfg_from_file(args.cfg_file)
    else:
        core_config._merge_a_into_b(core_config.load_cfg(dets['cfg']), cfg)
    results = task_evaluation.evaluate_all(dataset,
                                           dets['all_boxes'],
                                           dets['all_segms'],
                                           dets['all_keyps'],
                                           output_dir,
                                           use_matlab=args.matlab_eval)
    task_evaluation.log_copy_paste_friendly_results(results)
Example #10
def test_net_on_dataset(weights_file,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):

    load_from_tmp = False
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    if not load_from_tmp:
        test_timer = Timer()
        test_timer.tic()
        if multi_gpu:
            num_images = len(dataset.get_roidb())
            all_boxes, all_segms, all_keyps, all_personmasks, all_parss, all_bodys = \
                multi_gpu_test_net_on_dataset(
                    weights_file, dataset_name, proposal_file,
                    num_images, output_dir
                )
        else:
            all_boxes, all_segms, all_keyps, all_personmasks, all_parss, all_bodys = test_net(
                weights_file,
                dataset_name,
                proposal_file,
                output_dir,
                gpu_id=gpu_id)
        test_timer.toc()
        logger.info('Total inference time: {:.3f}s'.format(
            test_timer.average_time))
    else:
        tmp_path = '/coco/results/detectron-output_mulres_intersup_mulsaclesup_lowfeat23_int05/test/dense_coco_2014_minival/generalized_rcnn/detections.pkl'
        #tmp_path = '/coco/results/detectron-output_mulres_intersup/test/dense_coco_2014_minival/generalized_rcnn/detections.pkl'
        #tmp_path = '/coco/results/detectron-output_mulres_intersup_onlysegpart_fliped/test/MHP_seg_val/generalized_rcnn/detections.pkl'
        #tmp_path = '/coco/results/detectron-output_mulres_intersup_onlysegpart_fliped/test/CIHP_seg_val/generalized_rcnn/detections.pkl'

        print('detection results from: ', tmp_path)
        # Open the pickle in binary mode
        with open(tmp_path, 'rb') as tmp_file:
            tmp_pkl = pickle.load(tmp_file)
        all_boxes = tmp_pkl['all_boxes']
        all_segms = tmp_pkl['all_segms']
        all_keyps = tmp_pkl['all_keyps']
        if 'all_personmasks' not in tmp_pkl:
            all_personmasks = None
        else:
            all_personmasks = tmp_pkl['all_personmasks']
        all_parss = tmp_pkl['all_parss']
        all_bodys = tmp_pkl['all_bodys']
        '''
        for i in range(len(all_bodys[1])):
            for j in range(len(all_bodys[1][i])):
                #print("all_bodys[1][i][j]: ",all_bodys[1][i][j].shape)

                all_bodys[1][i][j][0][np.where(all_bodys[1][i][j][0] == 3)] = 4
                all_bodys[1][i][j][0][np.where(all_bodys[1][i][j][0] == 4)] = 3
                all_bodys[1][i][j][0][np.where(all_bodys[1][i][j][0] == 6)] = 5
                all_bodys[1][i][j][0][np.where(all_bodys[1][i][j][0] == 5)] = 6
        '''

        if cfg.VIS:
            vis_wholedataset(
                dataset_name,
                proposal_file,
                output_dir,
                all_boxes=all_boxes,
                all_segms=all_segms,
                all_keyps=all_keyps,
                all_personmasks=all_personmasks,
                all_parss=all_parss,
                all_bodys=all_bodys,
                img_name=['COCO_val2014_000000464089.jpg'],
                show_box=False,
            )
    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                           all_keyps, all_personmasks,
                                           all_parss, all_bodys, output_dir)
    return results
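The cached-detections branch above assumes a detections.pkl holding the per-task result lists under fixed keys. A minimal round-trip sketch of that storage format, with dummy values in place of real detections (the key names mirror the ones read above):

import pickle

dets = {
    'all_boxes': [], 'all_segms': [], 'all_keyps': [],
    'all_personmasks': None, 'all_parss': [], 'all_bodys': [],
}

# Pickles are binary files: write and read them in 'wb'/'rb' mode.
with open('/tmp/detections.pkl', 'wb') as f:
    pickle.dump(dets, f, pickle.HIGHEST_PROTOCOL)

with open('/tmp/detections.pkl', 'rb') as f:
    loaded = pickle.load(f)

print(sorted(loaded.keys()))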
Example #11
def grid_search():
    dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, None 
    )
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    subinds = np.array_split(range(num_images), cfg.NUM_GPUS)

    tag = 'detection'
    output_dir = get_output_dir(cfg.TEST.DATASETS, training=False)

    det_file = os.path.join(output_dir, 'detections.pkl')
    outputs = load_object(det_file)

    print(len(outputs))
    all_dets_cache = outputs['all_boxes']
    print(len(all_dets_cache))

    all_boxes_cache = []
    all_scores_cache = []
    for i, entry in enumerate(roidb):
        print(i)
        max_det = all_dets_cache[1][i].shape[0]
        print(max_det, num_classes)
        
        boxes = np.zeros((max_det, 4), dtype=np.float32)
        scores = np.zeros((max_det, num_classes), dtype=np.float32)
        boxes[:] = -1
        scores[:] = -1
        for j in range(num_classes):
            if len(all_dets_cache[j]) == 0:
                continue
            scores[:, j] = all_dets_cache[j][i][:, 4]
        boxes[:, 0:4] = all_dets_cache[1][i][:, :4]
        boxes = np.tile(boxes, (1, scores.shape[1]))
        print(scores.shape, boxes.shape)
        all_boxes_cache.append(boxes)
        all_scores_cache.append(scores)

    timers = defaultdict(Timer)
    resultss = []
    nmses = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]
    threshs = [1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
    max_per_images = [10000, 1000, 100, 10, 1]

    for nms in nmses:
        for thresh in threshs:
            for max_per_image in max_per_images:
                print("----------------------------------------------------")
                print('NUM: ', nms, ' Thresh: ', thresh, ' MAX_PER_IM: ', max_per_image)
                cfg.immutable(False)
                cfg.TEST.NMS = nms
                cfg.TEST.SCORE_THRESH = thresh
                cfg.TEST.DETECTIONS_PER_IM = max_per_image
                cfg.immutable(True)

                all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
                for i, entry in enumerate(roidb):
                    # print(i)
                    timers['im_detect_bbox'].tic()
                    scores = all_scores_cache[i]
                    boxes = all_boxes_cache[i]
                    # print(scores.shape, boxes.shape)

                    timers['im_detect_bbox'].toc()

                    timers['misc_bbox'].tic()
                    scores, boxes, cls_boxes_i = box_results_with_nms_and_limit(scores, boxes)
                    timers['misc_bbox'].toc()

                    extend_results(i, all_boxes, cls_boxes_i)

                results = task_evaluation.evaluate_all(
                    dataset, all_boxes, all_segms, all_keyps, output_dir
                )
                print(results)
                # Record this grid point so it reaches the CSV written below
                resultss.append([nms, thresh, max_per_image, results])

    print(resultss)
    # Use a context manager so the CSV file is closed and flushed
    with open('grid_search.csv', 'w') as f:
        wr = csv.writer(f, dialect='excel')
        wr.writerows(resultss)
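The triple-nested loop above sweeps every combination of NMS threshold, score threshold, and per-image detection cap (11 x 10 x 5 = 550 settings). An equivalent, more compact way to enumerate the same grid, shown here independently of the Detectron config object, is itertools.product:

import itertools

nmses = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]
threshs = [1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
max_per_images = [10000, 1000, 100, 10, 1]

# Yields the 550 (nms, thresh, max_per_image) tuples in the same order
# as the nested loops above.
for nms, thresh, max_per_image in itertools.product(nmses, threshs, max_per_images):
    print('NMS:', nms, 'Thresh:', thresh, 'MAX_PER_IM:', max_per_image)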