Example #1
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(dataset,
                                                          all_boxes,
                                                          output_dir,
                                                          use_salt=not_comp,
                                                          cleanup=not_comp)
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(dataset,
                                                          all_boxes,
                                                          output_dir,
                                                          use_salt=not_comp,
                                                          cleanup=not_comp)
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(dataset,
                                                        all_boxes,
                                                        output_dir,
                                                        use_matlab=use_matlab)
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError('No evaluator for dataset: {}'.format(
            dataset.name))
    return OrderedDict([(dataset.name, box_results)])
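
The dispatcher above expects detections in the `all_boxes` layout used throughout these examples. Below is a minimal, hypothetical sketch of that structure with made-up numbers; the final call is commented out because `dataset` (a JsonDataset) and `output_dir` depend on your setup:

import numpy as np

# Sketch only (not part of the example above): all_boxes is indexed as
# all_boxes[cls][image] and holds an (N, 5) float array of
# [x1, y1, x2, y2, score] rows per class/image pair; class 0 (background)
# is typically left empty.
num_classes, num_images = 2, 3
all_boxes = [[np.empty((0, 5), dtype=np.float32) for _ in range(num_images)]
             for _ in range(num_classes)]

# one made-up detection for class 1 in image 0
all_boxes[1][0] = np.array([[10.0, 20.0, 110.0, 220.0, 0.9]], dtype=np.float32)

# results = evaluate_boxes(dataset, all_boxes, output_dir)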
Example #2
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)])
Example #3
def evaluate_boxes(dataset, all_boxes, output_dir, args=None):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_wad_evaluator(dataset):
        wad_eval = json_dataset_evaluator.evaluate_boxes_wad(
            dataset, all_boxes, output_dir,
            use_salt=not_comp, cleanup=not_comp, args=args
        )
        box_results = _coco_eval_to_box_results(wad_eval)
    else:
        raise NotImplementedError('No evaluator for dataset: {}'.format(dataset.name))
    return OrderedDict([(dataset.name, box_results)])
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(dataset,
                                                          all_boxes,
                                                          output_dir,
                                                          use_salt=not_comp,
                                                          cleanup=not_comp)
        box_results = _coco_eval_to_box_results(coco_eval)
    else:
        raise NotImplementedError('No evaluator for dataset: {}'.format(
            dataset.name))
    return OrderedDict([(dataset.name, box_results)])
def eval_json(det_json, gt_json):
    # NOTE: `json`, `np`, `os`, `pickle`, `JsonDataset`, `dataset_catalog`,
    # `empty_results`, `get_by_filename`, `get_boxes_by_img_id`,
    # `gt_dataset_name` and `output_dir` are assumed to be imported/defined at
    # module level in the original file; the `gt_json` argument is overwritten
    # below by the annotation file registered for `gt_dataset_name`.
    json_dataset = JsonDataset(gt_dataset_name)
    gt_json = dataset_catalog.DATASETS[gt_dataset_name]['annotation_file']
    with open(det_json, 'rb') as f:
        det = json.load(f)
    with open(gt_json, 'rb') as f:
        gt = json.load(f)

    # convert det to the all_boxes list
    num_images = len(gt['images'])
    num_classes = 2
    print('Total number of images:', len(det['images']))
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    for cls in range(num_classes):
        for image in range(num_images):
            filename = gt['images'][image]['file_name']
            fid = gt['images'][image]['id']
            img_prop = get_by_filename(det, filename)
            if img_prop is not None:
                img_id, det_prop = img_prop
                boxes = get_boxes_by_img_id(det, img_id)
                if image % 100 == 0:
                    print('Reading detections for:', filename, '--',
                          det_prop['file_name'])
                    print('Det json:', det_json)
                if len(boxes) > 0:
                    # keep the score column when the detections provide one
                    if 'score' in boxes[0]:
                        boxes = np.array(
                            [b['bbox'] + [b['score']] for b in boxes])
                    else:
                        boxes = np.array([b['bbox'] for b in boxes])
                    # boxes are COCO [x, y, w, h]; add w, h to get (x2, y2)
                    boxes[:, 2] += boxes[:, 0]
                    boxes[:, 3] += boxes[:, 1]
                    all_boxes[cls][image] = boxes
            else:
                all_boxes[cls][image] = []
    # save detections
    with open(os.path.join(output_dir, 'detections.pkl'), 'wb') as f:
        pickle.dump(
            dict(all_boxes=all_boxes, all_segms=all_segms,
                 all_keyps=all_keyps), f)
    coco_eval = evaluate_boxes(json_dataset, all_boxes, output_dir)
    #coco_eval = task_evaluation.evaluate_all(json_dataset,all_boxes,all_segms,all_keyps,output_dir)

    disp_detection_eval_metrics(json_dataset,
                                coco_eval,
                                iou_low=0.5,
                                iou_high=0.5,
                                output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset,
                                coco_eval,
                                iou_low=0.75,
                                iou_high=0.75,
                                output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset,
                                coco_eval,
                                iou_low=0.5,
                                iou_high=0.95,
                                output_dir=output_dir)
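
For reference, the box conversion done inside eval_json can be checked in isolation. A minimal sketch with made-up values (the dict keys follow the COCO detection format; nothing here comes from the examples above):

import numpy as np

# Standalone check of the conversion used above: COCO detections store
# [x, y, w, h]; adding width/height to the origin gives the
# [x1, y1, x2, y2] corners (plus score) that evaluate_boxes expects.
dets = [{'bbox': [10.0, 20.0, 100.0, 50.0], 'score': 0.8}]
boxes = np.array([b['bbox'] + [b['score']] for b in dets])
boxes[:, 2] += boxes[:, 0]  # x2 = x + w
boxes[:, 3] += boxes[:, 1]  # y2 = y + h
print(boxes)  # [[ 10.  20. 110.  70.   0.8]]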