Code Example #1
 # Fragment of a test case; assumes json, os, re, numpy as np, tensorflow as
 # tf, and coco_tools are imported at module level, and that the test's setUp
 # defines the self._detections_list fixture compared against below.
 def testExportDetectionsToCOCO(self):
     image_ids = ['first', 'second']
     detections_boxes = [
         np.array([[100, 100, 200, 200]], np.float64),
         np.array([[50, 50, 100, 100]], np.float64)
     ]
     detections_scores = [
         np.array([.8], np.float64),
         np.array([.7], np.float64)
     ]
     detections_classes = [np.array([1], np.int32), np.array([1], np.int32)]
     categories = [{
         'id': 0,
         'name': 'person'
     }, {
         'id': 1,
         'name': 'cat'
     }, {
         'id': 2,
         'name': 'dog'
     }]
     output_path = os.path.join(tf.compat.v1.test.get_temp_dir(),
                                'detections.json')
     result = coco_tools.ExportDetectionsToCOCO(image_ids,
                                                detections_boxes,
                                                detections_scores,
                                                detections_classes,
                                                categories,
                                                output_path=output_path)
     self.assertListEqual(result, self._detections_list)
     with tf.io.gfile.GFile(output_path, 'r') as f:
         written_result = f.read()
         # The json output should have floats written to 4 digits of precision.
         matcher = re.compile(r'"bbox":\s+\[\n\s+\d+\.\d{4},',
                              re.MULTILINE)
         self.assertTrue(matcher.findall(written_result))
         written_result = json.loads(written_result)
         self.assertAlmostEqual(result, written_result)
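
For reference outside the test harness, here is a minimal standalone sketch of the same call. It assumes coco_tools is object_detection.utils.coco_tools and that input boxes are absolute [ymin, xmin, ymax, xmax] coordinates, which the exporter rewrites into COCO [x, y, width, height] form; the expected output shown in the trailing comment is an assumption based on that convention.

import numpy as np
from object_detection.utils import coco_tools

detections = coco_tools.ExportDetectionsToCOCO(
    ['first'],                                       # image ids
    [np.array([[100, 100, 200, 200]], np.float64)],  # boxes per image
    [np.array([.8], np.float64)],                    # scores per image
    [np.array([1], np.int32)],                       # class ids per image
    [{'id': 1, 'name': 'cat'}])                      # categories
# Each entry should resemble:
# {'image_id': 'first', 'category_id': 1,
#  'bbox': [100.0, 100.0, 100.0, 100.0], 'score': 0.8}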
Code Example #2
                # Tail of convert_OBJ_DETE_gt: for each image, collect box
                # coordinates and integer class ids; each row of `data` holds
                # a class name followed by its box coordinates.
                ground_truth_box = []
                ground_truth_class = []
                if len(data) == 0:
                    ground_truth_box.append([])
                for i in range(len(data)):
                    ground_truth_box.append(data[i][1:])
                    ground_truth_class.append(label_map_dict[data[i][0]])
            ground_truth_boxes.append(np.array(ground_truth_box, np.float64))
            ground_truth_classes.append(np.array(ground_truth_class, np.int32))
    return image_ids1, ground_truth_boxes, ground_truth_classes


pred_image_ids, detection_boxes, detection_scores, detection_classes = convert_OBJ_DETE_dt(
    pred_path)
truth_image_ids, ground_truth_boxes, ground_truth_classes = convert_OBJ_DETE_gt(
    truth_path)

# The COCO exporters expect a list of category dicts rather than the
# name-to-id mapping, so build one from label_map_dict first.
categories = [{'id': cat_id, 'name': name}
              for name, cat_id in label_map_dict.items()]
detections_list = coco_tools.ExportDetectionsToCOCO(pred_image_ids,
                                                    detection_boxes,
                                                    detection_scores,
                                                    detection_classes,
                                                    categories)
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(truth_image_ids,
                                                      ground_truth_boxes,
                                                      ground_truth_classes,
                                                      categories)

groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
metrics, ap = evaluator.ComputeMetrics()
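
The conversion helpers above assume a label_map_dict that maps class names to integer ids. If a label_map.pbtxt is available, one way to build it with the Object Detection API is sketched below; the path is a placeholder.

from object_detection.utils import label_map_util

# get_label_map_dict returns {class_name: class_id}.
label_map_dict = label_map_util.get_label_map_dict('path/to/label_map.pbtxt')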
Code Example #3
def main(_):
    model_path = FLAGS.model_path
    voc_path = FLAGS.voc_path
    labelmap_file = FLAGS.labelmap_file
    set_file = FLAGS.set_file
    scaler_file = FLAGS.scaler_file
    side_input = FLAGS.side_input

    # Load Model and read label_map.pbtxt
    model = file_util.load_model(model_path)
    categories, labelmap_dict, category_index = file_util.load_labelmap(
        voc_path, labelmap_file)

    # Collect groundtruth and detection information
    gt_ids, gt_boxes, gt_classes = _read_annotations_for_groundtruth(
        voc_path, set_file, labelmap_dict)
    dt_ids, dt_boxes, dt_classes, dt_scores, time_per_image = inference(
        model, voc_path, model_path, set_file, category_index, scaler_file,
        side_input)

    # COCO Evaluation
    groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
        gt_ids, gt_boxes, gt_classes, categories)
    detections_list = coco_tools.ExportDetectionsToCOCO(
        dt_ids, dt_boxes, dt_scores, dt_classes, categories)
    groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    detections = groundtruth.LoadAnnotations(detections_list)
    evaluator = coco_tools.COCOEvalWrapper(groundtruth,
                                           detections,
                                           agnostic_mode=False)
    summary_metrics, per_category_ap = evaluator.ComputeMetrics(
        include_metrics_per_category=True, all_metrics_per_category=True)

    # Convert metrics to percent
    for k, v in summary_metrics.items():
        summary_metrics[k] = v * 100
    for k, v in per_category_ap.items():
        per_category_ap[k] = v * 100
    print(summary_metrics)
    print(per_category_ap)

    # Work around a TensorFlow bug: FLOPs cannot be calculated for models
    # with custom side inputs, so report 0 in that case.
    if not side_input:
        flops = get_flops(model)
    else:
        flops = 0

    metrics_dict = {
        'flops': flops / 1e9,  # GFLOPs
        'time_per_image': time_per_image * 1000  # milliseconds
    }

    # Read the trainable-parameter count and name dictionary from the pickle
    # file; use a context manager so the file handle is closed.
    with open(os.path.join(model_path, 'metrics', 'name_params.pkl'),
              'rb') as f:
        name_params_dict = pickle.load(f)
    metrics_dict.update(name_params_dict)

    metrics_dict.update(summary_metrics)
    metrics_dict.update(per_category_ap)

    # Save Metrics to CSV
    metrics_df = pd.DataFrame.from_records([metrics_dict])
    metrics_df.to_csv(os.path.join(model_path, 'metrics', 'metrics.csv'))
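
The get_flops helper above is project-specific. One common way to count FLOPs for a single-input Keras model uses the TF1 profiler; the sketch below is an assumption about how such a helper might look, not the example's actual implementation.

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import (
    convert_variables_to_constants_v2_as_graph)


def get_flops(model):
    # Trace the model once at batch size 1, freeze its variables, and ask
    # the TF1 profiler to count floating-point operations.
    spec = tf.TensorSpec([1] + model.inputs[0].shape[1:].as_list(),
                         model.inputs[0].dtype)
    concrete = tf.function(model).get_concrete_function(spec)
    _, graph_def = convert_variables_to_constants_v2_as_graph(concrete)
    with tf.Graph().as_default() as graph:
        tf.compat.v1.import_graph_def(graph_def, name='')
        opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
        info = tf.compat.v1.profiler.profile(graph=graph, options=opts)
    return info.total_float_ops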
Code Example #4
        # Tail of the prediction loop: collect one image's box coordinates,
        # confidence scores, and class ids for the COCO export below.
        pred_bboxes_list.append(coords)
        pred_conf_list.append(conf)
        pred_classes_list.append(arr0)

# category list for the single class in this example
categories = [{'id': 0, 'name': 'dam'}]

# prepare ground_truth input for COCOWrapper
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
    gt_img_ids,
    gt_bboxes_list,
    gt_classes_list,
    categories,
)

# prepare detections input for COCOWrapper
detections_list = coco_tools.ExportDetectionsToCOCO(pred_img_ids,
                                                    pred_bboxes_list,
                                                    pred_conf_list,
                                                    pred_classes_list,
                                                    categories)

# run the COCO evaluation
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth,
                                       detections,
                                       agnostic_mode=False)
metrics, per_category_ap = evaluator.ComputeMetrics()
print('complete')
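
As a follow-up, here is a sketch of reading individual values back out of the summary dict; the key names follow the Object Detection API's COCO eval wrapper, but verify them against your coco_tools version.

# per_category_ap stays empty unless per-category metrics were requested
# via ComputeMetrics(include_metrics_per_category=True).
print('mAP@[.5:.95]: {:.3f}'.format(metrics['Precision/mAP']))
print('mAP@.50IOU:   {:.3f}'.format(metrics['Precision/mAP@.50IOU']))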