# Assumed imports for this snippet: json, os, re, numpy as np,
# tensorflow as tf, and coco_tools from object_detection.metrics.
def testExportGroundtruthToCOCO(self):
    image_ids = ['first', 'second']
    groundtruth_boxes = [
        np.array([[100, 100, 200, 200]], float),  # np.float was removed in NumPy 1.24
        np.array([[50, 50, 100, 100]], float)
    ]
    groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)]
    categories = [{'id': 0, 'name': 'person'},
                  {'id': 1, 'name': 'cat'},
                  {'id': 2, 'name': 'dog'}]
    output_path = os.path.join(tf.test.get_temp_dir(), 'groundtruth.json')
    result = coco_tools.ExportGroundtruthToCOCO(image_ids,
                                                groundtruth_boxes,
                                                groundtruth_classes,
                                                categories,
                                                output_path=output_path)
    self.assertDictEqual(result, self._groundtruth_dict)
    with tf.io.gfile.GFile(output_path, 'r') as f:  # tf.gfile is the removed TF1 alias
        written_result = f.read()
        # The json output should have floats written to 4 digits of precision.
        matcher = re.compile(r'"bbox":\s+\[\n\s+\d+\.\d{4},', re.MULTILINE)
        self.assertTrue(matcher.findall(written_result))
        written_result = json.loads(written_result)
        # assertAlmostEqual short-circuits on == for equal containers.
        self.assertAlmostEqual(result, written_result)
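# For context, a minimal sketch of the fixture that the assertDictEqual above
# compares against. The exact annotation ids and values here are assumptions,
# not copied from the real test; what the export is known to do is emit a
# standard COCO-style dict and convert each [ymin, xmin, ymax, xmax] box into
# COCO [x, y, width, height].
def setUp(self):  # hypothetical fixture
    self._groundtruth_dict = {
        'categories': [{'id': 0, 'name': 'person'},
                       {'id': 1, 'name': 'cat'},
                       {'id': 2, 'name': 'dog'}],
        'images': [{'id': 'first'}, {'id': 'second'}],
        'annotations': [
            {'id': 1, 'image_id': 'first', 'category_id': 1, 'iscrowd': 0,
             'bbox': [100.0, 100.0, 100.0, 100.0], 'area': 10000.0},
            {'id': 2, 'image_id': 'second', 'category_id': 1, 'iscrowd': 0,
             'bbox': [50.0, 50.0, 50.0, 50.0], 'area': 2500.0},
        ],
    }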
def main(_):
    model_path = FLAGS.model_path
    voc_path = FLAGS.voc_path
    labelmap_file = FLAGS.labelmap_file
    set_file = FLAGS.set_file
    scaler_file = FLAGS.scaler_file
    side_input = FLAGS.side_input

    # Load the model and read label_map.pbtxt.
    model = file_util.load_model(model_path)
    categories, labelmap_dict, category_index = file_util.load_labelmap(
        voc_path, labelmap_file)

    # Collect ground truth and run detection.
    gt_ids, gt_boxes, gt_classes = _read_annotations_for_groundtruth(
        voc_path, set_file, labelmap_dict)
    dt_ids, dt_boxes, dt_classes, dt_scores, time_per_image = inference(
        model, voc_path, model_path, set_file, category_index, scaler_file,
        side_input)

    # COCO evaluation.
    groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
        gt_ids, gt_boxes, gt_classes, categories)
    detections_list = coco_tools.ExportDetectionsToCOCO(
        dt_ids, dt_boxes, dt_scores, dt_classes, categories)
    groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    detections = groundtruth.LoadAnnotations(detections_list)
    evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
                                           agnostic_mode=False)
    summary_metrics, per_category_ap = evaluator.ComputeMetrics(
        include_metrics_per_category=True, all_metrics_per_category=True)

    # Convert the metrics to percentages.
    for k, v in summary_metrics.items():
        summary_metrics[k] = v * 100
    for k, v in per_category_ap.items():
        per_category_ap[k] = v * 100
    print(summary_metrics)
    print(per_category_ap)

    # Workaround for a TensorFlow limitation: FLOPs cannot be computed for
    # models that take custom side inputs.
    if not side_input:
        flops = get_flops(model)
    else:
        flops = 0
    metrics_dict = {
        'flops': flops / 1e9,
        'time_per_image': time_per_image * 1000
    }

    # Read the trainable-parameter counts and name dictionary from the pickle file.
    with open(os.path.join(model_path, 'metrics', 'name_params.pkl'),
              'rb') as f:
        name_params_dict = pickle.load(f)
    metrics_dict.update(name_params_dict)
    metrics_dict.update(summary_metrics)
    metrics_dict.update(per_category_ap)

    # Save the metrics to CSV.
    metrics_df = pd.DataFrame.from_records([metrics_dict])
    metrics_df.to_csv(os.path.join(model_path, 'metrics', 'metrics.csv'))
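# Example invocation (script name and all paths are placeholders; the flags
# come straight from the FLAGS lookups above):
#   python evaluate.py \
#       --model_path=/models/exported_ssd \
#       --voc_path=/data/VOCdevkit/VOC2012 \
#       --labelmap_file=label_map.pbtxt \
#       --set_file=val.txt \
#       --scaler_file=scaler.pkl
# ComputeMetrics(include_metrics_per_category=True) returns a second dict with
# one AP entry per class, which is why per_category_ap can be merged into
# metrics_dict alongside the summary metrics.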
# Tail of convert_OBJ_DETE_gt (the snippet starts mid-function). Inside the
# function's per-image loop, `data` holds the parsed annotation rows for the
# current image, with the class name in column 0 and the box coordinates after
# it; image_ids1 and the two accumulator lists are built earlier.
        ground_truth_box = []
        ground_truth_class = []
        if not data:
            ground_truth_box.append([])
        for i in range(len(data)):
            ground_truth_box.append(data[i][1:])
            ground_truth_class.append(label_map_dict[data[i][0]])
        ground_truth_boxes.append(np.array(ground_truth_box, float))
        ground_truth_classes.append(np.array(ground_truth_class, np.int32))
    return image_ids1, ground_truth_boxes, ground_truth_classes


pred_image_ids, detection_boxes, detection_scores, detection_classes = (
    convert_OBJ_DETE_dt(pred_path))
truth_image_ids, ground_truth_boxes, ground_truth_classes = (
    convert_OBJ_DETE_gt(truth_path))
detections_list = coco_tools.ExportDetectionsToCOCO(
    pred_image_ids, detection_boxes, detection_scores, detection_classes,
    label_map_dict)
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
    truth_image_ids, ground_truth_boxes, ground_truth_classes, label_map_dict)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
metrics, ap = evaluator.ComputeMetrics()
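# A minimal sketch of the row layout convert_OBJ_DETE_gt assumes (the values
# are hypothetical): each entry of `data` describes one object, class name
# first and box coordinates after it, so data[i][0] is the label that is
# mapped to an integer class id and data[i][1:] is the box.
data = [
    ['person', 100.0, 100.0, 200.0, 200.0],
    ['dog', 50.0, 50.0, 100.0, 100.0],
]
label_map_dict_example = {'person': 1, 'dog': 2}  # name -> class id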
for fn in pred_bbox_fn:
    if 'not_a_dam' not in fn:
        pred_img_ids.append(fn)
        coords, conf = parse_txt(os.path.join(args.predicted_bboxes, fn),
                                 args.predicted_format, 'predicted')
        pred_bboxes_list.append(coords)
        pred_conf_list.append(conf)
        # arr0 is presumably an array of zeros (class id 0, 'dam'), one entry
        # per predicted box, defined earlier in the script.
        pred_classes_list.append(arr0)

# add category input
categories = [{'id': 0, 'name': 'dam'}]

# prepare ground_truth input for COCOWrapper
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
    gt_img_ids,
    gt_bboxes_list,
    gt_classes_list,
    categories,
)

# prepare detections input for COCOWrapper
detections_list = coco_tools.ExportDetectionsToCOCO(pred_img_ids,
                                                    pred_bboxes_list,
                                                    pred_conf_list,
                                                    pred_classes_list,
                                                    categories)

# calculate
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
# The original snippet is cut off mid-call here; agnostic_mode=False closes it
# the same way as the earlier example and is an assumption.
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
                                       agnostic_mode=False)
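# A sketch of how the truncated evaluation presumably finishes, mirroring the
# other snippets in this file:
metrics, per_category_ap = evaluator.ComputeMetrics()
print(metrics)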