def test_calc_detection_voc_prec_rec(self):
    prec, rec = calc_detection_voc_prec_rec(
        self.pred_bboxes, self.pred_labels, self.pred_scores,
        self.gt_bboxes, self.gt_labels, self.gt_difficults,
        iou_thresh=self.iou_thresh)

    # prec and rec are per-class lists; an entry is None for a class that
    # never occurs, so (None, None) pairs are skipped rather than compared.
    self.assertEqual(len(prec), len(self.prec))
    for prec_l, expected_prec_l in zip(prec, self.prec):
        if prec_l is None and expected_prec_l is None:
            continue
        np.testing.assert_equal(prec_l, expected_prec_l)

    self.assertEqual(len(rec), len(self.rec))
    for rec_l, expected_rec_l in zip(rec, self.rec):
        if rec_l is None and expected_rec_l is None:
            continue
        np.testing.assert_equal(rec_l, expected_rec_l)
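# A minimal, standalone sketch of the behavior this test exercises, assuming
# ChainerCV's calc_detection_voc_prec_rec; the toy boxes, labels, and scores
# below are illustrative, not the suite's actual fixtures.
import numpy as np
from chainercv.evaluations import calc_detection_voc_prec_rec

# One image; boxes use ChainerCV's (y_min, x_min, y_max, x_max) order.
pred_bboxes = [np.array([[0, 0, 10, 10], [5, 5, 15, 15]], dtype=np.float32)]
pred_labels = [np.array([0, 2], dtype=np.int32)]
pred_scores = [np.array([0.9, 0.4], dtype=np.float32)]
gt_bboxes = [np.array([[0, 0, 10, 10]], dtype=np.float32)]
gt_labels = [np.array([0], dtype=np.int32)]

prec, rec = calc_detection_voc_prec_rec(
    pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
    iou_thresh=0.5)

# prec and rec are indexed by class id; a class that appears in neither the
# predictions nor the ground truth (class 1 here) gets None, which is why
# the test above skips (None, None) pairs instead of comparing them.
print(prec[0], rec[0])  # cumulative precision / recall arrays for class 0
print(prec[1], rec[1])  # None None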
def calc(self):
    pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels = self.data

    if self.metric == 'all' or self.metric == 'voc_detection':
        print('Calculating voc_detection ...')
        result = eval_detection_voc(
            pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
            use_07_metric=True)
        print('mAP: {:f}'.format(result['map']))
        # result['ap'] holds one AP per class; index it for 'person'.
        print('person AP: {:f}'.format(
            result['ap'][voc_utils.voc_bbox_label_names.index('person')]))

    if self.metric == 'all' or self.metric == 'pr_voc_detection':
        print('Calculating pr_voc_detection ...')
        prec, rec = calc_detection_voc_prec_rec(
            pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
            gt_difficults=None, iou_thresh=0.5)
        person_prec = prec[voc_utils.voc_bbox_label_names.index('person')]
        person_rec = rec[voc_utils.voc_bbox_label_names.index('person')]
        print('Avg person precision: {:f}'.format(np.average(person_prec)))
        print('Avg person recall: {:f}'.format(np.average(person_rec)))
        if self.plottings:
            self.plot(recall=person_rec, precision=person_prec,
                      metric='pr_voc_detection')

    if self.metric == 'all' or self.metric == 'mot':
        print('Calculating mot_metrics ...')
        self.motmetrics()
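# Note that np.average over a raw per-class precision array is not the VOC
# average precision: eval_detection_voc interpolates the curve (11-point
# interpolation when use_07_metric=True). A sketch, assuming ChainerCV also
# exposes calc_detection_voc_ap, of deriving the same per-class AP directly
# from the prec/rec lists computed above:
from chainercv.evaluations import calc_detection_voc_ap

ap = calc_detection_voc_ap(prec, rec, use_07_metric=True)
person_ap = ap[voc_utils.voc_bbox_label_names.index('person')]
print('person AP: {:f}'.format(person_ap))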
yl_images.append(tmp_yl_images[curr_index])

print('Generating plots...')
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
fig_dir = 'plottings/metrics/all_models/{}_{}_{}/'.format(
    'PTI01', len(fr_images), now)
os.makedirs(fig_dir, exist_ok=True)
print('Saving metrics in {}.'.format(fig_dir))

print('YOLOv2')
yl_prec, yl_rec = calc_detection_voc_prec_rec(
    yl_pred_bboxes, yl_pred_labels, yl_pred_scores,
    yl_gt_bboxes, yl_gt_labels, gt_difficults=None, iou_thresh=0.5)
yl_person_prec = yl_prec[voc_utils.voc_bbox_label_names.index('person')]
yl_person_rec = yl_rec[voc_utils.voc_bbox_label_names.index('person')]
print("Avg prec {}, Avg rec {}".format(
    np.average(yl_person_prec), np.average(yl_person_rec)))
plt.step(yl_person_rec, yl_person_prec, label='YOLOv2')

print('SSD512')
s5_prec, s5_rec = calc_detection_voc_prec_rec(
    s5_pred_bboxes, s5_pred_labels, s5_pred_scores,
    s5_gt_bboxes, s5_gt_labels,
    # the source is truncated here; completed to mirror the YOLOv2 call above
    gt_difficults=None, iou_thresh=0.5)
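# The script presumably goes on to plot the SSD512 curve the same way as
# YOLOv2 and then finalize the figure. A hedged sketch of that tail; the
# 'pr_curve.png' filename is an assumption, not from the original source:
s5_person_prec = s5_prec[voc_utils.voc_bbox_label_names.index('person')]
s5_person_rec = s5_rec[voc_utils.voc_bbox_label_names.index('person')]
print("Avg prec {}, Avg rec {}".format(
    np.average(s5_person_prec), np.average(s5_person_rec)))
plt.step(s5_person_rec, s5_person_prec, label='SSD512')

plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend()
plt.savefig(os.path.join(fig_dir, 'pr_curve.png'))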