def setUp(self):
    """Builds the evaluator plus detected/groundtruth relation fixtures.

    A relation tuple pairs a subject box with an object box; the matching
    label tuple holds (subject class, object class, relation class).
    """
    iou_thresh = 0.5
    self.eval = per_image_vrd_evaluation.PerImageVRDEvaluation(iou_thresh)
    # Structured dtypes for (subject box, object box) pairs and their
    # (subject, object, relation) class labels.
    pair_dtype = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
    triple_dtype = np.dtype(
        [('subject', 'i4'), ('object', 'i4'), ('relation', 'i4')])
    self.detected_box_tuples = np.array(
        [([0, 0, 1, 1], [1, 1, 2, 2]),
         ([0, 0, 1.1, 1], [1, 1, 2, 2]),
         ([1, 1, 2, 2], [0, 0, 1.1, 1]),
         ([0, 0, 1, 1], [3, 4, 5, 6])],
        dtype=pair_dtype)
    self.detected_class_tuples = np.array(
        [(1, 2, 3), (1, 2, 3), (1, 2, 3), (1, 4, 5)], dtype=triple_dtype)
    self.detected_scores = np.array([0.2, 0.8, 0.1, 0.5], dtype=float)
    self.groundtruth_box_tuples = np.array(
        [([0, 0, 1, 1], [1, 1, 2, 2]),
         ([1, 1, 2, 2], [0, 0, 1.1, 1]),
         ([0, 0, 1, 1], [3, 4, 5, 5.5])],
        dtype=pair_dtype)
    self.groundtruth_class_tuples = np.array(
        [(1, 2, 3), (1, 7, 3), (1, 4, 5)], dtype=triple_dtype)
def setUp(self):
    """Builds the evaluator and a minimal set of box-pair fixtures.

    Only (subject box, object box) tuples and detection scores are needed
    here; class labels are not part of this fixture.
    """
    iou_thresh = 0.5
    self.eval = per_image_vrd_evaluation.PerImageVRDEvaluation(iou_thresh)
    # Structured dtype: each record is a (subject box, object box) pair.
    pair_dtype = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))])
    self.detected_box_tuples = np.array(
        [([0, 0, 1, 1], [1, 1, 2, 2]),
         ([0, 0, 1.1, 1], [1, 1, 2, 2]),
         ([1, 1, 2, 2], [0, 0, 1.1, 1])],
        dtype=pair_dtype)
    self.detected_scores = np.array([0.2, 0.8, 0.1], dtype=float)
    self.groundtruth_box_tuples = np.array(
        [([0, 0, 1, 1], [1, 1, 2, 2])], dtype=pair_dtype)
def __init__(self, matching_iou_threshold=0.5):
    """Constructor.

    Args:
      matching_iou_threshold: IOU threshold to use for matching groundtruth
        boxes to detection boxes.
    """
    # Per-image matcher shared by all evaluations at this threshold.
    per_image_eval = per_image_vrd_evaluation.PerImageVRDEvaluation(
        matching_iou_threshold=matching_iou_threshold)
    self._per_image_eval = per_image_eval
    # Groundtruth state, keyed per image.
    self._groundtruth_box_tuples = {}
    self._groundtruth_class_tuples = {}
    # Aggregate groundtruth counters.
    self._num_gt_imgs = 0
    self._num_gt_instances = 0
    self._num_gt_instances_per_relationship = {}
    # Detection-side state is (re)initialized by clear_detections().
    self.clear_detections()