Code example #1
0
def make_representation(bounding_boxes,
                        is_ground_truth=False,
                        score=None,
                        meta=None):
    """Build detection representations from whitespace-separated box strings.

    Args:
        bounding_boxes: string or list of strings `score label x0 y0 x1 y1; label score x0 y0 x1 y1; ...`.
        is_ground_truth: True if bbs are annotation boxes.
        score: value in [0, 1], if not None, all prediction boxes are considered with the given score.
        meta: metadata for representation

    Returns:
        List of DetectionAnnotation (if is_ground_truth) or DetectionPrediction
        objects, one per input string, with the string index as identifier.
    """

    if not isinstance(bounding_boxes, list):
        bounding_boxes = [bounding_boxes]

    # Ground-truth rows and rows that get an explicit score prepended carry
    # 5 columns; prediction rows with an inline score carry 6.
    # NOTE: compare against None explicitly — score=0.0 is a valid value.
    has_external_score = score is not None
    expected_columns = 5 if (is_ground_truth or has_external_score) else 6

    result = []
    for idx, box in enumerate(bounding_boxes):
        if box == "":
            # Keep the expected column count so the slicing below still works
            # for empty inputs (the original hard-coded 5 broke the no-score
            # prediction path, which needs 6 columns).
            arr = np.empty((0, expected_columns))
        else:
            # np.fromstring(sep=' ') is deprecated; parse each row manually.
            arr = np.array([np.array(row.split(), dtype=float)
                            for row in box.split(';')])

        assert arr.shape[1] == expected_columns

        if not is_ground_truth and has_external_score:
            score_ = score
            # Broadcast a scalar (or single-element) score to every box.
            if np.isscalar(score_) or len(score_) == 1:
                score_ = np.full(arr.shape[0], score_)
            arr = np.c_[score_, arr]

        if is_ground_truth:
            detection = DetectionAnnotation(str(idx), arr[:, 0], arr[:, 1],
                                            arr[:, 2], arr[:, 3], arr[:, 4])
        else:
            # Prediction layout after any score prepend: score, label, x0..y1.
            detection = DetectionPrediction(str(idx), arr[:, 1], arr[:, 0],
                                            arr[:, 2], arr[:, 3], arr[:, 4],
                                            arr[:, 5])

        if meta:
            detection.metadata = meta[idx]

        result.append(detection)

    return result
Code example #2
0
    def test_accuracy_with_wrong_annotation_type_raise_config_error_exception(
            self):
        """Accuracy metric must reject a detection-typed annotation with ConfigError."""
        annotation_batch = [DetectionAnnotation('identifier', 3)]
        prediction_batch = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
        ]
        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

        with pytest.raises(ConfigError):
            executor.update_metrics_on_batch(
                range(len(annotation_batch)), annotation_batch,
                prediction_batch)
Code example #3
0
    def test_accuracy_with_unsupported_annotation_type_as_annotation_source_for_container_raises_config_error(
            self):
        """Accuracy metric must raise ConfigError when the configured
        annotation_source inside a container holds an unsupported type."""
        annotation_batch = [
            ContainerAnnotation(
                {'annotation': DetectionAnnotation('identifier', 3)})
        ]
        prediction_batch = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
        ]
        metric_config = {
            'type': 'accuracy',
            'top_k': 1,
            'annotation_source': 'annotation'
        }
        executor = MetricsExecutor([metric_config], None)

        with pytest.raises(ConfigError):
            executor.update_metrics_on_batch(
                range(len(annotation_batch)), annotation_batch,
                prediction_batch)