def test_object_detection_output(self):
        """Outputs built directly and via ``from_scores`` must agree.

        Builds one ObjectDetectionOutput from raw probabilities and a second
        from derived scores/labels, then checks labels and scores match.
        """
        outputs_1 = []
        outputs_2 = []
        for i, data in enumerate(self.dummy_data):
            output = ObjectDetectionOutput(
                data['boxes'],
                data['object_probabilities'],
                data['class_probabilities'],
            )
            outputs_1.append(output)

            # score = objectness probability * best class probability
            scores = [
                o * max(c) for o, c in zip(data['object_probabilities'],
                                           data['class_probabilities'])
            ]
            output_2 = ObjectDetectionOutput.from_scores(
                data['boxes'],
                scores,
                data['labels'],
            )
            # BUG FIX: output_2 was never collected, so the comparison loop
            # below iterated over an empty zip and verified nothing.
            outputs_2.append(output_2)

        for output_1, output_2 in zip(outputs_1, outputs_2):
            for x, y in zip(output_1.labels, output_2.labels):
                self.assertEqual(x, y)
            for x, y in zip(output_1.scores, output_2.scores):
                self.assertEqual(x, y)
# Example 2
    def test_object_detection_scorer(self):
        """Check the raw score arrays produced by the scorer internals."""
        # Normalize pixel-space boxes to [0, 1] relative coordinates.
        img_w, img_h = 128, 128
        for sample in self.dummy_data:
            for idx, raw_box in enumerate(sample['boxes']):
                sample['boxes'][idx] = BoundingBox(
                    raw_box[0] / img_w,
                    raw_box[1] / img_h,
                    raw_box[2] / img_w,
                    raw_box[3] / img_h,
                )

        # Replace each raw sample dict with a model-output object.
        self.dummy_data = [
            ObjectDetectionOutput(
                sample['boxes'],
                sample['object_probabilities'],
                sample['class_probabilities'],
            )
            for sample in self.dummy_data
        ]

        scorer = ScorerObjectDetection(self.dummy_data)
        scores = scorer._calculate_scores()

        # One score value per input sample, with the expected magnitudes.
        res = scores['object-frequency']
        self.assertEqual(len(res), len(self.dummy_data))
        self.assertListEqual(res.tolist(), [1.0, 0.95, 0.9])

        res = scores['prediction-margin']
        self.assertEqual(len(res), len(self.dummy_data))
        self.assertListEqual(res.tolist(), [0.5514945, 0.9488, 0.])
# Example 3
    def test_object_detection_from_class_labels(self):
        """Scores built from class labels are non-zero numpy arrays."""
        # Normalize pixel-space boxes to [0, 1] relative coordinates.
        img_w, img_h = 128, 128
        for sample in self.dummy_data:
            for idx, raw_box in enumerate(sample['boxes']):
                sample['boxes'][idx] = BoundingBox(
                    raw_box[0] / img_w,
                    raw_box[1] / img_h,
                    raw_box[2] / img_w,
                    raw_box[3] / img_h,
                )

        # Build the outputs from scores + labels instead of probabilities.
        self.dummy_data = [
            ObjectDetectionOutput.from_scores(
                sample['boxes'], sample['object_probabilities'],
                sample['labels'])
            for sample in self.dummy_data
        ]

        # Score with the default configuration.
        scorer = ScorerObjectDetection(self.dummy_data)
        scores = scorer.calculate_scores()

        # Every score vector must contain at least one non-zero entry ...
        for val in scores.values():
            self.assertNotEqual(max(val), 0.0)

        # ... and must be a numpy array.
        for val in scores.values():
            self.assertEqual(type(val), type(np.array([])))
    def test_object_detection_scorer_config(self):
        """Default, overridden, and invalid scorer configurations."""
        # Normalize pixel-space boxes to [0, 1] relative coordinates.
        img_w, img_h = 128, 128
        for sample in self.dummy_data:
            for idx, raw_box in enumerate(sample['boxes']):
                sample['boxes'][idx] = BoundingBox(
                    raw_box[0] / img_w,
                    raw_box[1] / img_h,
                    raw_box[2] / img_w,
                    raw_box[3] / img_h,
                )

        self.dummy_data = [
            ObjectDetectionOutput(
                sample['boxes'],
                sample['object_probabilities'],
                sample['class_probabilities'],
            )
            for sample in self.dummy_data
        ]

        # Omitting the config falls back to the documented defaults.
        scorer = ScorerObjectDetection(self.dummy_data)
        scores = scorer.calculate_scores()
        expected_default_config = {
            'frequency_penalty': 0.25,
            'min_score': 0.9
        }
        self.assertDictEqual(scorer.config, expected_default_config)

        # A valid override fully replaces the defaults.
        new_config = {
            'frequency_penalty': 0.55,
            'min_score': 0.6
        }
        scorer = ScorerObjectDetection(self.dummy_data, config=new_config)
        scores = scorer.calculate_scores()
        self.assertDictEqual(scorer.config, new_config)

        # Unknown config keys must raise a KeyError.
        new_config = {
            'frequenci_penalty': 0.55,
            'minimum_score': 0.6
        }
        with self.assertRaises(KeyError):
            scorer = ScorerObjectDetection(self.dummy_data, config=new_config)

        # Wrongly-typed or out-of-range values must raise a ValueError.
        new_config = {
            'frequency_penalty': 'test',
            'min_score': 1.6
        }
        with self.assertRaises(ValueError):
            scorer = ScorerObjectDetection(self.dummy_data, config=new_config)
    def test_object_detection_output_from_scores(self):
        """Class probabilities derived by ``from_scores`` sum to one.

        Builds outputs from scores/labels and verifies each derived
        class-probability vector forms a valid distribution.
        """
        outputs = []
        for i, data in enumerate(self.dummy_data):
            output = ObjectDetectionOutput.from_scores(
                data['boxes'],
                data['object_probabilities'],
                data['labels'],
            )
            outputs.append(output)

        for output in outputs:
            for class_probs in output.class_probabilities:
                # BUG FIX: exact float equality on a sum of floating-point
                # probabilities is brittle; compare within tolerance.
                self.assertAlmostEqual(float(np.sum(class_probs)), 1.0)
def convert_bbox_detectron2lightly(outputs):
    """Convert detectron2 instance predictions into lightly's format.

    Args:
        outputs: detectron2 prediction dict whose 'instances' entry provides
            ``image_size``, ``pred_boxes``, ``scores`` and ``pred_classes``
            (assumed to have equal lengths — standard detectron2 contract,
            TODO confirm for custom models).

    Returns:
        An ObjectDetectionOutput with boxes normalized to [0, 1].
    """
    instances = outputs['instances']
    height, width = instances.image_size

    boxes = []
    # NOTE: scores and classes were previously unpacked in this loop but
    # never used here; they are consumed once below when building the output.
    for bbox_raw in instances.pred_boxes.tensor:
        x0, y0, x1, y1 = bbox_raw.cpu().numpy()
        # Normalize pixel coordinates by the image size.
        boxes.append(BoundingBox(x0 / width, y0 / height,
                                 x1 / width, y1 / height))

    output = ObjectDetectionOutput.from_scores(
        boxes,
        instances.scores.cpu().numpy(),
        instances.pred_classes.cpu().numpy().tolist())
    return output
    def test_object_detection_output_illegal_args(self):
        """Constructor and ``from_scores`` reject invalid arguments."""

        with self.assertRaises(ValueError):
            # score > 1
            ObjectDetectionOutput.from_scores([BoundingBox(0, 0, 1, 1)], [1.1],
                                              [0])

        with self.assertRaises(ValueError):
            # score < 0
            ObjectDetectionOutput.from_scores([BoundingBox(0, 0, 1, 1)], [-1.],
                                              [1])

        with self.assertRaises(ValueError):
            # mismatched lengths (1 box vs. 2 probabilities/labels)
            ObjectDetectionOutput([BoundingBox(0, 0, 1, 1)], [0.5, 0.2],
                                  [1, 2])

        with self.assertRaises(ValueError):
            # BUG FIX: this case previously passed score 1.1, so it could
            # succeed for the wrong reason (invalid score rather than the
            # string label). Use a valid score so the string label is the
            # only illegal argument.
            ObjectDetectionOutput.from_scores(
                [BoundingBox(0, 0, 1, 1)],
                [0.5],
                ['hello'],
            )
# Example 8
    def test_object_detection_scorer(self):
        """End-to-end check of all ScorerObjectDetection score outputs."""
        # Normalize pixel-space boxes to [0, 1] BoundingBox objects.
        W, H = 128, 128
        for data in self.dummy_data:
            for i, box in enumerate(data['boxes']):
                x0 = box[0] / W
                y0 = box[1] / H
                x1 = box[2] / W
                y1 = box[3] / H
                data['boxes'][i] = BoundingBox(x0, y0, x1, y1)

        for i, data in enumerate(self.dummy_data):
            self.dummy_data[i] = ObjectDetectionOutput(
                data['boxes'],
                data['object_probabilities'],
                data['class_probabilities'],
            )

        scorer = ScorerObjectDetection(self.dummy_data)
        scores = scorer.calculate_scores()

        # BUG FIX: assertTrue(a, b) treats b as the failure *message* and
        # only checks the truthiness of a — no comparison happened. Use
        # assertSetEqual to actually compare the produced score names
        # against the advertised ones.
        self.assertSetEqual(set(scores.keys()),
                            set(ScorerObjectDetection.score_names()))

        # make sure the max entry of a score is not 0.0
        for key, val in scores.items():
            self.assertNotEqual(max(val), 0.0)

        # make sure all scores are numpy arrays
        for key, val in scores.items():
            self.assertEqual(type(scores[key]), type(np.array([])))

        res = scores['object_frequency']
        self.assertEqual(len(res), len(self.dummy_data))
        self.assertListEqual(res.tolist(), [1.0, 0.95, 0.9])

        res = scores['objectness_least_confidence']
        self.assertEqual(len(res), len(self.dummy_data))
        self.assertListEqual(res.tolist(), [0.5514945, 0.9488, 0.])

        for score_name, score in scores.items():
            if "classification" in score_name:
                # BUG FIX: this previously checked len(res) — a stale
                # variable from the assertions above — instead of the
                # score currently being iterated.
                self.assertEqual(len(score), len(self.dummy_data))
            if score_name == "classification_uncertainty_least_confidence":
                self.assertListEqual(list(score), [
                    max(1 - 0.7, 1 - 0.5) / (1 - 1 / 3),
                    (1 - 0.5) / (1 - 1 / 3), 0
                ])
            elif score_name == "classification_uncertainty_margin":
                self.assertListEqual(list(score), [
                    max(1 - (0.7 - 0.2), 1 - (0.5 - 0.4)), 1 - (0.5 - 0.41), 0
                ])
            elif score_name == "classification_uncertainty_entropy":
                # Compare against entropies recomputed from the raw class
                # probabilities, normalized by log2(3).
                entropies_0 = _entropy(
                    np.array(
                        self.dummy_data[0].class_probabilities)) / np.log2(3)
                entropies_1 = _entropy(
                    np.array(
                        self.dummy_data[1].class_probabilities)) / np.log2(3)
                score_target = [
                    float(max(entropies_0)),
                    float(max(entropies_1)), 0
                ]
                for val1, val2 in zip(score, score_target):
                    self.assertAlmostEqual(val1, val2, places=8)
# Example 9
 def score_names(cls) -> List[str]:
     """Return the names of the active-learning scores this scorer yields."""
     # Probe a scorer built on an empty output and read back the score keys.
     dummy_scorer = cls(model_output=[ObjectDetectionOutput([], [], [])])
     return list(dummy_scorer.calculate_scores().keys())