Example #1
 def test_set(self):
     a = bbox_result.BoundingBox(
         class_names=('class_' + str(np.random.randint(255)), ),
         confidence=np.random.uniform(0, 1),
         x=np.random.randint(800),
         y=np.random.randint(600),
         width=np.random.randint(128),
         height=np.random.randint(128))
     b = bbox_result.BoundingBox(
         class_names=('class_' + str(np.random.randint(255)), ),
         confidence=np.random.uniform(0, 1),
         x=np.random.randint(800),
         y=np.random.randint(600),
         width=np.random.randint(128),
         height=np.random.randint(128))
     c = bbox_result.BoundingBox(
         class_names=('class_' + str(np.random.randint(255)), ),
         confidence=np.random.uniform(0, 1),
         x=np.random.randint(800),
         y=np.random.randint(600),
         width=np.random.randint(128),
         height=np.random.randint(128))
     subject_set = {a, a, a, b}
     self.assertEqual(2, len(subject_set))
     self.assertIn(a, subject_set)
     self.assertIn(b, subject_set)
     self.assertNotIn(c, subject_set)
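
These assertions only hold if BoundingBox compares and hashes by value rather than by identity, so that the three references to a collapse to a single set member. A minimal sketch of that contract, assuming attribute names that mirror the constructor keywords (an illustration, not the project's actual class):

    class BoundingBox:
        """Hypothetical value-type box: equal field values mean equal objects
        and equal hashes, so {a, a, a, b} above keeps only two members."""

        def __init__(self, class_names, confidence, x, y, width, height):
            self.class_names = tuple(class_names)
            self.confidence = confidence
            self.x = x
            self.y = y
            self.width = width
            self.height = height

        def __eq__(self, other):
            return (isinstance(other, BoundingBox) and
                    self.class_names == other.class_names and
                    self.confidence == other.confidence and
                    (self.x, self.y, self.width, self.height) ==
                    (other.x, other.y, other.width, other.height))

        def __hash__(self):
            return hash((self.class_names, self.confidence,
                         self.x, self.y, self.width, self.height))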
Example #2
    def test_benchmark_results_returns_a_benchmark_result(self):
        trial_result = MockTrialResult(
            gt_bboxes={oid.ObjectId(): [bbox_trial.BoundingBox(('cup',), 0.8256, 15, 22, 100, 100)]},
            bboxes={oid.ObjectId(): [bbox_trial.BoundingBox(('cup',), 0.8256, 15, 22, 100, 100)]})

        benchmark = bbox_overlap.BoundingBoxOverlapBenchmark()
        result = benchmark.benchmark_results(trial_result)
        self.assertIsInstance(result, core.benchmark.BenchmarkResult)
        self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
        self.assertIsInstance(result, bbox_result.BoundingBoxOverlapBenchmarkResult)
        self.assertEqual(benchmark.identifier, result.benchmark)
        self.assertEqual(trial_result.identifier, result.trial_result)
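
MockTrialResult is constructed throughout these benchmark tests, but its definition is not part of these excerpts. Judging from how it is used, a minimal stand-in would only need the two bounding-box dictionaries plus an identifier for the benchmark result to point back to; the base class and attribute names below are assumptions:

    import bson.objectid as oid

    class MockTrialResult:
        """Hypothetical stand-in: the real mock presumably extends the
        project's TrialResult, but only these pieces are exercised here."""

        def __init__(self, gt_bboxes, bboxes):
            self.identifier = oid.ObjectId()  # checked against result.trial_result
            self.gt_bboxes = gt_bboxes        # {image id: [ground-truth boxes]}
            self.bboxes = bboxes              # {image id: [detected boxes]}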
Example #3
 def test_hash(self):
     kwargs = {
         'class_names': ('class_' + str(np.random.randint(255)), ),
         'confidence': np.random.uniform(0, 0.8),
         'x': np.random.randint(800),
         'y': np.random.randint(600),
         'width': np.random.randint(128),
         'height': np.random.randint(128)
     }
     a = bbox_result.BoundingBox(**kwargs)
     b = bbox_result.BoundingBox(**kwargs)
     self.assertEqual(hash(a), hash(b))
     b = bbox_result.BoundingBox(
         **du.defaults({'class_names': ('class_413', )}, kwargs))
     self.assertNotEqual(hash(a), hash(b))
     b = bbox_result.BoundingBox(**du.defaults({'confidence': 0.9}, kwargs))
     self.assertNotEqual(hash(a), hash(b))
     b = bbox_result.BoundingBox(**du.defaults({'x': 1600}, kwargs))
     self.assertNotEqual(hash(a), hash(b))
     b = bbox_result.BoundingBox(**du.defaults({'y': 900}, kwargs))
     self.assertNotEqual(hash(a), hash(b))
     b = bbox_result.BoundingBox(**du.defaults({'width': 137}, kwargs))
     self.assertNotEqual(hash(a), hash(b))
     b = bbox_result.BoundingBox(**du.defaults({'height': 137}, kwargs))
     self.assertNotEqual(hash(a), hash(b))
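
du.defaults is used here to produce copies of kwargs with one field swapped out; its definition is not shown in these excerpts. From the call sites alone it behaves like a left-biased dictionary merge, roughly as follows (a sketch of the assumed utility, not the actual one):

    def defaults(*dicts):
        """Hypothetical left-biased merge: keys from earlier dictionaries win,
        so defaults({'x': 1600}, kwargs) is kwargs with x overridden."""
        merged = {}
        for d in dicts:
            for key, value in d.items():
                merged.setdefault(key, value)
        return merged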
Example #4
    def test_benchmark_matches_each_gt_box_only_once(self):
        id1 = oid.ObjectId()
        trial_result = MockTrialResult(
            gt_bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)]
            },
            bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 100, 100),
                      bbox_trial.BoundingBox({'cup'}, 1, 115, 122, 50, 50),
                      bbox_trial.BoundingBox({'cup'}, 1, 165, 172, 25, 25)],
            }
        )
        benchmark = bbox_overlap.BoundingBoxOverlapBenchmark()
        result = benchmark.benchmark_results(trial_result)

        self.assertIn(id1, result.overlaps)
        self.assertEqual(3, len(result.overlaps[id1]))
        self.assertEqual({
            'overlap': 10000,
            'bounding_box_area': 10000,
            'ground_truth_area': 30625,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': ('cup',)
        }, result.overlaps[id1][0])
        self.assertEqual({
            'overlap': 0,
            'bounding_box_area': 2500,
            'ground_truth_area': 0,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': tuple()
        }, result.overlaps[id1][1])
        self.assertEqual({
            'overlap': 0,
            'bounding_box_area': 625,
            'ground_truth_area': 0,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': tuple()
        }, result.overlaps[id1][2])
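
The expected records above imply one-to-one matching: the single ground-truth box is claimed by the largest-overlap detection, and the two leftover detections are scored against nothing (zero overlap, zero ground-truth area, empty class tuple). A rough sketch of a matching loop with that behaviour, assuming compute_overlap returns the intersection area of two boxes (this is not the benchmark's actual code):

    def match_boxes_greedily(detected, ground_truth, compute_overlap):
        """Hypothetical matching: each detection claims the unmatched
        ground-truth box it overlaps most; a ground-truth box is claimed once."""
        remaining = list(ground_truth)
        matches = []
        for box in detected:
            best = max(remaining, key=lambda gt: compute_overlap(box, gt),
                       default=None)
            if best is not None and compute_overlap(box, best) > 0:
                remaining.remove(best)
                matches.append((box, best))
            else:
                matches.append((box, None))  # nothing left to match against
        return matches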
Example #5
 def make_instance(self, *args, **kwargs):
     kwargs = du.defaults(
         kwargs, {
             'system_id': np.random.randint(10, 20),
             'bounding_boxes': {
                 bson.objectid.ObjectId(): tuple(
                     bbox_result.BoundingBox(
                         class_names=('class_' +
                                      str(np.random.randint(255)), ),
                         confidence=np.random.uniform(0, 1),
                         x=np.random.randint(800),
                         y=np.random.randint(600),
                         width=np.random.randint(128),
                         height=np.random.randint(128))
                     for _ in range(np.random.randint(50)))
                 for _ in range(100)
             },
             'ground_truth_bounding_boxes': {
                 bson.objectid.ObjectId(): tuple(
                     bbox_result.BoundingBox(
                         class_names=('class_' +
                                      str(np.random.randint(255)), ),
                         confidence=np.random.uniform(0, 1),
                         x=np.random.randint(800),
                         y=np.random.randint(600),
                         width=np.random.randint(128),
                         height=np.random.randint(128))
                     for _ in range(np.random.randint(50)))
                 for _ in range(100)
             },
             'sequence_type':
             core.sequence_type.ImageSequenceType.NON_SEQUENTIAL,
             'system_settings': {
                 'a': np.random.randint(20, 30)
             }
         })
     return bbox_result.BoundingBoxResult(*args, **kwargs)
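
make_instance is the usual entity-test fixture: because the keyword arguments are merged with du.defaults, anything a test passes in takes precedence over the randomised values above. A hypothetical use inside the same test class (illustrative only; the override shown is invented for the example):

    # Illustrative only: system_id is pinned, every other field stays random.
    instance = self.make_instance(system_id=12)
    self.assertIsInstance(instance, bbox_result.BoundingBoxResult)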
Example #6
    def test_serialize_and_deserialize(self):
        for _ in range(10):
            bbox1 = bbox_result.BoundingBox(
                class_names=('class_' + str(np.random.randint(255)), ),
                confidence=np.random.uniform(0, 1),
                x=np.random.randint(800),
                y=np.random.randint(600),
                width=np.random.randint(128),
                height=np.random.randint(128))
            s_bbox1 = bbox1.serialize()

            bbox2 = bbox_result.BoundingBox.deserialize(s_bbox1)
            s_bbox2 = bbox2.serialize()

            self.assertEqual(bbox1, bbox2)
            self.assertEqual(s_bbox1, s_bbox2)

            # Test that repeated serialization and deserialization does not degrade the information
            for _ in range(10):
                bbox2 = bbox_result.BoundingBox.deserialize(s_bbox2)
                s_bbox2 = bbox2.serialize()
                self.assertEqual(bbox1, bbox2)
                self.assertEqual(s_bbox1, s_bbox2)
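
For this round trip to be stable, serialize only needs to emit a plain dictionary of the constructor fields and deserialize needs to rebuild an equal object from it. Continuing the hypothetical BoundingBox sketch given after Example #1, the pair of methods could be as small as this (the real implementation may carry extra data, such as database ids):

    def serialize(self):
        # Hypothetical: a plain dict of the constructor fields, nothing lossy.
        return {'class_names': list(self.class_names),
                'confidence': self.confidence,
                'x': self.x, 'y': self.y,
                'width': self.width, 'height': self.height}

    @classmethod
    def deserialize(cls, serialized):
        # Hypothetical inverse: rebuilding from the dict loses nothing, so
        # repeated round trips (the inner loop above) keep both objects equal.
        return cls(class_names=tuple(serialized['class_names']),
                   confidence=serialized['confidence'],
                   x=serialized['x'], y=serialized['y'],
                   width=serialized['width'], height=serialized['height'])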
Example #7
    def test_benchmark_measures_score_per_gt_bounding_box(self):
        id1 = oid.ObjectId()
        id2 = oid.ObjectId()
        id3 = oid.ObjectId()
        id4 = oid.ObjectId()
        trial_result = MockTrialResult(
            gt_bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 100, 100)],
                id2: [bbox_trial.BoundingBox({'car'}, 1, 15, 22, 100, 100)],
                id3: [bbox_trial.BoundingBox({'cow'}, 1, 15, 22, 100, 100)],
                id4: [bbox_trial.BoundingBox({'cat'}, 1, 15, 22, 100, 100)]
            },
            bboxes={
                id1: [bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 100, 100)],       # Matched exactly
                id2: [bbox_trial.BoundingBox({'car'}, 0.8256, 15, 22, 100, 100)],  # Only confidence reduced
                id3: [bbox_trial.BoundingBox({'cow'}, 1, 25, 32, 95, 95)],         # Slightly misplaced
                id4: [bbox_trial.BoundingBox({'cat'}, 0.75, 25, 32, 95, 95)]       # Reduced confidence and slightly misplaced
            }
        )
        benchmark = bbox_overlap.BoundingBoxOverlapBenchmark()
        result = benchmark.benchmark_results(trial_result)

        self.assertIn(id1, result.overlaps)
        self.assertIn(id2, result.overlaps)
        self.assertIn(id3, result.overlaps)
        self.assertIn(id4, result.overlaps)
        self.assertEqual(1, len(result.overlaps[id1]))
        self.assertEqual(1, len(result.overlaps[id2]))
        self.assertEqual(1, len(result.overlaps[id3]))
        self.assertEqual(1, len(result.overlaps[id4]))
        self.assertEqual({
            'overlap': 10000,
            'bounding_box_area': 10000,
            'ground_truth_area': 10000,
            'confidence': 1.0,
            'bounding_box_classes': ('cup',),
            'ground_truth_classes': ('cup',)
        }, result.overlaps[id1][0])
        self.assertEqual({
            'overlap': 10000,
            'bounding_box_area': 10000,
            'ground_truth_area': 10000,
            'confidence': 0.8256,
            'bounding_box_classes': ('car',),
            'ground_truth_classes': ('car',)
        }, result.overlaps[id2][0])
        self.assertEqual({
            'overlap': 8100,
            'bounding_box_area': 9025,
            'ground_truth_area': 10000,
            'confidence': 1.0,
            'bounding_box_classes': ('cow',),
            'ground_truth_classes': ('cow',)
        }, result.overlaps[id3][0])
        self.assertEqual({
            'overlap': 8100,
            'bounding_box_area': 9025,
            'ground_truth_area': 10000,
            'confidence': 0.75,
            'bounding_box_classes': ('cat',),
            'ground_truth_classes': ('cat',)
        }, result.overlaps[id4][0])
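
Each entry in result.overlaps[image_id] is a plain dictionary describing one detected box against whatever ground truth it was matched to. Assuming the greedy matching sketched after Example #4, one record could be assembled like this (a sketch consistent with the expected values, not the benchmark's code):

    def score_detection(box, matched_gt, compute_overlap):
        """Hypothetical per-detection record with the keys asserted above."""
        return {
            'overlap': (compute_overlap(box, matched_gt)
                        if matched_gt is not None else 0),
            'bounding_box_area': box.width * box.height,
            'ground_truth_area': (matched_gt.width * matched_gt.height
                                  if matched_gt is not None else 0),
            'confidence': box.confidence,
            'bounding_box_classes': tuple(box.class_names),
            'ground_truth_classes': (tuple(matched_gt.class_names)
                                     if matched_gt is not None else tuple())
        }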
Example #8
    def test_overlap_computes_bbox_overlap(self):
        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        self.assertEqual(30625, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 25, 32, 175, 175)
        self.assertEqual(27225, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 5, 12, 175, 175)
        self.assertEqual(27225, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 25, 32, 100, 100)
        self.assertEqual(10000, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 190, 22, 175, 175)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 15, 197, 175, 175)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 190, 197, 175, 175)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 190, 22, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 197, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 190, 197, 175, 175)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 175, 175)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 190, 197, 10, 10)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 10, 10)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))

        bbox1 = bbox_trial.BoundingBox({'cup'}, 1, 15, 22, 10, 10)
        bbox2 = bbox_trial.BoundingBox({'cup'}, 1, 190, 197, 10, 10)
        self.assertEqual(0, bbox_overlap.compute_overlap(bbox1, bbox2))
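
Taken together, these cases pin down compute_overlap as the intersection area in pixels rather than an IoU: identical 175x175 boxes give 30625, a 10-pixel shift in both axes leaves 165 * 165 = 27225, and disjoint boxes give 0. A straightforward implementation consistent with that, assuming x and y are the top-left corner (a sketch, not necessarily the module's exact code):

    def compute_overlap(box1, box2):
        """Hypothetical intersection area of two axis-aligned boxes, in pixels."""
        x_overlap = (min(box1.x + box1.width, box2.x + box2.width) -
                     max(box1.x, box2.x))
        y_overlap = (min(box1.y + box1.height, box2.y + box2.height) -
                     max(box1.y, box2.y))
        if x_overlap <= 0 or y_overlap <= 0:
            return 0
        return x_overlap * y_overlap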
Example #9
    def process_image(self, image, timestamp):
        """
        Process an image as part of the current run.
        :param image: An Image object
        :param timestamp: The timestamp or index of the image in this collection
        :return: None
        """
        if not self.is_trial_running():
            self.start_trial(
                core.sequence_type.ImageSequenceType.NON_SEQUENTIAL)

        # Convert the input image to be fed into the network
        formatted_image, aspect_ratio = format_img(image.data, self._config)
        if keras_backend.image_dim_ordering() == 'tf':
            formatted_image = np.transpose(formatted_image, (0, 2, 3, 1))

        # get the feature maps and output from the RPN (Region Proposal Network)
        [rpn_class, rpn_regr,
         shared_layers_output] = self._model_rpn.predict(formatted_image)

        # Convert region proposals to bounding boxes
        regions = roi_helpers.rpn_to_roi(rpn_class,
                                         rpn_regr,
                                         self._config,
                                         keras_backend.image_dim_ordering(),
                                         overlap_thresh=0.7)

        # convert from (x1,y1,x2,y2) to (x,y,w,h)
        regions[:, 2] -= regions[:, 0]
        regions[:, 3] -= regions[:, 1]

        # apply the spatial pyramid pooling to the proposed regions
        bboxes = {}
        probs = {}
        # Process the proposed regions in batches of num_rois (// is integer division)
        for jk in range(regions.shape[0] // self._config.num_rois + 1):
            regions_of_interest = np.expand_dims(
                regions[self._config.num_rois * jk:self._config.num_rois *
                        (jk + 1), :],
                axis=0)
            if regions_of_interest.shape[1] == 0:
                break

            if jk == regions.shape[0] // self._config.num_rois:
                # pad regions
                curr_shape = regions_of_interest.shape
                target_shape = (curr_shape[0], self._config.num_rois,
                                curr_shape[2])
                rois_padded = np.zeros(target_shape).astype(
                    regions_of_interest.dtype)
                rois_padded[:, :curr_shape[1], :] = regions_of_interest
                rois_padded[0, curr_shape[1]:, :] = regions_of_interest[0,
                                                                        0, :]
                regions_of_interest = rois_padded

            [prob_class, prob_regr] = self._model_classifier_only.predict(
                [shared_layers_output, regions_of_interest])

            for ii in range(prob_class.shape[1]):

                # Skip if the most likely class falls below the confidence threshold, or is the background class
                if (np.max(prob_class[0, ii, :]) < self._bbox_threshold
                        or np.argmax(prob_class[0, ii, :])
                        == (prob_class.shape[2] - 1)):
                    continue

                cls_name = self._class_mapping[np.argmax(prob_class[0, ii, :])]
                if cls_name not in bboxes:
                    bboxes[cls_name] = []
                    probs[cls_name] = []

                (x, y, w, h) = regions_of_interest[0, ii, :]

                cls_num = np.argmax(prob_class[0, ii, :])
                try:
                    (tx, ty, tw, th) = prob_regr[0, ii,
                                                 4 * cls_num:4 * (cls_num + 1)]
                    tx /= self._config.classifier_regr_std[0]
                    ty /= self._config.classifier_regr_std[1]
                    tw /= self._config.classifier_regr_std[2]
                    th /= self._config.classifier_regr_std[3]
                    x, y, w, h = roi_helpers.apply_regr(
                        x, y, w, h, tx, ty, tw, th)
                except Exception:
                    # If applying the box regression fails, keep the unrefined region
                    pass
                bboxes[cls_name].append([
                    self._config.rpn_stride * x, self._config.rpn_stride * y,
                    self._config.rpn_stride * (x + w),
                    self._config.rpn_stride * (y + h)
                ])
                probs[cls_name].append(np.max(prob_class[0, ii, :]))

        # Aggregate the detected bounding boxes into results for this image
        detected_bboxes = []
        for key in bboxes.keys():
            bbox = np.array(bboxes[key])

            new_boxes, new_probs = roi_helpers.non_max_suppression_fast(
                bbox, np.array(probs[key]), overlap_thresh=0.5)
            for jk in range(new_boxes.shape[0]):
                x1, y1, x2, y2 = new_boxes[jk, :]
                real_x1, real_y1, real_x2, real_y2 = get_real_coordinates(
                    aspect_ratio, x1, y1, x2, y2)
                detected_bboxes.append(
                    bbox_result.BoundingBox(class_names=(key, ),
                                            confidence=new_probs[jk],
                                            x=real_x1,
                                            y=real_y1,
                                            width=real_x2 - real_x1,
                                            height=real_y2 - real_y1))

        self._bounding_boxes[image.identifier] = detected_bboxes
        self._gt_bounding_boxes[image.identifier] = (
            bbox_result.BoundingBox(class_names=obj.class_names,
                                    confidence=1,
                                    x=obj.bounding_box[0],
                                    y=obj.bounding_box[1],
                                    width=obj.bounding_box[2],
                                    height=obj.bounding_box[3])
            for obj in image.metadata.labelled_objects)
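
format_img and get_real_coordinates are Faster R-CNN helper functions: format_img appears to resize the frame to the network's input size and report the resize ratio, and get_real_coordinates maps detected corners from that resized space back to original-image pixels before the BoundingBox is built. A typical shape for the latter, assuming aspect_ratio is the scale factor applied by format_img (a sketch of the assumed helper, not necessarily this project's exact code):

    def get_real_coordinates(ratio, x1, y1, x2, y2):
        """Hypothetical mapping of box corners from the resized network input
        back to original-image pixel coordinates by undoing the resize ratio."""
        return (int(round(x1 / ratio)), int(round(y1 / ratio)),
                int(round(x2 / ratio)), int(round(y2 / ratio)))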