def get_localization(self, image, visual=False):
        """Detect objects in one frame and return their pixel-space boxes.

        Args:
            image: image array, shape (H, W, C) -- assumed RGB, confirm with caller.
                Mutated in place when ``visual`` is True (boxes are drawn on it).
            visual: when True, draw the detections and display them with matplotlib.

        Returns:
            List of pixel-coordinate boxes from ``box_normal_to_pixel``. When no
            detection clears the score threshold, the previous frame's boxes are
            returned unchanged (intentional: "passing to the next frame").
        """
        # Number of classes the object detector can identify.
        NUM_CLASSES = 2
        # Build the label-map -> category index once and cache it on the
        # instance; reloading it from disk on every frame is wasted work.
        if not hasattr(self, '_category_index'):
            label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
            categories = label_map_util.convert_label_map_to_categories(
                label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
            self._category_index = label_map_util.create_category_index(
                categories)
        category_index = self._category_index

        with self.detection_graph.as_default():

            # The graph expects a batch dimension: (1, H, W, C).
            image_expanded = np.expand_dims(image, axis=0)
            (boxes, scores, classes, num_detections) = self.sess.run(
                [self.boxes, self.scores, self.classes, self.num_detections],
                feed_dict={self.image_tensor: image_expanded})
            if visual:
                visualization_utils.visualize_boxes_and_labels_on_image_array_tracker(
                    image,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    min_score_thresh=.4,
                    skip_labels=True,
                    line_thickness=3)
                plt.figure(figsize=(9, 6))
                plt.imshow(image)
                plt.show()

            # Drop the batch dimension for per-detection indexing.
            boxes = np.squeeze(boxes)
            classes = np.squeeze(classes)
            scores = np.squeeze(scores)
            # Indices of detections above the confidence threshold.
            idx_vec = [i for i, score in enumerate(scores) if score > 0.6]
            # When something changes this grows, e.g. from [0] to [0, 1].
            print('show idex_vec::::', idx_vec)
            if len(idx_vec) == 0:
                print(
                    'there are not any detections, passing to the next frame...'
                )
            else:
                tmp_object_boxes = []
                dim = image.shape[0:2]  # (height, width) -- loop-invariant
                for idx in idx_vec:
                    class_id = classes[idx]
                    # Convert normalized [ymin, xmin, ymax, xmax] to pixels.
                    box = self.box_normal_to_pixel(boxes[idx], dim, class_id)
                    box_h = box[2] - box[0]
                    box_w = box[3] - box[1]
                    # Aspect ratio kept for the (currently disabled) shape
                    # filter below; +0.01 guards against division by zero.
                    ratio = box_h / (box_w + 0.01)

                    #if ((ratio < 0.8) and (box_h>20) and (box_w>20)):
                    tmp_object_boxes.append(box)
                    #print(box, ', confidence: ', scores[idx], 'ratio:', ratio)

                self.object_boxes = tmp_object_boxes
        # Fall back to [] instead of raising AttributeError when no frame has
        # ever produced detections (object_boxes may not be set in __init__).
        return getattr(self, 'object_boxes', [])
# Example #2 (0)
    def get_localization(self, image, visual=False):
        """Run remote inference on one frame; return boxes plus the raw output.

        Args:
            image: image array, shape (H, W, C) -- assumed RGB, confirm with
                caller. Mutated in place when ``visual`` is True.
            visual: when True, draw the detections and display them with
                matplotlib.

        Returns:
            Tuple ``(object_boxes, output_dict)`` where ``object_boxes`` is the
            list of pixel-coordinate boxes. When nothing clears ``threshold``,
            the previous frame's boxes are returned unchanged.
        """
        global category_index

        input_image = np.asarray(image)
        # The model expects a batch dimension: (1, H, W, C).
        image_np_expanded = np.expand_dims(input_image, axis=0)

        # Run inference on the serving host (config.HOST).
        output_dict = dgv.run_inference_for_single_image(
            config.HOST, image_np_expanded)

        boxes = output_dict['detection_boxes']
        scores = output_dict['detection_scores']
        classes = output_dict['detection_classes']

        # NOTE(review): `threshold` and `category_index` are module-level
        # globals not defined in this method -- confirm they are set before
        # the first call.
        if visual:
            visualization_utils.visualize_boxes_and_labels_on_image_array_tracker(
                image,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                min_score_thresh=threshold,
                line_thickness=3)
            plt.figure(figsize=(9, 6))
            plt.imshow(image)
            plt.show()
        # Drop the batch dimension for per-detection indexing.
        boxes = np.squeeze(boxes)
        classes = np.squeeze(classes)
        scores = np.squeeze(scores)
        # Indices of detections above the confidence threshold.
        idx_vec = [i for i, score in enumerate(scores) if score > threshold]
        if len(idx_vec) == 0:
            print('there are not any detections, passing to the next frame...')
        else:
            tmp_object_boxes = []
            dim = image.shape[0:2]  # (height, width) -- loop-invariant
            for idx in idx_vec:
                # Convert normalized [ymin, xmin, ymax, xmax] to pixels.
                box = self.box_normal_to_pixel(boxes[idx], dim)
                box_h = box[2] - box[0]
                box_w = box[3] - box[1]
                # Kept for the (currently disabled) shape filter below;
                # +0.01 guards against division by zero.
                ratio = box_h / (box_w + 0.01)

                #if ((ratio < 0.8) and (box_h>20) and (box_w>20)):
                tmp_object_boxes.append(box)
                #print(box, ', confidence: ', scores[idx], 'ratio:', ratio)

            self.object_boxes = tmp_object_boxes
        # Fall back to [] instead of raising AttributeError when no frame has
        # ever produced detections (object_boxes may not be set in __init__).
        return getattr(self, 'object_boxes', []), output_dict
    def get_localization(self, image, visual=False):
        """Detect players in one frame and return their pixel-space boxes.

        Args:
            image: image array, shape (H, W, C) -- assumed RGB, confirm with
                caller. Mutated in place when ``visual`` is True.
            visual: when True, draw the detections and display them with
                matplotlib.

        Returns:
            List of pixel-coordinate boxes from ``box_normal_to_pixel``. When
            no detection clears the score threshold, the previous frame's
            boxes are returned unchanged.
        """
        # Single-class model: every detection is labeled 'player'.
        category_index = {1: {'id': 1, 'name': u'player'}}

        with self.detection_graph.as_default():
            # The graph expects a batch dimension: (1, H, W, C).
            image_expanded = np.expand_dims(image, axis=0)
            (boxes, scores, classes, num_detections) = self.sess.run(
                [self.boxes, self.scores, self.classes, self.num_detections],
                feed_dict={self.image_tensor: image_expanded})
            if visual:
                visualization_utils.visualize_boxes_and_labels_on_image_array_tracker(
                    image,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    min_score_thresh=.4,
                    line_thickness=3)
                plt.figure(figsize=(9, 6))
                plt.imshow(image)
                plt.show()
            # Drop the batch dimension for per-detection indexing.
            boxes = np.squeeze(boxes)
            classes = np.squeeze(classes)
            scores = np.squeeze(scores)
            # Indices of detections above the confidence threshold.
            idx_vec = [i for i, score in enumerate(scores) if score > 0.6]
            if len(idx_vec) == 0:
                print(
                    'there are not any detections, passing to the next frame...'
                )
            else:
                tmp_object_boxes = []
                dim = image.shape[0:2]  # (height, width) -- loop-invariant
                for idx in idx_vec:
                    # Convert normalized [ymin, xmin, ymax, xmax] to pixels.
                    box = self.box_normal_to_pixel(boxes[idx], dim)
                    box_h = box[2] - box[0]
                    box_w = box[3] - box[1]
                    # Kept for the (currently disabled) shape filter below;
                    # +0.01 guards against division by zero.
                    ratio = box_h / (box_w + 0.01)

                    #if ((ratio < 0.8) and (box_h>20) and (box_w>20)):
                    tmp_object_boxes.append(box)
                    #print(box, ', confidence: ', scores[idx], 'ratio:', ratio)

                self.object_boxes = tmp_object_boxes
        # Fall back to [] instead of raising AttributeError when no frame has
        # ever produced detections (object_boxes may not be set in __init__).
        return getattr(self, 'object_boxes', [])