Example #1
    def run(self, timestamp, frame, annotations):
        np_frame = np.array(frame)

        detection_annotations = []
        if self._object_detection_engine.run(timestamp, np_frame,
                                             detection_annotations):
            converted_detections = []
            # Converts to MediaPipe Detection proto.
            for idx, annotation in enumerate(detection_annotations):
                detection = mediapipe_tracker.MediaPipeDetection()
                detection.timestamp_usec = timestamp
                detection.label = [annotation.class_name]
                detection.score = [annotation.confidence_score]
                detection.detection_id = idx
                location_data = mediapipe_tracker.LocationData()
                relative_bounding_box = mediapipe_tracker.RelativeBoundingBox()
                relative_bounding_box.xmin = annotation.bbox.left
                relative_bounding_box.ymin = annotation.bbox.top
                relative_bounding_box.width = annotation.bbox.right - annotation.bbox.left
                relative_bounding_box.height = annotation.bbox.bottom - annotation.bbox.top
                location_data.relative_bounding_box = relative_bounding_box
                detection.location_data = location_data
                converted_detections.append(detection)

            # Feeds the converted detections into the MediaPipe tracker.
            tracked_annotations = self._mediapipe_tracker.process(
                timestamp, converted_detections, np_frame)

            # Converts back to AutoML Video Edge detection structs.
            for tracked_annotation in tracked_annotations:
                # Picks the label with the highest confidence score.
                highest_idx = tracked_annotation.score.index(
                    max(tracked_annotation.score))
                rbb = tracked_annotation.location_data.relative_bounding_box
                output_annotation = ObjectTrackingAnnotation(
                    timestamp=timestamp,
                    track_id=tracked_annotation.detection_id,
                    class_id=1 if tracked_annotation.label_id else -1,
                    class_name=tracked_annotation.label[highest_idx]
                    if tracked_annotation.label else '',
                    confidence_score=tracked_annotation.score[highest_idx],
                    bbox=NormalizedBoundingBox(
                        left=rbb.xmin,
                        top=rbb.ymin,
                        right=rbb.xmin + rbb.width,
                        bottom=rbb.ymin + rbb.height))
                annotations.append(output_annotation)
            return True
        else:
            return False
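A minimal way to drive run() frame-by-frame might look like the sketch below. The TrackerWithEngine class name, the input path, and the OpenCV capture loop are assumptions for illustration, not part of the original example:

import cv2

tracker = TrackerWithEngine()  # hypothetical wrapper exposing run()
capture = cv2.VideoCapture('input.mp4')  # assumed input path
timestamp_usec = 0
while True:
    success, frame = capture.read()
    if not success:
        break
    annotations = []
    # run() returns True only when the detection engine produced output.
    if tracker.run(timestamp_usec, frame, annotations):
        for ann in annotations:
            print(ann.track_id, ann.class_name, ann.confidence_score)
    timestamp_usec += 33333  # roughly 30 fps, expressed in microseconds
capture.release()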
Example #2
  def update(self, frame, detections_from_frame, labels):
      np_frame = np.array(frame)
      annotations = []
      # Grabs the current millisecond for the timestamp.
      timestamp = current_milli_time()
      converted_detections = []
      # Converts to MediaPipe Detection proto.
      for idx, detection_box in enumerate(detections_from_frame):
        detection = mediapipe_tracker.MediaPipeDetection()
        detection.timestamp_usec = timestamp
        # detection_box layout: [xmin, ymin, xmax, ymax, confidence, class_id].
        detection.label = [labels.get(int(detection_box[5]))]
        detection.score = [detection_box[4].item()]
        detection.detection_id = idx
        location_data = mediapipe_tracker.LocationData()
        relative_bounding_box = mediapipe_tracker.RelativeBoundingBox()
        relative_bounding_box.xmin = detection_box[0]
        relative_bounding_box.ymin = detection_box[1]
        relative_bounding_box.width = detection_box[2] - detection_box[0]
        relative_bounding_box.height = detection_box[3] - detection_box[1]
        location_data.relative_bounding_box = relative_bounding_box
        detection.location_data = location_data
        converted_detections.append(detection)

      # Feeds the converted detections into the MediaPipe tracker.
      tracked_annotations = self._mediapipe_tracker.process(
          timestamp, converted_detections, np_frame)

      # Converts back to AutoML Video Edge detection structs.
      for tracked_annotation in tracked_annotations:
        # Picks the label with the highest confidence score.
        highest_idx = tracked_annotation.score.index(
            max(tracked_annotation.score))
        rbb = tracked_annotation.location_data.relative_bounding_box
        output_annotation = ObjectTrackingAnnotation(
            timestamp=timestamp,
            track_id=tracked_annotation.detection_id,
            class_id=1 if tracked_annotation.label_id else -1,
            class_name=tracked_annotation.label[highest_idx]
            if tracked_annotation.label else '',
            confidence_score=tracked_annotation.score[highest_idx],
            bbox=NormalizedBoundingBox(
                left=rbb.xmin,
                top=rbb.ymin,
                right=rbb.xmin + rbb.width,
                bottom=rbb.ymin + rbb.height))
        annotations.append(output_annotation)
      return annotations
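update() takes raw detector rows rather than engine annotations. This usage sketch assumes the [xmin, ymin, xmax, ymax, confidence, class_id] layout implied by the indexing above, torch tensors (hence the .item() call), and a hypothetical wrapper object already holding the MediaPipe tracker:

import numpy as np
import torch

labels = {0: 'person', 1: 'bicycle'}  # example label map, an assumption
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder frame
# One normalized detection row: xmin, ymin, xmax, ymax, confidence, class_id.
detections = [torch.tensor([0.10, 0.20, 0.40, 0.60, 0.87, 0.0])]
annotations = wrapper.update(frame, detections, labels)  # wrapper is hypothetical
for ann in annotations:
    print(ann.track_id, ann.class_name, ann.bbox.left, ann.bbox.top)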
Example #3
    def run(self, timestamp, frame, annotations):
        with self.graph.as_default():
            # Tensors to feed in.
            feed_dict = {
                # [None, ...] adds the batch dimension the graph expects.
                'import/image_tensor:0': np.array(frame)[None, ...],
            }
            if self._is_lstm:
                feed_dict.update({
                    'import/raw_inputs/init_lstm_c:0': self.lstm_c,
                    'import/raw_inputs/init_lstm_h:0': self.lstm_h
                })

            session_return = self.session.run(self._output_nodes,
                                              feed_dict=feed_dict)

            # Unpacks tensor output.
            if self._is_lstm:
                (detection_scores, detection_boxes, detection_classes,
                 num_detections, self.lstm_c, self.lstm_h) = session_return
            else:
                (detection_scores, detection_boxes, detection_classes,
                 num_detections) = session_return

        boxes = detection_boxes[0]  # index by 0 to remove batch dimension
        scores = detection_scores[0]
        classes = detection_classes[0]

        for i in range(int(num_detections)):
            box = boxes[i]

            if scores[i] > self.config.score_threshold:

                bbox = NormalizedBoundingBox(left=box[1],
                                             top=box[0],
                                             right=box[3],
                                             bottom=box[2])

                annotation = ObjectTrackingAnnotation(
                    timestamp=timestamp,
                    track_id=-1,
                    class_id=classes[i],
                    class_name=self.label_map[classes[i]],
                    confidence_score=scores[i],
                    bbox=bbox)

                annotations.append(annotation)

        return True
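When _is_lstm is set, run() threads self.lstm_c and self.lstm_h from one frame to the next: the session returns updated state tensors, which are stored and fed back on the following call. That state has to be zero-initialized before the first frame; a sketch of that step follows, where the (1, 8, 8, 320) shape is purely an assumption (real shapes come from the exported graph):

import numpy as np

# Hypothetical zero-initialization of the LSTM state, done once up front;
# the shape below is an assumption, not taken from the original model.
lstm_state_shape = (1, 8, 8, 320)
initial_lstm_c = np.zeros(lstm_state_shape, dtype=np.float32)
initial_lstm_h = np.zeros(lstm_state_shape, dtype=np.float32)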
Example #4
    def run(self, timestamp, frame, annotations):
        # The TFLite interpreter does not allow references to its native
        # tensors to be retained; fill_inputs() releases the input tensors
        # after filling them with data.
        self.fill_inputs(frame)
        self._interpreter.invoke()

        # TFLite detection postprocess outputs, in order:
        # boxes, classes, scores, num_detections.
        boxes = self.output_tensor(0)
        classes = self.output_tensor(1)
        scores = self.output_tensor(2)
        num_detections = self.output_tensor(3)
        if self._is_lstm:
            output_lstm_c = self.output_tensor(4)
            output_lstm_h = self.output_tensor(5)

            np.copyto(self._lstm_c, output_lstm_c)
            np.copyto(self._lstm_h, output_lstm_h)

        for i in range(int(num_detections)):
            box = boxes[i]

            if scores[i] > self._config.score_threshold:

                bbox = NormalizedBoundingBox(left=box[1],
                                             top=box[0],
                                             right=box[3],
                                             bottom=box[2])

                annotation = ObjectTrackingAnnotation(
                    timestamp=timestamp,
                    track_id=-1,
                    class_id=int(classes[i]),
                    class_name=self.label_list[int(classes[i])],
                    confidence_score=scores[i],
                    bbox=bbox)

                annotations.append(annotation)

        return True
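The output_tensor() helper is not shown in the snippet. With the standard tf.lite.Interpreter API it could plausibly be written as below; this is a sketch, not the original implementation. get_tensor() returns a copy of the output data, which fits the constraint noted at the top of run() that no references into the interpreter's buffers may be retained:

import numpy as np
import tensorflow as tf

def make_output_tensor(interpreter):
    # Builds a helper that reads the i-th output; get_tensor() returns a
    # copy of the data, so no native tensor reference is retained.
    output_details = interpreter.get_output_details()

    def output_tensor(i):
        return np.squeeze(interpreter.get_tensor(output_details[i]['index']))

    return output_tensor

interpreter = tf.lite.Interpreter(model_path='model.tflite')  # assumed path
interpreter.allocate_tensors()
output_tensor = make_output_tensor(interpreter)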