def __call__(self,
             image: Union[Image, np.ndarray, str],
             roi: Optional[Rect] = None,
             is_right_eye: bool = False) -> IrisResults:
    """Run iris-landmark inference on an (eye) image region.

    Args:
        image (Image|ndarray|str): Numpy array of shape
            `(height, width, 3)`, PIL Image instance, or path to an image.

        roi (Rect|None): Optional region within the image containing the eye.

        is_right_eye (bool): When `True` the input is flipped horizontally
            before inference and the resulting landmarks are flipped back.

    Returns:
        (IrisResults) Eye contour and iris landmarks projected back into
        the coordinate space of the input image.
    """
    height, width = self.input_shape[1:3]
    image_data = image_to_tensor(
        image,
        roi,
        output_size=(width, height),
        keep_aspect_ratio=True,  # equivalent to scale_mode=FIT
        output_range=(0, 1),  # see iris_landmark_cpu.pbtxt
        flip_horizontal=is_right_eye)
    # add a leading batch dimension expected by the model
    input_data = image_data.tensor_data[np.newaxis]
    self.interpreter.set_tensor(self.input_index, input_data)
    self.interpreter.invoke()
    raw_eye_landmarks = self.interpreter.get_tensor(self.eye_index)
    raw_iris_landmarks = self.interpreter.get_tensor(self.iris_index)
    # (height, width) computed once above; the original re-read
    # self.input_shape here redundantly
    eye_contour = project_landmarks(raw_eye_landmarks,
                                    tensor_size=(width, height),
                                    image_size=image_data.original_size,
                                    padding=image_data.padding,
                                    roi=roi,
                                    flip_horizontal=is_right_eye)
    iris_landmarks = project_landmarks(raw_iris_landmarks,
                                       tensor_size=(width, height),
                                       image_size=image_data.original_size,
                                       padding=image_data.padding,
                                       roi=roi,
                                       flip_horizontal=is_right_eye)
    return IrisResults(eye_contour, iris_landmarks)
    def __call__(
        self,
        image: Union[Image, np.ndarray, str],
        roi: Optional[Rect] = None
    ) -> List[Landmark]:
        """Run inference and return detections from a given image

        Args:
            image (Image|ndarray|str): Numpy array of shape
                `(height, width, 3)` or PIL Image instance or path to image.

            roi (Rect|None): Region within the image that contains a face.

        Returns:
            (list) List of face landmarks in normalised coordinates relative
            to the input image, i.e. values ranging from [0, 1]. Empty list
            if the face-presence score is at or below DETECTION_THRESHOLD.
        """
        height, width = self.input_shape[1:3]
        image_data = image_to_tensor(
            image,
            roi,
            output_size=(width, height),
            keep_aspect_ratio=False,
            output_range=(0., 1.))
        # add a leading batch dimension expected by the model
        input_data = image_data.tensor_data[np.newaxis]
        self.interpreter.set_tensor(self.input_index, input_data)
        self.interpreter.invoke()
        raw_data = self.interpreter.get_tensor(self.data_index)
        raw_face = self.interpreter.get_tensor(self.face_index)
        # second tensor contains confidence score for a face detection
        face_flag = sigmoid(raw_face).flatten()[-1]
        # no data if no face was detected
        if face_flag <= DETECTION_THRESHOLD:
            return []
        # extract and normalise landmark data
        # ((height, width) computed once above; the original re-read
        # self.input_shape here redundantly)
        return project_landmarks(raw_data,
                                 tensor_size=(width, height),
                                 image_size=image_data.original_size,
                                 padding=image_data.padding,
                                 roi=roi)
# --- Example 3 --- (stray scraper text "Esempio n. 3" / vote count "0" converted to a comment)
    def __call__(self,
                 image: Union[Image, np.ndarray, str],
                 roi: Optional[Rect] = None) -> List[Detection]:
        """Detect faces in an image and return their bounding detections.

        Args:
            image (Image|ndarray|str): Numpy array of shape
                `(height, width, 3)`, PIL Image instance or file name.

            roi (Rect|None): Optional region within the image that may
                contain faces.

        Returns:
            (list) List of detection results with relative coordinates.
        """
        # model input size comes from the interpreter's input tensor shape
        tensor_height, tensor_width = self.input_shape[1:3]
        image_data = image_to_tensor(image,
                                     roi,
                                     output_size=(tensor_width, tensor_height),
                                     keep_aspect_ratio=True,
                                     output_range=(-1, 1))
        # prepend a batch dimension and run the model
        batch = image_data.tensor_data[np.newaxis]
        interpreter = self.interpreter
        interpreter.set_tensor(self.input_index, batch)
        interpreter.invoke()
        # decode raw outputs into boxes and per-box confidence scores
        decoded_boxes = self._decode_boxes(
            interpreter.get_tensor(self.bbox_index))
        confidence = self._get_sigmoid_scores(
            interpreter.get_tensor(self.score_index))
        candidates = FaceDetection._convert_to_detections(
            decoded_boxes, confidence)
        # drop overlapping/low-score candidates, then undo letterboxing
        survivors = non_maximum_suppression(candidates,
                                            MIN_SUPPRESSION_THRESHOLD,
                                            MIN_SCORE,
                                            weighted=True)
        return detection_letterbox_removal(survivors, image_data.padding)