def face_detection_to_roi(
    face_detection: Detection,
    image_size: Tuple[int, int]
) -> Rect:
    """Return a normalized ROI from a face detection result.

    The result of this function is intended to serve as the input of
    calls to `FaceLandmark`:

    ```
        MODEL_PATH = '/var/mediapipe/models/'
        ...
        face_detect = FaceDetection(model_path=MODEL_PATH)
        face_landmarks = FaceLandmark(model_path=MODEL_PATH)
        image = Image.open('/home/user/pictures/photo.jpg')
        # detect faces
        detections = face_detect(image)
        for detection in detections:
            # find ROI from detection
            roi = face_detection_to_roi(detection, image.size)
            # extract face landmarks using ROI
            landmarks = face_landmarks(image, roi)
            ...
    ```

    Args:
        face_detection (Detection): Normalized face detection result from a
            call to `FaceDetection`.

        image_size (tuple): A tuple of `(image_width, image_height)` denoting
            the size of the input image the face detection results came from.

    Returns:
        (Rect) Normalized ROI for passing to `FaceLandmark`.
    """
    # Scale the detection to pixel coordinates so the eye keypoints can be
    # used as absolute rotation anchors for the ROI.
    absolute_detection = face_detection.scaled(image_size)
    left_eye = absolute_detection[FaceIndex.LEFT_EYE]
    right_eye = absolute_detection[FaceIndex.RIGHT_EYE]
    # Note: the bbox passed here is the original (normalized) one;
    # bbox_to_roi receives image_size separately to do its own scaling.
    return bbox_to_roi(
        face_detection.bbox,
        image_size,
        rotation_keypoints=[left_eye, right_eye],
        scale=ROI_SCALE,
        size_mode=SizeMode.SQUARE_LONG
    )
# Example 2
    def _convert_to_detections(boxes: np.ndarray,
                               scores: np.ndarray) -> List[Detection]:
        """Filter boxes by score and geometric validity; wrap as detections.

        Keeps only boxes whose score exceeds ``MIN_SCORE`` and whose
        second corner lies strictly beyond the first (positive extent).
        """

        # A box is usable only if both its width and height are positive.
        def has_positive_extent(box: np.ndarray) -> bool:
            return np.all(box[1] > box[0])

        keep_mask = scores > MIN_SCORE
        # NOTE(review): `[:, 1]` implies scores has a leading singleton
        # axis (e.g. shape (1, N)) — verify against the model output.
        selected_boxes = boxes[np.argwhere(keep_mask)[:, 1], :]
        selected_scores = scores[keep_mask]
        results: List[Detection] = []
        for candidate_box, candidate_score in zip(selected_boxes,
                                                  selected_scores):
            if has_positive_extent(candidate_box):
                results.append(Detection(candidate_box, candidate_score))
        return results
def _weighted_non_maximum_suppression(
        indexed_scores: List[Tuple[int, float]], detections: List[Detection],
        min_suppression_threshold: float,
        min_score: Optional[float]) -> List[Detection]:
    """Return only most significant detections; merge similar detections.

    Args:
        indexed_scores: Pairs of ``(detection_index, score)``; the first
            entry is treated as the current best candidate on each pass,
            so the list is presumably pre-sorted by descending score —
            TODO confirm with the caller.
        detections: Detections that the indices above refer to.
        min_suppression_threshold: Overlap similarity above which two
            boxes are considered the same object and merged.
        min_score: If given, processing stops once the best remaining
            candidate scores below this value.

    Returns:
        Merged detections, one per cluster of overlapping boxes.
    """
    remaining_indexed_scores = list(indexed_scores)
    remaining: List[Tuple[int, float]] = []
    candidates: List[Tuple[int, float]] = []
    outputs: List[Detection] = []

    while len(remaining_indexed_scores) > 0:
        # Current pivot: the first (highest-priority) remaining candidate.
        detection = detections[remaining_indexed_scores[0][0]]
        # exit loop if remaining scores are below threshold
        if min_score is not None and detection.score < min_score:
            break
        num_prev_indexed_scores = len(remaining_indexed_scores)
        detection_bbox = detection.bbox
        # Partition the remaining entries against the pivot on every pass.
        remaining.clear()
        candidates.clear()
        weighted_detection = detection
        for (index, score) in remaining_indexed_scores:
            remaining_bbox = detections[index].bbox
            similarity = _overlap_similarity(remaining_bbox, detection_bbox)
            if similarity > min_suppression_threshold:
                # Overlaps the pivot enough to be merged into it.
                candidates.append((index, score))
            else:
                # Kept for a later pass with a different pivot.
                remaining.append((index, score))
        # weighted merging of similar (close) boxes
        if len(candidates) > 0:
            # Accumulator matches detection.data's shape: presumably
            # 2 bbox corner rows plus len(detection) keypoint rows, each
            # an (x, y) pair — TODO confirm against Detection.
            weighted = np.zeros((2 + len(detection), 2), dtype=np.float32)
            total_score = 0.
            for index, score in candidates:
                total_score += score
                weighted += detections[index].data * score
            # Normalize to a score-weighted average of candidate data.
            weighted /= total_score
            weighted_detection = Detection(weighted, detection.score)
        outputs.append(weighted_detection)
        # exit the loop if the number of indexed scores didn't change
        # (the pivot suppressed nothing, so we would loop forever)
        if num_prev_indexed_scores == len(remaining):
            break
        remaining_indexed_scores = list(remaining)
    return outputs
 def adjust_data(detection: Detection) -> Detection:
     """Shift and rescale a detection's coordinate data.

     NOTE(review): reads ``left``, ``top``, ``h_scale`` and ``v_scale``
     from the enclosing scope — presumably letterbox padding offsets and
     scale factors; verify against the enclosing function.
     """
     shifted = detection.data - (left, top)
     rescaled = shifted / (h_scale, v_scale)
     return Detection(rescaled, detection.score)