Example #1
    def annotate(self, image, **kwargs):
        """Get the inference of the image and process the inference result.

        Returns:
            A numpy array, the shape is N * (x, y, w, h, confidence),
            N is the number of detection box.
        """

        # First, convert the Bob CxHxW image to the
        # OpenCV HxWxC BGR format
        image = bob_to_opencvbgr(image)

        input_height, input_width, _ = image.shape
        image, scale = self._preprocess(image)
        self.model = self.model.to(self.device)
        image = torch.from_numpy(image).unsqueeze(0)
        with torch.no_grad():
            image = image.to(self.device)
            scale = scale.to(self.device)
            loc, conf, landms = self.model(image)
        dets = self._postprocess(loc, conf, scale, input_height, input_width)

        if len(dets) == 0:
            logger.error("Face not detected. Returning None")
            return None

        dets = dets[0] if self.one_face_only else dets

        return dets
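
A minimal usage sketch for the method above. The wrapper class name, its constructor flag, and the input file name are assumptions, not part of the original source:

# Hypothetical usage; the class name and constructor flag are assumptions.
import bob.io.base

detector = FaceDetector(one_face_only=True)  # hypothetical wrapper class
image = bob.io.base.load("face.png")  # Bob CxHxW image

box = detector.annotate(image)
if box is not None:
    x, y, w, h, confidence = box
    print(f"Face at ({x}, {y}), size {w}x{h}, confidence {confidence:.2f}")
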
def faceX_cropper(
    files,
    database_path,
    output_path,
):
    annotator = FaceX106Landmarks()

    image_size = 112

    # Load each image, align it with the detected landmarks, and save the crop
    for f in files:
        f = f.rstrip("\n")

        output_filename = os.path.join(output_path, f)
        if os.path.exists(output_filename):
            continue

        image = bob.io.base.load(os.path.join(database_path, f))

        # If the image is gray-scale, replicate it across three channels
        if image.ndim == 2:
            image = np.repeat(np.expand_dims(image, 0), 3, axis=0)

        # Detect landmarks
        annot = annotator.annotate(image.copy())

        if annot is None:
            print(f"Face on {f} was not detected")
        else:
            annot = annot.flatten()

            landmarks = np.array(lms106_2_lms5(annot))
            landmarks = landmarks.reshape((5, 2))

            M, pose_index = estimate_norm(landmarks, image_size=image_size)

            # Convert to OpenCV BGR for warping, then back to Bob format
            image = bob_to_opencvbgr(image)

            cropped_image = cv2.warpAffine(image.copy(),
                                           M, (image_size, image_size),
                                           borderValue=0.0)

            cropped_image = opencvbgr_to_bob(cropped_image)

            os.makedirs(os.path.dirname(output_filename), exist_ok=True)
            bob.io.base.save(cropped_image, output_filename)

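A hedged invocation sketch for faceX_cropper. The file-list format and the directory paths below are assumptions:

# Hypothetical invocation; the file list and paths are assumptions.
with open("file_list.txt") as fh:  # one relative image path per line
    files = fh.readlines()

faceX_cropper(
    files,
    database_path="/path/to/database",
    output_path="/path/to/aligned_crops",
)
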
Example #3
    def annotate(self, image, **kwargs):
        """Annotates an image using mtcnn

        Parameters
        ----------
        image : numpy.ndarray
            An RGB image in Bob format.
        **kwargs
            Ignored.

        Returns
        -------
        numpy.ndarray
            The detected landmark coordinates, or ``None`` if no face
            is detected.
        """

        # Detect the face
        if self.use_mtcnn_detector:
            annotations = self.face_detector.annotate(image)
            if annotations is None:
                return None

            dets = [
                annotations["topleft"][1],
                annotations["topleft"][0],
                annotations["bottomright"][1],
                annotations["bottomright"][0],
            ]
        else:
            dets = self.face_detector.annotate(image.copy())

        if dets is None:
            return None

        # First, convert the Bob CxHxW image to the
        # OpenCV HxWxC BGR format
        image = bob_to_opencvbgr(image)
        image_pre = self._preprocess(image, dets)
        self.model = self.model.to(self.device)
        image_pre = image_pre.unsqueeze(0)
        with torch.no_grad():
            image_pre = image_pre.to(self.device)
            _, landmarks_normal = self.model(image_pre)
        landmarks = self._postprocess(landmarks_normal)

        return np.array(landmarks)
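
A minimal usage sketch for this landmark annotator. The class name and constructor flag are assumptions:

# Hypothetical usage; the class name and flag are assumptions.
import bob.io.base

annotator = LandmarkAnnotator(use_mtcnn_detector=True)  # hypothetical class
image = bob.io.base.load("face.png")  # Bob CxHxW RGB image

landmarks = annotator.annotate(image)
if landmarks is not None:
    print(f"Landmark array shape: {landmarks.shape}")
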
Example #4
    def transform(self, X, annotations, resize=True):
        """
        Crop the face based on Bounding box positions

        Parameters
        ----------

        X : numpy.ndarray
            The image to be normalized

        annotations : dict
            The annotations of the image. It needs to contain ''topleft'' and ''bottomright'' positions

        resize: bool
            If True, the image will be resized to the final size
            In this case, margin is not used

        """

        assert "topleft" in annotations
        assert "bottomright" in annotations

        # If the image is gray-scale, replicate it across three channels
        if X.ndim == 2:
            logger.warning(
                "Gray-scale image. Expanding the channels before cropping")
            X = np.repeat(np.expand_dims(X, 0), 3, axis=0)

        top = int(annotations["topleft"][0])
        left = int(annotations["topleft"][1])

        bottom = int(annotations["bottomright"][0])
        right = int(annotations["bottomright"][1])

        width = right - left
        height = bottom - top

        if resize:
            # If resizing, don't use the expanded borders
            face_crop = X[:, top:bottom, left:right]

            face_crop = (bob_to_opencvbgr(face_crop)
                         if face_crop.ndim > 2 else face_crop)

            face_crop = cv2.resize(
                face_crop,
                self.final_image_size[::-1],
                interpolation=self.opencv_interpolation,
            )

            face_crop = opencvbgr_to_bob(np.array(face_crop))

        else:

            # Expanding the borders
            top_expanded = int(np.maximum(top - self.margin * height, 0))
            left_expanded = int(np.maximum(left - self.margin * width, 0))

            bottom_expanded = int(
                np.minimum(bottom + self.margin * height, X.shape[1]))
            right_expanded = int(
                np.minimum(right + self.margin * width, X.shape[2]))

            face_crop = X[:, top_expanded:bottom_expanded,
                          left_expanded:right_expanded]

        return face_crop
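
A hedged usage sketch for this transformer. The class name and constructor arguments are assumptions, modeled on the attributes the method reads (final_image_size, margin, opencv_interpolation):

# Hypothetical usage; the class name and constructor arguments are assumptions.
import bob.io.base
import cv2

cropper = BoundingBoxCropper(
    final_image_size=(112, 112),
    margin=0.2,
    opencv_interpolation=cv2.INTER_LINEAR,
)
image = bob.io.base.load("face.png")  # Bob CxHxW image
annotations = {"topleft": (30, 40), "bottomright": (200, 180)}  # (y, x) pairs

face = cropper.transform(image, annotations, resize=True)
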
Example #5
    def transform(self, X, annotations=None):
        """
        Geometrically normalize a face using the eye positions

        Parameters
        ----------

        X : numpy.ndarray
            The image to be normalized

        annotations : dict
            The annotations of the image. It needs to contain the
            ``reye`` and ``leye`` positions


        Returns
        -------

            cropped_image : numpy.ndarray
                The normalized image

        """

        self._check_annotations(annotations)

        if not self.allow_upside_down_normalized_faces:
            self._check_upsidedown(annotations)

        (
            source_eyes_distance,
            source_eyes_center,
            source_eyes_angle,
        ) = self._get_anthropometric_measurements(annotations)

        # m_geomNorm->setRotationAngle(angle * 180. / M_PI - m_eyesAngle);
        # Compute the rotation angle relative to the target eye angle, in degrees
        rotational_angle = source_eyes_angle - self.target_eyes_angle

        target_source_ratio = self.target_eyes_distance / source_eyes_distance

        # Rotation with OpenCV

        cropped_image = bob_to_opencvbgr(X) if X.ndim > 2 else X
        original_height = cropped_image.shape[0]
        original_width = cropped_image.shape[1]

        cropped_image = self._rotate_image_center(cropped_image,
                                                  rotational_angle,
                                                  source_eyes_center)

        # Cropping

        target_eyes_center_rescaled = np.floor(
            self.target_eyes_center / target_source_ratio).astype("int")

        top = int(source_eyes_center[0] - target_eyes_center_rescaled[0])
        left = int(source_eyes_center[1] - target_eyes_center_rescaled[1])

        bottom = max(0, top) + (int(
            self.final_image_size[0] / target_source_ratio))
        right = max(0, left) + (int(
            self.final_image_size[1] / target_source_ratio))

        cropped_image = cropped_image[max(0, top):bottom,
                                      max(0, left):right, ...]

        # Check whether the cropped image needs padding.
        # This happens when the crop extends beyond the original image
        # dimensions
        expanded_image = cropped_image

        if original_height < bottom or original_width < right:

            pad_height = (cropped_image.shape[0] +
                          (bottom - original_height) if
                          original_height < bottom else cropped_image.shape[0])

            pad_width = (cropped_image.shape[1] + (right - original_width)
                         if original_width < right else cropped_image.shape[1])

            expanded_image = (np.zeros(
                (pad_height, pad_width, 3),
                dtype=cropped_image.dtype,
            ) if cropped_image.ndim > 2 else np.zeros(
                (pad_height, pad_width), dtype=cropped_image.dtype))

            expanded_image[0:cropped_image.shape[0], 0:cropped_image.shape[1],
                           ...] = cropped_image

        # Check whether the image needs to be translated.
        # This happens when the top or left coordinate on the source
        # image is negative
        if top < 0 or left < 0:
            expanded_image = self._translate_image(expanded_image,
                                                   -1 * min(0, left),
                                                   -1 * min(0, top))

        # Scaling

        expanded_image = cv2.resize(
            expanded_image,
            self.final_image_size[::-1],
            interpolation=self.opencv_interpolation,
        )

        expanded_image = (opencvbgr_to_bob(expanded_image)
                          if X.ndim > 2 else expanded_image)

        return expanded_image
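
A minimal usage sketch for this geometric normalizer. The class name and constructor arguments are assumptions, modeled on the attributes the method reads (target eye geometry, final_image_size, opencv_interpolation); the eye coordinates are illustrative:

# Hypothetical usage; the class name and constructor arguments are assumptions.
import bob.io.base
import cv2

normalizer = GeomNorm(
    target_eyes_distance=42,
    target_eyes_center=(40, 56),  # (y, x) in the output image
    target_eyes_angle=0.0,
    final_image_size=(112, 112),
    opencv_interpolation=cv2.INTER_LINEAR,
)
image = bob.io.base.load("face.png")  # Bob CxHxW image
annotations = {"reye": (60, 40), "leye": (60, 85)}  # (y, x) in Bob convention

normalized = normalizer.transform(image, annotations)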