Example #1
    def _create_image(self, detected_face, mask_type):
        """ Create a mask preview image for saving out to disk

        Parameters
        ----------
        detected_face: :class:`lib.align.DetectedFace`
            The detected face object to generate the mask preview for
        mask_type: str
            The stored mask type name to create the image for

        Returns
        -------
        :class:`numpy.ndarray`:
            A preview image depending on the output type in one of the following forms:
              - Containing 3 sub images: The original face, the masked face and the mask
              - The mask only
              - The masked face
        """
        mask = detected_face.mask[mask_type]
        mask.set_blur_and_threshold(**self._output["opts"])
        if not self._output["full_frame"] or self._input_is_faces:
            if self._input_is_faces:
                face = AlignedFace(detected_face.landmarks_xy,
                                   image=detected_face.image,
                                   centering=mask.stored_centering,
                                   size=detected_face.image.shape[0],
                                   is_aligned=True).face
            else:
                centering = "legacy" if self._alignments.version == 1.0 else mask.stored_centering
                detected_face.load_aligned(detected_face.image,
                                           centering=centering,
                                           force=True)
                face = detected_face.aligned.face
            mask = cv2.resize(detected_face.mask[mask_type].mask,
                              (face.shape[1], face.shape[0]),
                              interpolation=cv2.INTER_CUBIC)[..., None]
        else:
            # cv2 fails if this comes in as an imageio.core.Array
            face = np.array(detected_face.image)
            mask = mask.get_full_frame_mask(face.shape[1], face.shape[0])
            mask = np.expand_dims(mask, -1)

        height, width = face.shape[:2]
        if self._output["type"] == "combined":
            masked = (face.astype("float32") * mask.astype("float32") /
                      255.).astype("uint8")
            mask = np.tile(mask, 3)
            for img in (face, masked, mask):
                cv2.rectangle(img, (0, 0), (width - 1, height - 1),
                              (255, 255, 255), 1)
            out_image = np.concatenate((face, masked, mask), axis=1)
        elif self._output["type"] == "mask":
            out_image = mask
        elif self._output["type"] == "masked":
            out_image = np.concatenate([face, mask], axis=-1)
        return out_image
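
The "combined" branch above reduces to a few lines once taken out of the class. A minimal standalone sketch (the helper name `combined_preview` is ours, not faceswap's), assuming `face` is a uint8 BGR array of shape (H, W, 3) and `mask` a uint8 array of shape (H, W, 1):

    import cv2
    import numpy as np

    def combined_preview(face, mask):
        """ Lay out `face | masked face | mask` side by side. """
        masked = (face.astype("float32") * mask.astype("float32") / 255.).astype("uint8")
        mask = np.tile(mask, 3)  # 1 channel -> 3 channels for display
        height, width = face.shape[:2]
        for img in (face, masked, mask):  # draws in place; pass copies to keep originals
            cv2.rectangle(img, (0, 0), (width - 1, height - 1), (255, 255, 255), 1)
        return np.concatenate((face, masked, mask), axis=1)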
Example #2
    def _background_extract(self, output_folder, progress_queue):
        """ Perform the background extraction in a thread so GUI doesn't become unresponsive.

        Parameters
        ----------
        output_folder: str
            The location to save the output faces to
        progress_queue: :class:`queue.Queue`
            The queue to place incremental counts to for updating the GUI's progress bar
        """
        saver = ImagesSaver(str(get_folder(output_folder)), as_bytes=True)
        loader = ImagesLoader(self._input_location,
                              count=self._alignments.frames_count)
        for frame_idx, (filename, image) in enumerate(loader.load()):
            logger.trace("Outputting frame: %s: %s", frame_idx, filename)
            basename = os.path.basename(filename)
            frame_name, extension = os.path.splitext(basename)
            final_faces = []
            progress_queue.put(1)
            for face_idx, face in enumerate(self._frame_faces[frame_idx]):
                output = "{}_{}{}".format(frame_name, str(face_idx), extension)
                aligned = AlignedFace(face.landmarks_xy,
                                      image=image,
                                      centering="head",
                                      size=512)  # TODO user selectable size
                face.hash, b_image = encode_image_with_hash(
                    aligned.face, extension)
                saver.save(output, b_image)
                final_faces.append(face.to_alignment())
            self._alignments.data[basename]["faces"] = final_faces
        saver.close()
Example #3
    def sort_face_yaw(self):
        """ Sort by estimated face yaw angle """
        logger.info("Sorting by estimated face yaw angle..")
        filenames = []
        yaws = []
        for filename, image, metadata in tqdm(self._loader.load(),
                                              desc="Classifying Faces",
                                              total=self._loader.count,
                                              leave=False):
            if not metadata:
                msg = ("The images to be sorted do not contain alignment data. Images must have "
                       "been generated by Faceswap's Extract process.\nIf you are sorting an "
                       "older faceset, then you should re-extract the faces from your source "
                       "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["alignments"]
            aligned_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"),
                                       image=image,
                                       centering="legacy",
                                       is_aligned=True)
            filenames.append(filename)
            yaws.append(aligned_face.pose.yaw)

        logger.info("Sorting...")
        matched_list = list(zip(filenames, yaws))
        img_list = sorted(matched_list, key=operator.itemgetter(1), reverse=True)
        return img_list
Example #4
 def update_annotation(self):
     """ Get the latest Landmarks and update the mesh."""
     key = "mesh"
     color = self._control_color
     zoomed_offset = self._zoomed_roi[:2]
     for face_idx, face in enumerate(self._face_iterator):
         face_index = self._globals.face_index if self._globals.is_zoomed else face_idx
         if self._globals.is_zoomed:
             aligned = AlignedFace(face.landmarks_xy,
                                   centering="face",
                                   size=min(
                                       self._globals.frame_display_dims))
             landmarks = aligned.landmarks + zoomed_offset
             # Hide all meshes and only display selected
             self._canvas.itemconfig("Mesh", state="hidden")
             self._canvas.itemconfig("Mesh_face_{}".format(face_index),
                                     state="normal")
         else:
             landmarks = self._scale_to_display(face.landmarks_xy)
         logger.trace("Drawing Landmarks Mesh: (landmarks: %s, color: %s)",
                      landmarks, color)
         for idx, (segment,
                   val) in enumerate(self._landmark_mapping.items()):
             key = "mesh_{}".format(idx)
             pts = landmarks[val[0]:val[1]].flatten()
             if segment in ("right_eye", "left_eye", "mouth_inner",
                            "mouth_outer"):
                 kwargs = dict(fill="", outline=color, width=1)
                 self._object_tracker(key, "polygon", face_index, pts,
                                      kwargs)
             else:
                 self._object_tracker(key, "line", face_index, pts,
                                      dict(fill=color, width=1))
     # Place mesh as bottom annotation
     self._canvas.tag_raise(self.__class__.__name__, "main_image")
Example #5
    def sort_distance(self):
        """ Sort by comparison of face landmark points to mean face by average distance of core
        landmarks. """
        logger.info("Sorting by average distance of landmarks...")
        filenames = []
        distances = []
        filelist = [os.path.join(self._loader.location, fname)
                    for fname in os.listdir(self._loader.location)
                    if os.path.splitext(fname)[-1] == ".png"]
        for filename, metadata in tqdm(read_image_meta_batch(filelist),
                                       total=len(filelist),
                                       desc="Calculating Distances"):
            if not metadata:
                msg = ("The images to be sorted do not contain alignment data. Images must have "
                       "been generated by Faceswap's Extract process.\nIf you are sorting an "
                       "older faceset, then you should re-extract the faces from your source "
                       "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["itxt"]["alignments"]
            aligned_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"))
            filenames.append(filename)
            distances.append(aligned_face.average_distance)

        logger.info("Sorting...")
        matched_list = list(zip(filenames, distances))
        img_list = sorted(matched_list, key=operator.itemgetter(1))
        return img_list
Example #6
    def sort_size(self):
        """ Sort the faces by largest face (in original frame) to smallest """
        logger.info("Sorting by original face size...")
        img_list = []
        for filename, image, metadata in tqdm(self._loader.load(),
                                              desc="Calculating face sizes",
                                              total=self._loader.count,
                                              leave=False):
            if not metadata:
                msg = ("The images to be sorted do not contain alignment data. Images must have "
                       "been generated by Faceswap's Extract process.\nIf you are sorting an "
                       "older faceset, then you should re-extract the faces from your source "
                       "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["alignments"]
            aligned_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"),
                                       image=image,
                                       centering="legacy",
                                       is_aligned=True)
            roi = aligned_face.original_roi
            size = ((roi[1][0] - roi[0][0]) ** 2 + (roi[1][1] - roi[0][1]) ** 2) ** 0.5
            img_list.append((filename, size))

        logger.info("Sorting...")
        return sorted(img_list, key=lambda x: x[1], reverse=True)
Example #7
    def _get_zoomed_face(self):
        """ Get the zoomed face or a blank image if no faces are available.

        Returns
        -------
        :class:`numpy.ndarray`
            The face sized to the shortest dimensions of the face viewer
        """
        frame_idx = self._globals.frame_index
        face_idx = self._globals.face_index
        faces_in_frame = self._det_faces.face_count_per_index[frame_idx]
        size = min(self._globals.frame_display_dims)

        if face_idx + 1 > faces_in_frame:
            logger.debug(
                "Resetting face index to 0 as the current index exceeds the number of "
                "faces in the frame: (faces_in_frame: %s, zoomed_face_index: %s)",
                faces_in_frame, face_idx)
            self._globals.tk_face_index.set(0)

        if faces_in_frame == 0:
            face = np.ones((size, size, 3), dtype="uint8")
        else:
            det_face = self._det_faces.current_faces[frame_idx][face_idx]
            face = AlignedFace(det_face.landmarks_xy,
                               image=self._globals.current_frame["image"],
                               centering="face",
                               size=size).face
        logger.trace("face shape: %s", face.shape)
        return face[..., 2::-1]
Example #8
    def _background_extract(self, output_folder, progress_queue):
        """ Perform the background extraction in a thread so GUI doesn't become unresponsive.

        Parameters
        ----------
        output_folder: str
            The location to save the output faces to
        progress_queue: :class:`queue.Queue`
            The queue to place incremental counts to for updating the GUI's progress bar
        """
        _io = dict(saver=ImagesSaver(str(get_folder(output_folder)), as_bytes=True),
                   loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))

        for frame_idx, (filename, image) in enumerate(_io["loader"].load()):
            logger.trace("Outputting frame: %s: %s", frame_idx, filename)
            src_filename = os.path.basename(filename)
            frame_name = os.path.splitext(src_filename)[0]
            progress_queue.put(1)

            for face_idx, face in enumerate(self._frame_faces[frame_idx]):
                output = "{}_{}{}".format(frame_name, str(face_idx), ".png")
                aligned = AlignedFace(face.landmarks_xy,
                                      image=image,
                                      centering="head",
                                      size=512)  # TODO user selectable size
                meta = dict(alignments=face.to_png_meta(),
                            source=dict(alignments_version=self._alignments.version,
                                        original_filename=output,
                                        face_index=face_idx,
                                        source_filename=src_filename,
                                        source_is_video=self._globals.is_video))

                b_image = encode_image(aligned.face, ".png", metadata=meta)
                _io["saver"].save(output, b_image)
        _io["saver"].close()
Example #9
    def _add_aligned_face(self, filename, alignments, image_size):
        """ Add a :class:`lib.align.AlignedFace` object to the cache.

        Parameters
        ----------
        filename: str
            The file path for the current image
        alignments: dict
            The alignments for a single face, extracted from a PNG header
        image_size: int
            The pixel size of the image loaded from disk

        Returns
        -------
        :class:`lib.align.DetectedFace`
            The Detected Face object that was used to create the Aligned Face
        """
        if self._size is None:
            self._size = get_centered_size(
                "legacy" if self._extract_version == 1.0 else "head",
                self._centering, image_size)

        detected_face = DetectedFace()
        detected_face.from_png_meta(alignments)

        aligned_face = AlignedFace(detected_face.landmarks_xy,
                                   centering=self._centering,
                                   size=self._size,
                                   is_aligned=True)
        logger.trace("Caching aligned face for: %s", filename)
        self._cache[os.path.basename(filename)]["aligned_face"] = aligned_face
        return detected_face
Example #10
    def sort_face(self):
        """ Sort by identity similarity """
        logger.info("Sorting by identity similarity...")
        filenames = []
        preds = []
        for filename, image, metadata in tqdm(self._loader.load(),
                                              desc="Classifying Faces",
                                              total=self._loader.count,
                                              leave=False):
            if not metadata:
                msg = ("The images to be sorted do not contain alignment data. Images must have "
                       "been generated by Faceswap's Extract process.\nIf you are sorting an "
                       "older faceset, then you should re-extract the faces from your source "
                       "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["alignments"]
            face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"),
                               image=image,
                               centering="legacy",
                               size=self._vgg_face.input_size,
                               is_aligned=True).face
            filenames.append(filename)
            preds.append(self._vgg_face.predict(face))

        logger.info("Sorting by ward linkage...")

        indices = self._vgg_face.sorted_similarity(np.array(preds), method="ward")
        img_list = np.array(filenames)[indices]
        return img_list
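
`sorted_similarity` is a faceswap-internal helper. A rough stand-in for the ward-linkage ordering it performs can be sketched with scipy (an approximation of the idea, not the library's actual tree walk; the helper name `ward_order` is ours):

    import numpy as np
    from scipy.cluster.hierarchy import leaves_list, linkage

    def ward_order(preds):
        """ Order faces so that similar identity embeddings end up adjacent.
        `preds` is a (num_faces, embedding_dim) float array. """
        tree = linkage(preds, method="ward")  # hierarchical ward clustering
        return leaves_list(tree)              # leaf indices, left to right

    # usage: img_list = np.array(filenames)[ward_order(np.array(preds))]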
Example #11
    def get_tk_face(self, frame_index, face_index, face):
        """ Obtain the :class:`TKFace` object for the given face from the cache. If the face does
        not exist in the cache, then it is generated and added prior to returning.

        Parameters
        ----------
        frame_index: int
            The frame index to obtain the face for
        face_index: int
            The face index of the face within the requested frame
        face: :class:`~lib.align.DetectedFace`
            The detected face object, containing the thumbnail jpg

        Returns
        -------
        :class:`TKFace`
            An object for displaying in the faces viewer canvas populated with the aligned mesh
            landmarks and face thumbnail
        """
        is_active = frame_index == self._active_frame.frame_index
        key = "_".join([str(frame_index), str(face_index)])
        if key not in self._tk_faces or is_active:
            logger.trace("creating new tk_face: (key: %s, is_active: %s)", key,
                         is_active)
            if is_active:
                image = AlignedFace(face.landmarks_xy,
                                    image=self._active_frame.current_frame,
                                    centering=self._centering,
                                    size=self.face_size).face
            else:
                image = AlignedFace(face.landmarks_xy,
                                    image=cv2.imdecode(face.thumbnail,
                                                       cv2.IMREAD_UNCHANGED),
                                    centering=self._centering,
                                    size=self.face_size,
                                    is_aligned=True).face
            tk_face = self._get_tk_face_object(face, image, is_active)
            self._tk_faces[key] = tk_face
        else:
            logger.trace("tk_face exists: %s", key)
            tk_face = self._tk_faces[key]
        return tk_face
Example #12
    def load_aligned(self, item):
        """ Load the model's feed faces and the reference output faces.

        For each detected face in the incoming item, load the feed face and reference face
        images, correctly sized for input and output respectively.

        Parameters
        ----------
        item: dict
            The incoming item, containing the image and the list of
            :class:`~lib.align.DetectedFace` objects. The lists of
            :class:`~lib.align.AlignedFace` objects for the feed face(s) and for the
            reference face(s) are added to this dictionary by this method.

        """
        logger.trace("Loading aligned faces: '%s'", item["filename"])
        feed_faces = []
        reference_faces = []
        for detected_face in item["detected_faces"]:
            feed_face = AlignedFace(detected_face.landmarks_xy,
                                    image=item["image"],
                                    centering=self._centering,
                                    size=self._sizes["input"],
                                    coverage_ratio=self._coverage_ratio,
                                    dtype="float32")
            if self._sizes["input"] == self._sizes["output"]:
                reference_faces.append(feed_face)
            else:
                reference_faces.append(
                    AlignedFace(detected_face.landmarks_xy,
                                image=item["image"],
                                centering=self._centering,
                                size=self._sizes["output"],
                                coverage_ratio=self._coverage_ratio,
                                dtype="float32"))
            feed_faces.append(feed_face)
        item["feed_faces"] = feed_faces
        item["reference_faces"] = reference_faces
        logger.trace("Loaded aligned faces: '%s'", item["filename"])
Example #13
    @classmethod
    def estimate_blur_fft(cls, image, metadata=None):
        """ Estimate the amount of blur an image has using FFT filtering.

        Uses a 2D Discrete Fourier Transform (DFT) to analyze the frequency
        characteristics of the (optionally masked) face, zeroes out the low frequency
        region of the shifted spectrum, and returns the mean log magnitude of the
        reconstructed image as the blur score.
        Adapted from https://www.pyimagesearch.com/2020/06/15/
        opencv-fast-fourier-transform-fft-for-blur-detection-in-images-and-video-streams/

        Parameters
        ----------
        image: :class:`numpy.ndarray`
            The face image to calculate blur for
        metadata: dict, optional
            The metadata for the face image or ``None`` if no metadata is available. If metadata is
            provided the face will be masked by the "components" mask prior to calculating blur.
            Default:``None``

        Returns
        -------
        float
            The estimated fft blur score for the face
        """
        if metadata is not None:
            alignments = metadata["alignments"]
            det_face = DetectedFace()
            det_face.from_png_meta(alignments)
            aln_face = AlignedFace(np.array(alignments["landmarks_xy"],
                                            dtype="float32"),
                                   image=image,
                                   centering="legacy",
                                   size=256,
                                   is_aligned=True)
            mask = det_face.mask["components"]
            mask.set_sub_crop(aln_face.pose.offset[mask.stored_centering] * -1,
                              centering="legacy")
            mask = cv2.resize(mask.mask, (256, 256),
                              interpolation=cv2.INTER_CUBIC)[..., None]
            image = np.minimum(aln_face.face, mask)
        if image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        height, width = image.shape
        c_height, c_width = (int(height / 2.0), int(width / 2.0))
        fft = np.fft.fft2(image)
        fft_shift = np.fft.fftshift(fft)
        fft_shift[c_height - 75:c_height + 75, c_width - 75:c_width + 75] = 0
        ifft_shift = np.fft.ifftshift(fft_shift)
        shift_back = np.fft.ifft2(ifft_shift)
        magnitude = np.log(np.abs(shift_back))
        score = np.mean(magnitude)
        return score
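
As a usage sketch, the scorer can be run directly on an extracted face with no metadata, in which case the full image is scored (`BlurEstimator` is a hypothetical owning class, assuming the method is the classmethod its `cls` signature implies):

    import cv2

    image = cv2.imread("face.png")                  # hypothetical input file
    score = BlurEstimator.estimate_blur_fft(image)  # no metadata: score the whole image
    print(f"fft blur score: {score:0.4f}")          # lower = blurrier (less high-frequency energy)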
Example #14
    def get_landmarks(self,
                      frame_index,
                      face_index,
                      face,
                      top_left,
                      refresh=False):
        """ Obtain the landmark points for each mesh annotation.

        First tries to obtain the aligned landmarks from the cache. If the landmarks do not exist
        in the cache, or a refresh has been requested, then the landmarks are calculated from the
        detected face object.

        Parameters
        ----------
        frame_index: int
            The frame index to obtain the face for
        face_index: int
            The face index of the face within the requested frame
        face: :class:`lib.align.DetectedFace`
            The detected face object to obtain landmarks for
        top_left: tuple
            The top left (x, y) points of the face's bounding box within the viewport
        refresh: bool, optional
            Whether to force a reload of the face's aligned landmarks, even if they already exist
            within the cache. Default: ``False``

        Returns
        -------
        dict
            The key is the tkinter canvas object type for each part of the mesh annotation
            (`polygon`, `line`). The value is a list containing the (x, y) coordinates of each
            part of the mesh annotation, from the top left corner location.
        """
        key = "{}_{}".format(frame_index, face_index)
        landmarks = self._landmarks.get(key, None)
        if not landmarks or refresh:
            aligned = AlignedFace(face.landmarks_xy,
                                  centering=self._centering,
                                  size=self.face_size)
            landmarks = dict(polygon=[], line=[])
            for area, val in self._landmark_mapping.items():
                points = aligned.landmarks[val[0]:val[1]] + top_left
                shape = "polygon" if area.endswith("eye") or area.startswith(
                    "mouth") else "line"
                landmarks[shape].append(points)
            self._landmarks[key] = landmarks
        return landmarks
Example #15
    def post_edit_trigger(self, frame_index, face_index):
        """ Update the jpg thumbnail and the viewport thumbnail on a face edit.

        Parameters
        ----------
        frame_index: int
            The frame that the face is being set for
        face_index: int
            The face index within the frame
        """
        face = self._frame_faces[frame_index][face_index]
        aligned = AlignedFace(face.landmarks_xy,
                              image=self._globals.current_frame["image"],
                              centering="head",
                              size=96)
        face.thumbnail = generate_thumbnail(aligned.face, size=96)
        self._tk_edited.set(True)
Example #16
 def update_annotation(self):
     """ Draw the latest Extract Boxes around the faces. """
     color = self._control_color
     roi = self._zoomed_roi
     for idx, face in enumerate(self._face_iterator):
         logger.trace("Drawing Extract Box: (idx: %s)", idx)
         if self._globals.is_zoomed:
             box = np.array((roi[0], roi[1], roi[2], roi[1], roi[2], roi[3], roi[0], roi[3]))
         else:
             aligned = AlignedFace(face.landmarks_xy, centering="face")
             box = self._scale_to_display(aligned.original_roi).flatten()
         top_left = box[:2] - 10
         kwargs = dict(fill=color, font=("Default", 20, "bold"), text=str(idx))
         self._object_tracker("eb_text", "text", idx, top_left, kwargs)
         kwargs = dict(fill="", outline=color, width=1)
         self._object_tracker("eb_box", "polygon", idx, box, kwargs)
         self._update_anchor_annotation(idx, box, color)
     logger.trace("Updated extract box annotations")
Example #17
    def sort_face(self):
        """ Sort by identity similarity """
        logger.info("Sorting by identity similarity...")

        self._loader = FacesLoader(self._args.input_dir)  # TODO This should be set in init
        ratio = _EXTRACT_RATIOS["legacy"] / _EXTRACT_RATIOS["head"]
        filenames = []
        preds = []
        no_hash = 0
        for filename, image, hsh in tqdm(self._loader.load(),
                                         desc="Classifying Faces...",
                                         total=self._loader.count):
            if self._alignments is not None and self._alignments.version != 1.0:
                face = self._alignments.hashes_to_alignment.get(hsh)
                if face:
                    image = AlignedFace(face["landmarks_xy"],
                                        image=image,
                                        centering="legacy",
                                        size=self._vgg_face.input_size,
                                        is_aligned=True).face
                elif image.shape[0] != image.shape[1]:
                    logger.warning("Skipping image '%s' as it is not square (probably not a "
                                   "face)", filename)
                    continue
                else:  # Center crop the image and add count to warning count
                    center = image.shape[0] // 2
                    crop = slice(center - int(center * ratio), center + int(center * ratio))
                    image = image[crop, crop, :]
                    no_hash += 1

            filenames.append(filename)
            preds.append(self._vgg_face.predict(image))

        logger.info("Sorting by ward linkage...")

        indices = self._vgg_face.sorted_similarity(np.array(preds), method="ward")
        img_list = np.array(filenames)[indices]

        if no_hash:
            logger.warning("%s image(s) were not found in the alignments file. This will likely "
                           "result in sub-par sorting results, so you should check the output "
                           "carefully", no_hash)

        return img_list
Example #18
    def landmark(self, frame_index, face_index, landmark_index, shift_x,
                 shift_y, is_zoomed):
        """ Shift a single landmark point for the :class:`~lib.align.DetectedFace` object
        at the given frame and face indices by the given x and y values.

        Parameters
        ----------
        frame_index: int
            The frame that the face is being set for
        face_index: int
            The face index within the frame
        landmark_index: int or list
            The landmark index to shift. If a list is provided, this should be a list of landmark
            indices to be shifted
        shift_x: int
            The amount to shift the landmark by along the x axis
        shift_y: int
            The amount to shift the landmark by along the y axis
        is_zoomed: bool
            ``True`` if landmarks are being adjusted on a zoomed image otherwise ``False``
        """
        face = self._faces_at_frame_index(frame_index)[face_index]
        if is_zoomed:
            aligned = AlignedFace(face.landmarks_xy,
                                  centering="face",
                                  size=min(self._globals.frame_display_dims))
            landmark = aligned.landmarks[landmark_index]
            landmark += (shift_x, shift_y)
            matrix = aligned.adjusted_matrix
            matrix = cv2.invertAffineTransform(matrix)
            if landmark.ndim == 1:
                landmark = np.reshape(landmark, (1, 1, 2))
                landmark = cv2.transform(landmark, matrix,
                                         landmark.shape).squeeze()
                face.landmarks_xy[landmark_index] = landmark
            else:
                for lmk, idx in zip(landmark, landmark_index):
                    lmk = np.reshape(lmk, (1, 1, 2))
                    lmk = cv2.transform(lmk, matrix, lmk.shape).squeeze()
                    face.landmarks_xy[idx] = lmk
        else:
            face.landmarks_xy[landmark_index] += (shift_x, shift_y)
        face.mask = self._extractor.get_masks(frame_index, face_index)
        self._globals.tk_update.set(True)
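
The zoomed branch above maps a point edited in display space back into frame space by inverting the alignment's affine matrix. The core of that round trip as a standalone sketch (the helper name `display_to_frame` is ours):

    import cv2
    import numpy as np

    def display_to_frame(points, matrix):
        """ Map (N, 2) display-space points back to the original frame space.
        `matrix` is the 2x3 affine matrix that was used to align the face. """
        inverse = cv2.invertAffineTransform(matrix)
        pts = points.reshape(-1, 1, 2).astype("float32")
        return cv2.transform(pts, inverse).reshape(-1, 2)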
Example #19
    def _set_thumbnail(self, filename, frame, frame_index):
        """ Generate jpg thumbnails for the faces in the frame and add them to the
        alignments file

        Parameters
        ----------
        filename: str
            The filename of the frame within the alignments file
        frame: :class:`numpy.ndarray`
            The frame that contains the faces
        frame_index: int
            The frame index of this frame in the :attr:`_frame_faces`
        """
        for face_idx, face in enumerate(self._frame_faces[frame_index]):
            aligned = AlignedFace(face.landmarks_xy,
                                  image=frame,
                                  centering="head",
                                  size=96)
            face.thumbnail = generate_thumbnail(aligned.face, size=96)
            self._alignments.thumbnails.add_thumbnail(filename, face_idx, face.thumbnail)
        with self._pbar["lock"]:
            self._pbar["pbar"].update(1)
Example #20
 def update_annotation(self):
     """ Get the latest Landmarks points and update. """
     zoomed_offset = self._zoomed_roi[:2]
     for face_idx, face in enumerate(self._face_iterator):
         face_index = self._globals.face_index if self._globals.is_zoomed else face_idx
         if self._globals.is_zoomed:
             aligned = AlignedFace(face.landmarks_xy,
                                   centering="face",
                                   size=min(
                                       self._globals.frame_display_dims))
             landmarks = aligned.landmarks + zoomed_offset
             # Hide all landmarks and only display selected
             self._canvas.itemconfig("lm_dsp", state="hidden")
             self._canvas.itemconfig("lm_dsp_face_{}".format(face_index),
                                     state="normal")
         else:
             landmarks = self._scale_to_display(face.landmarks_xy)
         for lm_idx, landmark in enumerate(landmarks):
             self._display_landmark(landmark, face_index, lm_idx)
             self._label_landmark(landmark, face_index, lm_idx)
             self._grab_landmark(landmark, face_index, lm_idx)
     logger.trace("Updated landmark annotations")
Example #21
    @classmethod
    def estimate_blur(cls, image, metadata=None):
        """ Estimate the amount of blur an image has using the variance of the Laplacian,
        normalized by the square root of the pixel count to offset the effect of image size
        on pixel gradients & variance.

        Parameters
        ----------
        image: :class:`numpy.ndarray`
            The face image to calculate blur for
        metadata: dict, optional
            The metadata for the face image or ``None`` if no metadata is available. If metadata is
            provided the face will be masked by the "components" mask prior to calculating blur.
            Default:``None``

        Returns
        -------
        float
            The estimated blur score for the face
        """
        if metadata is not None:
            alignments = metadata["alignments"]
            det_face = DetectedFace()
            det_face.from_png_meta(alignments)
            aln_face = AlignedFace(np.array(alignments["landmarks_xy"],
                                            dtype="float32"),
                                   image=image,
                                   centering="legacy",
                                   size=256,
                                   is_aligned=True)
            mask = det_face.mask["components"]
            mask.set_sub_crop(aln_face.pose.offset[mask.stored_centering] * -1,
                              centering="legacy")
            mask = cv2.resize(mask.mask, (256, 256),
                              interpolation=cv2.INTER_CUBIC)[..., None]
            image = np.minimum(aln_face.face, mask)
        if image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blur_map = cv2.Laplacian(image, cv2.CV_32F)
        score = np.var(blur_map) / np.sqrt(image.shape[0] * image.shape[1])
        return score
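
Outside the class, the scoring itself is three lines. A minimal sketch on a single grayscale image (the input filename is hypothetical), keeping the normalization by the square root of the pixel count:

    import cv2
    import numpy as np

    gray = cv2.imread("face.png", cv2.IMREAD_GRAYSCALE)
    blur_map = cv2.Laplacian(gray, cv2.CV_32F)
    score = np.var(blur_map) / np.sqrt(gray.shape[0] * gray.shape[1])
    # higher score = sharper; sort ascending to surface the blurriest faces first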
Example #22
    def post_edit_trigger(self, frame_index, face_index):
        """ Update the jpg thumbnail, the viewport thumbnail, the landmark masks and the aligned
        face on a face edit.

        Parameters
        ----------
        frame_index: int
            The frame that the face is being set for
        face_index: int
            The face index within the frame
        """
        face = self._frame_faces[frame_index][face_index]
        face.load_aligned(None, force=True)  # Update average distance
        face.mask = self._extractor.get_masks(frame_index, face_index)

        aligned = AlignedFace(face.landmarks_xy,
                              image=self._globals.current_frame["image"],
                              centering="head",
                              size=96)
        face.thumbnail = generate_thumbnail(aligned.face, size=96)
        if self._globals.filter_mode == "Misaligned Faces":
            self._detected_faces.tk_face_count_changed.set(True)
        self._tk_edited.set(True)
Example #23
    @classmethod
    def _obtain_mask(cls, detected_face, mask_type):
        """ Obtain the mask for the correct "face" centering that is used in the thumbnail display.

        Parameters
        ----------
        detected_face: :class:`lib.align.DetectedFace`
            The Detected Face object to obtain the mask for
        mask_type: str
            The type of mask to obtain

        Returns
        -------
        :class:`numpy.ndarray` or ``None``
            The single channel mask of requested mask type, if it exists, otherwise ``None``
        """
        mask = detected_face.mask.get(mask_type)
        if not mask:
            return None
        if mask.stored_centering != "face":
            face = AlignedFace(detected_face.landmarks_xy)
            mask.set_sub_crop(face.pose.offset["face"] - face.pose.offset[mask.stored_centering],
                              centering="face")
        return mask.mask.squeeze()
Example #24
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self._args = arguments

        # load faces
        faces_alignments = AlignmentsBase(self._args.faces_align_dir)
        print()
        print(f'Faces alignments: {len(faces_alignments._data.keys())}')
        print(faces_alignments._data.keys())

        self._faces = {}
        faces_loader = ImagesLoader(self._args.faces_dir)
        for filename, image in faces_loader.load():
            face_name = os.path.basename(filename)

            faces = faces_alignments.get_faces_in_frame(face_name)
            detected_faces = list()
            for rawface in faces:
                face = DetectedFace()
                face.from_alignment(rawface, image=image)

                feed_face = AlignedFace(face.landmarks_xy,
                                        image=image,
                                        centering='face',
                                        size=image.shape[0],
                                        coverage_ratio=1.0,
                                        dtype="float32")

                detected_faces.append(feed_face)

            self._faces[face_name] = (filename, image, detected_faces)

        print('Faces:', len(self._faces))
        print(self._faces.keys())
        print()

        self._patch_threads = None
        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
        self._alignments = Alignments(self._args, False, self._images.is_video)

        if self._alignments.version == 1.0:
            logger.error("The alignments file format has been updated since the given alignments "
                         "file was generated. You need to update the file to proceed.")
            logger.error("To do this run the 'Alignments Tool' > 'Extract' Job.")
            sys.exit(1)

        self._opts = OptionalActions(self._args, self._images.file_list, self._alignments)

        self._add_queues()
        self._disk_io = DiskIO(self._alignments, self._images, arguments)
        self._predictor = Predict(self._disk_io.load_queue, self._queue_size, self._faces, arguments)
        self._validate()
        get_folder(self._args.output_dir)

        configfile = self._args.configfile if hasattr(self._args, "configfile") else None
        self._converter = Converter(self._predictor.output_size,
                                    self._predictor.coverage_ratio,
                                    self._predictor.centering,
                                    self._disk_io.draw_transparent,
                                    self._disk_io.pre_encode,
                                    arguments,
                                    configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)
Example #25
    def check(self, image, detected_face):
        """ Check the extracted Face

        Parameters
        ----------
        image: :class:`numpy.ndarray`
            The original frame that contains the face to be checked
        detected_face: :class:`lib.align.DetectedFace`
            The detected face object that contains the face to be checked

        Returns
        -------
        bool
            ``True`` if the face matches a filter otherwise ``False``
        """
        logger.trace("Checking face with FaceFilter")
        distances = {"filter": list(), "nfilter": list()}
        feed = AlignedFace(detected_face.landmarks_xy,
                           image=image,
                           size=224,
                           centering="legacy")
        encodings = self.vgg_face.predict(feed.face)
        for filt in self.filters.values():
            similarity = self.vgg_face.find_cosine_similiarity(
                filt["encoding"], encodings)
            distances[filt["type"]].append(similarity)

        avgs = {
            key: avg(val) if val else None
            for key, val in distances.items()
        }
        mins = {
            key: min(val) if val else None
            for key, val in distances.items()
        }
        # Filter
        if distances["filter"] and avgs["filter"] > self.threshold:
            msg = "Rejecting filter face: {} > {}".format(
                round(avgs["filter"], 2), self.threshold)
            retval = False
        # nFilter only (no filter samples)
        elif not distances["filter"] and avgs["nfilter"] < self.threshold:
            msg = "Rejecting nFilter face: {} < {}".format(
                round(avgs["nfilter"], 2), self.threshold)
            retval = False
        # Filter with nFilter
        elif distances["filter"] and distances[
                "nfilter"] and mins["filter"] > mins["nfilter"]:
            msg = (
                "Rejecting face as distance from nfilter sample is smaller: (filter: {}, "
                "nfilter: {})".format(round(mins["filter"], 2),
                                      round(mins["nfilter"], 2)))
            retval = False
        elif distances["filter"] and distances[
                "nfilter"] and avgs["filter"] > avgs["nfilter"]:
            msg = (
                "Rejecting face as average distance from nfilter sample is smaller: (filter: "
                "{}, nfilter: {})".format(round(avgs["filter"], 2),
                                          round(avgs["nfilter"], 2)))
            retval = False
        elif distances["filter"] and distances["nfilter"]:
            # k-nearest-neighbor classifier
            var_k = min(
                5,
                min(len(distances["filter"]), len(distances["nfilter"])) + 1)
            var_n = sum(
                list(
                    map(
                        lambda x: x[0],
                        list(
                            sorted([(1, d) for d in distances["filter"]] +
                                   [(0, d) for d in distances["nfilter"]],
                                   key=lambda x: x[1]))[:var_k])))
            ratio = var_n / var_k
            if ratio < 0.5:
                msg = (
                    "Rejecting face as k-nearest neighbors classification is less than "
                    "0.5: {}".format(round(ratio, 2)))
                retval = False
            else:
                msg = None
                retval = True
        else:
            msg = None
            retval = True
        if msg:
            logger.verbose(msg)
        else:
            logger.trace("Accepted face: (similarity: %s, threshold: %s)",
                         distances, self.threshold)
        return retval
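
The k-nearest-neighbour block near the end is the densest part of the check. A readable restatement of the same logic (the helper name `knn_accept` is ours): label each distance 1 (filter) or 0 (nfilter), take the k smallest, and accept when at least half of those neighbours are filter samples:

    def knn_accept(filter_dists, nfilter_dists):
        var_k = min(5, min(len(filter_dists), len(nfilter_dists)) + 1)
        labelled = sorted([(1, dist) for dist in filter_dists] +
                          [(0, dist) for dist in nfilter_dists],
                          key=lambda x: x[1])
        ratio = sum(label for label, _ in labelled[:var_k]) / var_k
        return ratio >= 0.5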
Example #26
    def get_batch(self, queue):
        """ Get items for inputting into the masker from the queue in batches

        Items are returned from the ``queue`` in batches of
        :attr:`~plugins.extract._base.Extractor.batchsize`

        Items are received as :class:`~plugins.extract.pipeline.ExtractMedia` objects and converted
        to ``dict`` for internal processing.

        To ensure consistent batch sizes for masker the items are split into separate items for
        each :class:`~lib.align.DetectedFace` object.

        Remember to put ``'EOF'`` into the out queue after processing the final batch

        Outputs items in the following format. All lists are of length
        :attr:`~plugins.extract._base.Extractor.batchsize`:

        >>> {'filename': [<filenames of source frames>],
        >>>  'detected_faces': [[<lib.align.DetectedFace objects]]}

        Parameters
        ----------
        queue : queue.Queue()
            The ``queue`` that the plugin will be fed from.

        Returns
        -------
        exhausted: bool
            ``True`` if the queue is exhausted, ``False`` if not
        batch: dict
            A dictionary of lists, each of length :attr:`~plugins.extract._base.Extractor.batchsize`
        """
        exhausted = False
        batch = dict()
        idx = 0
        while idx < self.batchsize:
            item = self._collect_item(queue)
            if item == "EOF":
                logger.trace("EOF received")
                exhausted = True
                break
            # Put frames with no faces into the out queue to keep TQDM consistent
            if not item.detected_faces:
                self._queues["out"].put(item)
                continue
            for f_idx, face in enumerate(item.detected_faces):
                feed_face = AlignedFace(face.landmarks_xy,
                                        image=item.get_image_copy(
                                            self.color_format),
                                        centering=self._storage_centering,
                                        size=self.input_size,
                                        coverage_ratio=self.coverage_ratio,
                                        dtype="float32",
                                        is_aligned=self._image_is_aligned)
                batch.setdefault("detected_faces", []).append(face)
                batch.setdefault("feed_faces", []).append(feed_face)
                batch.setdefault("filename", []).append(item.filename)
                idx += 1
                if idx == self.batchsize:
                    frame_faces = len(item.detected_faces)
                    if f_idx + 1 != frame_faces:
                        self._rollover = ExtractMedia(
                            item.filename,
                            item.image,
                            detected_faces=item.detected_faces[f_idx + 1:])
                        logger.trace(
                            "Rolled over %s faces of %s to next batch for '%s'",
                            len(self._rollover.detected_faces), frame_faces,
                            item.filename)
                    break
        if batch:
            logger.trace(
                "Returning batch: %s", {
                    k: v.shape if isinstance(v, np.ndarray) else v
                    for k, v in batch.items()
                })
        else:
            logger.trace(item)
        return exhausted, batch
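
Stripped of the queue plumbing, the rollover pattern above is batch-filling from variable-length per-frame face lists, carrying any surplus faces over to the next batch. A minimal generator sketch (names are ours):

    def batches(frames, batchsize):
        """ `frames` is an iterable of (filename, [faces]) pairs. """
        batch = []
        for filename, faces in frames:
            for face in faces:
                batch.append((filename, face))
                if len(batch) == batchsize:
                    yield batch
                    batch = []
        if batch:  # final short batch: the point at which 'EOF' would be sent
            yield batch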