コード例 #1
0
ファイル: extract.py プロジェクト: suzg/faceswap-1
    def _output_processing(self, extract_media: ExtractMedia,
                           size: int) -> None:
        """ Prepare the faces of an extraction result for output.

        Loads each aligned face at the requested size, generates its
        thumbnail, runs the configured post-processing actions and finally
        flags whether output verification is required.

        Parameters
        ----------
        extract_media: :class:`plugins.extract.pipeline.ExtractMedia`
            Output from :class:`plugins.extract.pipeline.Extractor`
        size: int
            The size that the aligned face should be created at
        """
        detected = extract_media.detected_faces
        for face in detected:
            face.load_aligned(extract_media.image, size=size, centering="head")
            face.thumbnail = generate_thumbnail(face.aligned.face,
                                                size=96,
                                                quality=60)

        self._post_process.do_actions(extract_media)
        extract_media.remove_image()  # the full frame is no longer required

        num_faces = len(detected)
        if num_faces == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(extract_media.filename))
        elif num_faces > 1 and not self._verify_output:
            # Multiple faces in a frame means the output should be verified
            self._verify_output = True
コード例 #2
0
    def _process_legacy(self, filename, image, detected_faces):
        """ Convert legacy face extractions to the new extraction method.

        Regenerates the landmark based masks at the new extract size/centering
        and shifts any neural network based masks to match.

        Parameters
        ----------
        filename: str
            The current frame filename
        image: :class:`numpy.ndarray`
            The current image the contains the faces
        detected_faces: list
            list of :class:`lib.align.DetectedFace` objects for the current frame

        Returns
        -------
        list
            The updated :class:`lib.align.DetectedFace` objects
        """
        # Regenerate the landmark based masks for face centering
        feed = ExtractMedia(filename, image, detected_faces=detected_faces)
        self._mask_pipeline.input_queue.put(feed)
        updated_faces = next(self._mask_pipeline.detected_faces()).detected_faces

        # Neural network based masks need padding/shifting to the new centering
        for face in updated_faces:
            self._pad_legacy_masks(face)
        return updated_faces
コード例 #3
0
    def detect_faces(self, filename, image):
        """ Extract the faces from a frame (if no alignments file was found).

        Feeds the frame through the extraction pipeline and collects the
        detected faces from its output.

        Parameters
        ----------
        filename: str
            The filename of the frame being processed
        image: :class:`numpy.ndarray`
            The frame to detect faces in

        Returns
        -------
        list
            The detected face objects found in the frame
        """
        self.extractor.input_queue.put(ExtractMedia(filename, image))
        faces = next(self.extractor.detected_faces())
        # A plain copy, not a pass-through comprehension (ruff PERF402)
        return list(faces.detected_faces)
コード例 #4
0
 def _feed_face(self):
     """ :class:`plugins.extract.pipeline.ExtractMedia`: The currently selected face,
     wrapped ready for feeding into the aligner pipeline """
     current = self._globals.current_frame
     face = self._detected_faces.current_faces[self._frame_index][self._face_index]
     return ExtractMedia(current["filename"],
                         current["image"],
                         detected_faces=[face])
コード例 #5
0
    def _input_faces(self, *args):
        """ Input pre-aligned faces to the Extractor plugin inside a thread

        Iterates the face loader, updating any legacy face headers found,
        validates each face against the alignments file, then either saves the
        face directly (update type "output") or queues it to the extractor.

        Parameters
        ----------
        args: tuple
            The arguments that are to be loaded inside this thread. Contains the queue that the
            faces should be put to
        """
        log_once = False
        logger.debug("args: %s", args)
        # The queue is only required when faces are fed on to the extractor
        if self._update_type != "output":
            queue = args[0]
        for filename, image, metadata in tqdm(self._loader.load(),
                                              total=self._loader.count):
            if not metadata:  # Legacy faces. Update the headers
                if not log_once:
                    # Warn only once, however many legacy faces are found
                    logger.warning(
                        "Legacy faces discovered. These faces will be updated")
                    log_once = True
                metadata = update_legacy_png_header(filename, self._alignments)
                if not metadata:
                    # Face not found
                    self._counts["skip"] += 1
                    logger.warning(
                        "Legacy face not found in alignments file. This face has not "
                        "been updated: '%s'", filename)
                    continue

            frame_name = metadata["source"]["source_filename"]
            face_index = metadata["source"]["face_index"]
            alignment = self._alignments.get_faces_in_frame(frame_name)
            if not alignment or face_index > len(alignment) - 1:
                self._counts["skip"] += 1
                logger.warning(
                    "Skipping Face not found in alignments file: '%s'",
                    filename)
                continue
            alignment = alignment[face_index]
            self._counts["face"] += 1

            if self._check_for_missing(frame_name, face_index, alignment):
                continue

            detected_face = self._get_detected_face(alignment)
            if self._update_type == "output":
                detected_face.image = image
                self._save(frame_name, face_index, detected_face)
            else:
                media = ExtractMedia(filename,
                                     image,
                                     detected_faces=[detected_face])
                media.mask_tool_face_info = metadata["source"]  # TODO formalize
                queue.put(media)
                self._counts["update"] += 1
        if self._update_type != "output":
            queue.put("EOF")
コード例 #6
0
    def _input_faces(self, *args):
        """ Input pre-aligned faces to the Extractor plugin inside a thread

        Faces are located in the alignments file through their stored hash.
        Each matching (frame, index) pair is either saved out directly (update
        type "output") or queued to the extractor for updating.

        Parameters
        ----------
        args: tuple
            The arguments that are to be loaded inside this thread. Contains the queue that the
            faces should be put to
        """
        logger.debug("args: %s", args)
        # The queue is only required when faces are fed on to the extractor
        if self._update_type != "output":
            queue = args[0]
        for filename, image, hsh in tqdm(self._loader.load(),
                                         total=self._loader.count):
            if hsh not in self._alignments.hashes_to_frame:
                self._counts["skip"] += 1
                logger.warning("Skipping face not in alignments file: '%s'",
                               filename)
                continue

            frames = self._alignments.hashes_to_frame[hsh]
            if len(frames) > 1:
                # Filter the output by filename in case of multiple frames with the same face
                logger.debug(
                    "Filtering multiple hashes to current filename: (filename: '%s', "
                    "frames: %s", filename, frames)
                lookup = os.path.splitext(os.path.basename(filename))[0]
                # Keep only frames whose stem prefixes the face filename
                # (presumably the face filename embeds the source frame name —
                # verify against the loader's naming convention)
                frames = {
                    k: v
                    for k, v in frames.items()
                    if lookup.startswith(os.path.splitext(k)[0])
                }
                logger.debug("Filtered: (filename: '%s', frame: '%s')",
                             filename, frames)

            # A single face can legitimately appear in several frames
            for frame, idx in frames.items():
                self._counts["face"] += 1
                alignment = self._alignments.get_faces_in_frame(frame)[idx]
                if self._check_for_missing(frame, idx, alignment):
                    continue
                detected_face = self._get_detected_face(alignment)
                if self._update_type == "output":
                    detected_face.image = image
                    self._save(frame, idx, detected_face)
                else:
                    queue.put(
                        ExtractMedia(filename,
                                     image,
                                     detected_faces=[detected_face]))
                    self._counts["update"] += 1
        if self._update_type != "output":
            queue.put("EOF")
コード例 #7
0
ファイル: face_filter.py プロジェクト: endcrypt/Deepvid
 def queue_images(self, extractor):
     """ Feed the filter images into the extractor for detection and alignment.

     Each configured filter image is wrapped in an ExtractMedia object and
     placed on the extractor's input queue, followed by an ``"EOF"`` sentinel.
     """
     feed_queue = extractor.input_queue
     for filename, detail in self.filters.items():
         logger.debug("Adding to filter queue: '%s' (%s)", filename,
                      detail["type"])
         media = ExtractMedia(filename,
                              detail["image"],
                              detected_faces=detail.get("detected_faces"))
         logger.debug("Queueing filename: '%s' items: %s", filename, media)
         feed_queue.put(media)
     logger.debug("Sending EOF to filter queue")
     feed_queue.put("EOF")
コード例 #8
0
    def _input_frames(self, *args):
        """ Input frames to the Extractor plugin inside a thread

        Frames missing from the alignments file (or containing no faces) are
        skipped.  Remaining frames are either saved out face-by-face (update
        type "output") or queued whole to the extractor.

        Parameters
        ----------
        args: tuple
            The arguments that are to be loaded inside this thread. Contains the queue that the
            faces should be put to
        """
        logger.debug("args: %s", args)
        # The queue is only required when frames are fed on to the extractor
        if self._update_type != "output":
            queue = args[0]
        for filename, image in tqdm(self._loader.load(),
                                    total=self._loader.count):
            frame = os.path.basename(filename)
            if not self._alignments.frame_exists(frame):
                self._counts["skip"] += 1
                logger.warning("Skipping frame not in alignments file: '%s'",
                               frame)
                continue
            if not self._alignments.frame_has_faces(frame):
                logger.debug("Skipping frame with no faces: '%s'", frame)
                continue

            faces_in_frame = self._alignments.get_faces_in_frame(frame)
            self._counts["face"] += len(faces_in_frame)

            # To keep face indexes correct/cover off where only one face in an image is missing a
            # mask where there are multiple faces we process all faces again for any frames which
            # have missing masks.
            if all(
                    self._check_for_missing(frame, idx, alignment)
                    for idx, alignment in enumerate(faces_in_frame)):
                continue

            detected_faces = [
                self._get_detected_face(alignment)
                for alignment in faces_in_frame
            ]
            if self._update_type == "output":
                # Save each face directly with its frame image attached
                for idx, detected_face in enumerate(detected_faces):
                    detected_face.image = image
                    self._save(frame, idx, detected_face)
            else:
                self._counts["update"] += len(detected_faces)
                queue.put(
                    ExtractMedia(filename,
                                 image,
                                 detected_faces=detected_faces))
        if self._update_type != "output":
            queue.put("EOF")
コード例 #9
0
 def _load(self):
     """ Load the images for extraction.
     Reads frames from :class:`lib.image.ImagesLoader`, wraps each one in an
     ExtractMedia object for :class:`plugins.extract.Pipeline.Extractor` and
     feeds it to the extraction queue, honouring any shutdown request.
     """
     logger.debug("Load Images: Start")
     feed_queue = self._extractor.input_queue
     for filename, image in self._images.load():
         if feed_queue.shutdown.is_set():
             logger.debug("Load Queue: Stop signal received. Terminating")
             break
         # Drop any alpha channel before feeding the frame to the pipeline
         feed_queue.put(ExtractMedia(filename, image[..., :3]))
     feed_queue.put("EOF")
     logger.debug("Load Images: Complete")
コード例 #10
0
    def _input_frames(self, *args):
        """ Input frames to the Extractor plugin inside a thread

        Frames missing from the alignments file (or containing no faces) are
        skipped.  Faces are either saved out directly (update type "output")
        or collected per-frame and queued to the extractor.

        Parameters
        ----------
        args: tuple
            The arguments that are to be loaded inside this thread. Contains the queue that the
            faces should be put to
        """
        logger.debug("args: %s", args)
        # The queue is only required when frames are fed on to the extractor
        if self._update_type != "output":
            queue = args[0]
        for filename, image in tqdm(self._loader.load(),
                                    total=self._loader.count):
            frame = os.path.basename(filename)
            if not self._alignments.frame_exists(frame):
                self._skip_count += 1
                logger.warning("Skipping frame not in alignments file: '%s'",
                               frame)
                continue
            if not self._alignments.frame_has_faces(frame):
                logger.debug("Skipping frame with no faces: '%s'", frame)
                continue
            detected_faces = []
            for idx, alignment in enumerate(
                    self._alignments.get_faces_in_frame(frame)):
                self._face_count += 1
                if self._check_for_missing(frame, idx, alignment):
                    continue
                detected_face = self._get_detected_face(alignment)
                if self._update_type == "output":
                    detected_face.image = image
                    self._save(frame, idx, detected_face)
                else:
                    detected_faces.append(detected_face)
                    self._update_count += 1
            # Bug fix: previously ``[detected_face]`` was queued, sending only
            # the last face (and a stale or unbound name when every face had
            # been skipped) instead of the accumulated list.  Only queue frames
            # that actually have faces to update.
            if self._update_type != "output" and detected_faces:
                queue.put(
                    ExtractMedia(filename,
                                 image,
                                 detected_faces=detected_faces))
        if self._update_type != "output":
            queue.put("EOF")
コード例 #11
0
ファイル: convert.py プロジェクト: s884812/faceswap
    def _detect_faces(self, filename, image):
        """ Extract the face from a frame for On-The-Fly conversion.

        Feeds the frame into the Extraction pipeline and pulls the detected
        faces from its output.

        Parameters
        ----------
        filename: str
            The filename to return the detected faces for
        image: :class:`numpy.ndarray`
            The frame that the detected faces exist in

        Returns
        -------
        list
            List of :class:`lib.align.DetectedFace` objects
        """
        feed = ExtractMedia(filename, image)
        self._extractor.input_queue.put(feed)
        extracted = next(self._extractor.detected_faces())
        return extracted.detected_faces
コード例 #12
0
ファイル: jobs_manual.py プロジェクト: zjuzhq/faceswap
    def update_landmarks(self):
        """ Refresh the landmarks for the current bounding box.

        Feeds the current frame and bounding box through the extractor, then
        either adds the resulting alignment as a new face or updates the
        currently selected face.
        """
        frame = self.media["frame_id"]
        feed = ExtractMedia(frame,
                            self.media["image"],
                            detected_faces=[self.media["bounding_box"]])
        self.queues["in"].put(feed)
        detected_face = next(self.extractor.detected_faces()).detected_faces[0]
        alignment = detected_face.to_alignment()
        # Mask will now be incorrect for updated landmarks so delete
        alignment["mask"] = dict()

        selected_id = self.interface.get_selected_face_id()
        if selected_id is None:
            # No face currently selected: treat this as a brand new face
            idx = self.alignments.add_face(frame, alignment)
            self.interface.set_state_value("edit", "selected", idx)
        else:
            self.alignments.update_face(frame, selected_id, alignment)
            self.interface.set_redraw(True)

        self.interface.state["edit"]["updated"] = True
        self.interface.state["edit"]["update_faces"] = True
コード例 #13
0
ファイル: sort.py プロジェクト: xzmagic/faceswap
 def alignment_dict(filename, image):
     """ Wrap a full-frame image in an ExtractMedia object for alignment.

     The whole image is treated as one detected face spanning the frame.
     """
     frame_height, frame_width = image.shape[:2]
     whole_frame_face = DetectedFace(x=0, w=frame_width, y=0, h=frame_height)
     return ExtractMedia(filename, image, detected_faces=[whole_frame_face])
コード例 #14
0
    def _input_faces(self, *args):
        """ Input pre-aligned faces to the Extractor plugin inside a thread

        Legacy face headers are updated on the fly.  Faces that are missing the
        source frame dimensions required for mask generation abort the run.

        Parameters
        ----------
        args: tuple
            The arguments that are to be loaded inside this thread. Contains the queue that the
            faces should be put to
        """
        log_once = False
        logger.debug("args: %s", args)
        # The queue is only required when faces are fed on to the extractor
        if self._update_type != "output":
            queue = args[0]
        for filename, image, metadata in tqdm(self._loader.load(),
                                              total=self._loader.count):
            if not metadata:  # Legacy faces. Update the headers
                if not log_once:
                    # Warn only once, however many legacy faces are found
                    logger.warning(
                        "Legacy faces discovered. These faces will be updated")
                    log_once = True
                metadata = update_legacy_png_header(filename, self._alignments)
                if not metadata:  # Face not found
                    self._counts["skip"] += 1
                    logger.warning(
                        "Legacy face not found in alignments file. This face has not "
                        "been updated: '%s'", filename)
                    continue
            if "source_frame_dims" not in metadata["source"]:
                # Without the original frame size, masks cannot be generated;
                # abort the whole run rather than produce bad output
                logger.error(
                    "The faces need to be re-extracted as at least some of them do not "
                    "contain information required to correctly generate masks."
                )
                logger.error(
                    "You can re-extract the face-set by using the Alignments Tool's "
                    "Extract job.")
                break
            frame_name = metadata["source"]["source_filename"]
            face_index = metadata["source"]["face_index"]
            alignment = self._alignments.get_faces_in_frame(frame_name)
            if not alignment or face_index > len(alignment) - 1:
                self._counts["skip"] += 1
                logger.warning(
                    "Skipping Face not found in alignments file: '%s'",
                    filename)
                continue
            alignment = alignment[face_index]
            self._counts["face"] += 1

            if self._check_for_missing(frame_name, face_index, alignment):
                continue

            detected_face = self._get_detected_face(alignment)
            if self._update_type == "output":
                detected_face.image = image
                self._save(frame_name, face_index, detected_face)
            else:
                media = ExtractMedia(filename,
                                     image,
                                     detected_faces=[detected_face])
                # Hacky overload of ExtractMedia's shape parameter to apply the actual original
                # frame dimension
                # NOTE(review): reaches into the private ``_image_shape``
                # attribute — fragile against ExtractMedia changes
                media._image_shape = (*metadata["source"]["source_frame_dims"],
                                      3)
                setattr(media, "mask_tool_face_info",
                        metadata["source"])  # TODO formalize
                queue.put(media)
                self._counts["update"] += 1
        if self._update_type != "output":
            queue.put("EOF")