Code example #1
File: fsmedia.py  Project: longlory/faceswap
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for idx, detected_face in enumerate(output_item["detected_faces"]):
            frame_name = detected_face["file_location"].parts[-1]
            face = detected_face["face"]
            logger.trace("Checking for blurriness. Frame: '%s', Face: %s",
                         frame_name, idx)
            aligned_landmarks = face.aligned_landmarks
            resized_face = face.aligned_face
            size = face.aligned["size"]
            padding = int(size * 0.1875)
            feature_mask = extractor.get_feature_mask(aligned_landmarks / size,
                                                      size, padding)
            feature_mask = cv2.blur(feature_mask, (10, 10))
            isolated_face = cv2.multiply(feature_mask,
                                         resized_face.astype(float)).astype(
                                             np.uint8)
            blurry, focus_measure = self.is_blurry(isolated_face)

            if blurry:
                blur_folder = detected_face["file_location"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                detected_face["file_location"] = blur_folder / Path(frame_name)
                logger.verbose(
                    "%s's focus measure of %s was below the blur threshold, "
                    "moving to 'blurry'", frame_name,
                    "{0:.2f}".format(focus_measure))
Code example #2
    def load_feed_face(self, image, size=64, coverage_ratio=0.625, dtype=None):
        """ Align a face in the correct dimensions for feeding into a model.

        Parameters
        ----------
        image: numpy.ndarray
            The image that contains the face to be aligned
        size: int
            The size of the face in pixels to be fed into the model
        coverage_ratio: float, optional
            The ratio of the extracted image that was used for training. Default: `0.625`
        dtype: str, optional
            Optionally set a ``dtype`` for the final face to be formatted in. Default: ``None``

        Notes
        -----
        This method must be executed to get access to the following `properties`:
            - :func:`feed_face`
            - :func:`feed_interpolators`
        """
        logger.trace("Loading feed face: (size: %s, coverage_ratio: %s, dtype: %s)",
                     size, coverage_ratio, dtype)

        self.feed["size"] = size
        self.feed["padding"] = self._padding_from_coverage(size, coverage_ratio)
        self.feed["matrix"] = get_align_mat(self)

        face = AlignerExtract().transform(image, self.feed["matrix"], size, self.feed["padding"])
        face = np.clip(face[:, :, :3] / 255., 0., 1.)
        self.feed["face"] = face if dtype is None else face.astype(dtype)

        logger.trace("Loaded feed face. (face_shape: %s, matrix: %s)",
                     self.feed_face.shape, self._feed_matrix)
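A brief usage sketch of the method above (here `detected_face` stands for an already-populated instance of the class and the file name is illustrative; only `load_feed_face` and the `feed` dictionary come from the snippet itself):

import cv2

frame = cv2.imread("frame_0001.png")  # illustrative frame containing the detected face
detected_face.load_feed_face(frame, size=64, coverage_ratio=0.625, dtype="float32")
model_input = detected_face.feed["face"]  # aligned face scaled to [0, 1], shape (64, 64, 3)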
Code example #3
File: fsmedia.py  Project: stonezuohui/faceswap
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for idx, detected_face in enumerate(output_item["detected_faces"]):
            frame_name = detected_face["file_location"].parts[-1]
            face = detected_face["face"]
            logger.trace("Checking for blurriness. Frame: '%s', Face: %s", frame_name, idx)
            aligned_landmarks = face.aligned_landmarks
            resized_face = face.aligned_face
            size = face.aligned["size"]
            padding = int(size * 0.1875)
            feature_mask = extractor.get_feature_mask(
                aligned_landmarks / size,
                size, padding)
            feature_mask = cv2.blur(  # pylint: disable=no-member
                feature_mask, (10, 10))
            isolated_face = cv2.multiply(  # pylint: disable=no-member
                feature_mask,
                resized_face.astype(float)).astype(np.uint8)
            blurry, focus_measure = self.is_blurry(isolated_face)

            if blurry:
                blur_folder = detected_face["file_location"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                detected_face["file_location"] = blur_folder / Path(frame_name)
                logger.verbose("%s's focus measure of %s was below the blur threshold, "
                               "moving to 'blurry'", frame_name, "{0:.2f}".format(focus_measure))
Code example #4
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for face in output_item["detected_faces"]:
            aligned_landmarks = face.aligned_landmarks
            resized_face = face.aligned_face
            size = face.aligned["size"]
            feature_mask = extractor.get_feature_mask(
                aligned_landmarks / size,
                size, 48)
            feature_mask = cv2.blur(  # pylint: disable=no-member
                feature_mask, (10, 10))
            isolated_face = cv2.multiply(  # pylint: disable=no-member
                feature_mask,
                resized_face.astype(float)).astype(np.uint8)
            blurry, focus_measure = is_blurry(isolated_face, self.blur_thresh)

            if blurry:
                blur_folder = output_item["output_file"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                frame_name = output_item["output_file"].parts[-1]
                output_item["output_file"] = blur_folder / Path(frame_name)
                if self.verbose:
                    print("{}'s focus measure of {} was below the blur "
                          "threshold, moving to \"blurry\"".format(
                              frame_name, focus_measure))
Code example #5
    def load_aligned(self, image, size=256, align_eyes=False, dtype=None):
        """ No need to load aligned information for all uses of this
            class, so only call this to load the information for easy
            reference to aligned properties for this face """
        # Don't reload an already aligned face:
        if self.aligned:
            logger.trace(
                "Skipping alignment calculation for already aligned face")
        else:
            logger.trace(
                "Loading aligned face: (size: %s, align_eyes: %s, dtype: %s)",
                size, align_eyes, dtype)
            padding = int(size * self.extract_ratio) // 2
            self.aligned["size"] = size
            self.aligned["padding"] = padding
            self.aligned["align_eyes"] = align_eyes
            self.aligned["matrix"] = get_align_mat(self, size, align_eyes)
            self.aligned["face"] = None
        if image is not None and self.aligned["face"] is None:
            logger.trace("Getting aligned face")
            face = AlignerExtract().transform(image, self.aligned["matrix"],
                                              size, self.aligned["padding"])
            self.aligned["face"] = face if dtype is None else face.astype(
                dtype)

        logger.trace(
            "Loaded aligned face: %s",
            {key: val
             for key, val in self.aligned.items() if key != "face"})
Code example #6
    def load_aligned(self, image, size=256, align_eyes=False, dtype=None):
        """ Align a face from a given image.

        Aligning a face is a relatively expensive task and is not required for all uses of
        the :class:`~lib.faces_detect.DetectedFace` object, so call this function explicitly to
        load an aligned face.

        This method plugs into :mod:`lib.aligner` to perform face alignment based on this face's
        ``landmarks_xy``. If the face has already been aligned, then this function will return
        having performed no action.

        Parameters
        ----------
        image: numpy.ndarray
            The image that contains the face to be aligned
        size: int
            The size of the output face in pixels
        align_eyes: bool, optional
            Optionally perform additional alignment to align eyes. Default: `False`
        dtype: str, optional
            Optionally set a ``dtype`` for the final face to be formatted in. Default: ``None``

        Notes
        -----
        This method must be executed to get access to the following `properties`:
            - :func:`original_roi`
            - :func:`aligned_landmarks`
            - :func:`aligned_face`
            - :func:`adjusted_interpolators`
        """
        if self.aligned:
            # Don't reload an already aligned face
            logger.trace(
                "Skipping alignment calculation for already aligned face")
        else:
            logger.trace(
                "Loading aligned face: (size: %s, align_eyes: %s, dtype: %s)",
                size, align_eyes, dtype)
            padding = int(size * self._extract_ratio) // 2
            self.aligned["size"] = size
            self.aligned["padding"] = padding
            self.aligned["align_eyes"] = align_eyes
            self.aligned["matrix"] = get_align_mat(self, size, align_eyes)
            self.aligned["face"] = None
        if image is not None and self.aligned["face"] is None:
            logger.trace("Getting aligned face")
            face = AlignerExtract().transform(image, self.aligned["matrix"],
                                              size, self.aligned["padding"])
            self.aligned["face"] = face if dtype is None else face.astype(
                dtype)

        logger.trace(
            "Loaded aligned face: %s",
            {key: val
             for key, val in self.aligned.items() if key != "face"})
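As with the feed face, a short usage sketch may help (again `detected_face` is an assumed, already-populated instance; the properties accessed are the ones listed in the docstring above):

import cv2

frame = cv2.imread("frame_0001.png")
detected_face.load_aligned(frame, size=256)   # relatively expensive; only call when needed
aligned = detected_face.aligned_face          # 256x256 aligned face
points = detected_face.aligned_landmarks      # landmarks in aligned-face coordinates

As an aside, with size=256 an extract ratio of 0.375 gives padding = int(256 * 0.375) // 2 = 48, which lines up with the hard-coded int(size * 0.1875) and the literal 48 in the blur-detection snippets above; the 0.375 value itself is an assumption, not shown in these examples.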
Code example #7
File: faces_detect.py  Project: reefbreak/faceswap
    def load_feed_face(self,
                       image,
                       size=64,
                       coverage_ratio=0.625,
                       dtype=None,
                       is_aligned_face=False):
        """ Align a face in the correct dimensions for feeding into a model.

        Parameters
        ----------
        image: numpy.ndarray
            The image that contains the face to be aligned
        size: int
            The size of the face in pixels to be fed into the model
        coverage_ratio: float, optional
            The ratio of the extracted image that was used for training. Default: `0.625`
        dtype: str, optional
            Optionally set a ``dtype`` for the final face to be formatted in. Default: ``None``
        is_aligned_face: bool, optional
            Indicates that the :attr:`image` is an aligned face rather than a frame.
            Default: ``False``

        Notes
        -----
        This method must be executed to get access to the following `properties`:
            - :func:`feed_face`
            - :func:`feed_interpolators`
        """
        logger.trace(
            "Loading feed face: (size: %s, coverage_ratio: %s, dtype: %s, "
            "is_aligned_face: %s)", size, coverage_ratio, dtype,
            is_aligned_face)

        self.feed["size"] = size
        self.feed["padding"] = self._padding_from_coverage(
            size, coverage_ratio)
        self.feed["matrix"] = get_align_mat(self)
        if is_aligned_face:
            original_size = image.shape[0]
            interp = cv2.INTER_CUBIC if original_size < size else cv2.INTER_AREA
            face = cv2.resize(image, (size, size), interpolation=interp)
        else:
            face = AlignerExtract().transform(image, self.feed["matrix"], size,
                                              self.feed["padding"])
        self.feed["face"] = face if dtype is None else face.astype(dtype)

        logger.trace("Loaded feed face. (face_shape: %s, matrix: %s)",
                     self.feed_face.shape, self.feed_matrix)
Code example #8
File: _base.py  Project: vineel123/face_swap
    def __init__(self,
                 loglevel,
                 configfile=None,
                 normalize_method=None,
                 git_model_id=None,
                 model_filename=None,
                 colorspace="BGR",
                 input_size=256):
        logger.debug(
            "Initializing %s: (loglevel: %s, configfile: %s, normalize_method: %s, "
            "git_model_id: %s, model_filename: '%s', colorspace: '%s'. input_size: %s)",
            self.__class__.__name__, loglevel, configfile, normalize_method,
            git_model_id, model_filename, colorspace, input_size)
        self.loglevel = loglevel
        self.normalize_method = normalize_method
        self.colorspace = colorspace.upper()
        self.input_size = input_size
        self.extract = Extract()
        self.init = None
        self.error = None

        # The input and output queues for the plugin.
        # See lib.queue_manager.QueueManager for getting queues
        self.queues = {"in": None, "out": None}

        #  Get model if required
        self.model_path = self.get_model(git_model_id, model_filename)

        # Approximate VRAM required for aligner. Used to calculate
        # how many parallel processes / batches can be run.
        # Be conservative to avoid OOM.
        self.vram = None
        logger.debug("Initialized %s", self.__class__.__name__)
Code example #9
File: faces_detect.py  Project: stonezuohui/faceswap
    def load_aligned(self, image, size=256, align_eyes=False, dtype=None):
        """ No need to load aligned information for all uses of this
            class, so only call this to load the information for easy
            reference to aligned properties for this face """
        logger.trace("Loading aligned face: (size: %s, align_eyes: %s, dtype: %s)",
                     size, align_eyes, dtype)
        padding = int(size * self.extract_ratio) // 2
        self.aligned["size"] = size
        self.aligned["padding"] = padding
        self.aligned["align_eyes"] = align_eyes
        self.aligned["matrix"] = get_align_mat(self, size, align_eyes)
        if image is None:
            self.aligned["face"] = None
        else:
            face = AlignerExtract().transform(
                image,
                self.aligned["matrix"],
                size,
                padding)
            self.aligned["face"] = face if dtype is None else face.astype(dtype)

        logger.trace("Loaded aligned face: %s", {key: val
                                                 for key, val in self.aligned.items()
                                                 if key != "face"})
Code example #10
File: _base.py  Project: yanglei50/DeepFakeTutorial
    def __init__(self, verbose=False):
        self.verbose = verbose
        self.cachepath = os.path.join(os.path.dirname(__file__), ".cache")
        self.extract = Extract()
        self.init = None

        # The input and output queues for the plugin.
        # See lib.multithreading.QueueManager for getting queues
        self.queues = {"in": None, "out": None}

        #  Path to model if required
        self.model_path = self.set_model_path()

        # Approximate VRAM required for aligner. Used to calculate
        # how many parallel processes / batches can be run.
        # Be conservative to avoid OOM.
        self.vram = None
Code example #11
    def __init__(self, loglevel):
        logger.debug("Initializing %s", self.__class__.__name__)
        self.loglevel = loglevel
        self.cachepath = os.path.join(os.path.dirname(__file__), ".cache")
        self.extract = Extract()
        self.init = None

        # The input and output queues for the plugin.
        # See lib.queue_manager.QueueManager for getting queues
        self.queues = {"in": None, "out": None}

        #  Path to model if required
        self.model_path = self.set_model_path()

        # Approximate VRAM required for aligner. Used to calculate
        # how many parallel processes / batches can be run.
        # Be conservative to avoid OOM.
        self.vram = None
        logger.debug("Initialized %s", self.__class__.__name__)
Code example #12
    def landmark(self, frame_index, face_index, landmark_index, shift_x, shift_y, is_zoomed):
        """ Shift a single landmark point for the :class:`~lib.faces_detect.DetectedFace` object
        at the given frame and face indices by the given x and y values.

        Parameters
        ----------
        frame_index: int
            The frame that the face is being set for
        face_index: int
            The face index within the frame
        landmark_index: int or list
            The landmark index to shift. If a list is provided, this should be a list of landmark
            indices to be shifted
        shift_x: int
            The amount to shift the landmark by along the x axis
        shift_y: int
            The amount to shift the landmark by along the y axis
        is_zoomed: bool
            ``True`` if landmarks are being adjusted on a zoomed image otherwise ``False``
        """
        face = self._faces_at_frame_index(frame_index)[face_index]
        if is_zoomed:
            if not np.any(face.aligned_landmarks):  # This will be None on a resize
                face.load_aligned(None, size=min(self._globals.frame_display_dims))
            landmark = face.aligned_landmarks[landmark_index]
            landmark += (shift_x, shift_y)
            matrix = AlignerExtract.transform_matrix(face.aligned["matrix"],
                                                     face.aligned["size"],
                                                     face.aligned["padding"])
            matrix = cv2.invertAffineTransform(matrix)
            if landmark.ndim == 1:
                landmark = np.reshape(landmark, (1, 1, 2))
                landmark = cv2.transform(landmark, matrix, landmark.shape).squeeze()
                face.landmarks_xy[landmark_index] = landmark
            else:
                for lmk, idx in zip(landmark, landmark_index):
                    lmk = np.reshape(lmk, (1, 1, 2))
                    lmk = cv2.transform(lmk, matrix, lmk.shape).squeeze()
                    face.landmarks_xy[idx] = lmk
        else:
            face.landmarks_xy[landmark_index] += (shift_x, shift_y)
        face.mask = self._extractor.get_masks(frame_index, face_index)
        self._globals.tk_update.set(True)
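The zoomed branch above converts a shifted point from aligned-face coordinates back into frame coordinates by inverting the alignment matrix. A standalone sketch of that single transform step (the 2x3 matrix values here are illustrative; in the snippet the matrix comes from AlignerExtract.transform_matrix):

import cv2
import numpy as np

# Illustrative 2x3 affine matrix mapping frame coordinates to aligned-face coordinates.
matrix = np.array([[0.5, 0.0, 10.0],
                   [0.0, 0.5, 20.0]])
inverse = cv2.invertAffineTransform(matrix)

point = np.array([[[128.0, 96.0]]])            # one landmark in aligned-face coordinates
frame_point = cv2.transform(point, inverse).squeeze()
print(frame_point)                             # the same landmark in frame coordinates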