Code Example #1
    def load(self, filename):
        """ Load data from an existing serialized file

        Parameters
        ----------
        filename: str
            The path to the serialized file

        Returns
        -------
        data: varies
            The data in a python object format

        Example
        -------
        >>> serializer = get_serializer('json')
        >>> json_file = '/path/to/json/file.json'
        >>> data = serializer.load(json_file)
        """
        logger.debug("filename: %s", filename)
        try:
            with open(filename, self._read_option) as s_file:
                data = s_file.read()
                logger.debug("stored data type: %s", type(data))
                retval = self.unmarshal(data)

        except IOError as err:
            msg = f"Error reading from '{filename}': {err.strerror}"
            raise FaceswapError(msg) from err
        logger.debug("data type: %s", type(retval))
        return retval
Code Example #2
    def save(self, filename, data):
        """ Serialize data and save to a file

        Parameters
        ----------
        filename: str
            The path to where the serialized file should be saved
        data: varies
            The data that is to be serialized to file

        Example
        -------
        >>> serializer = get_serializer('json')
        >>> data = ['foo', 'bar']
        >>> json_file = '/path/to/json/file.json'
        >>> serializer.save(json_file, data)
        """
        logger.debug("filename: %s, data type: %s", filename, type(data))
        filename = self._check_extension(filename)
        try:
            with open(filename, self._write_option) as s_file:
                s_file.write(self.marshal(data))
        except IOError as err:
            msg = f"Error writing to '{filename}': {err.strerror}"
            raise FaceswapError(msg) from err
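Taken together, load and save give a straightforward round-trip. Below is a minimal usage sketch, assuming these snippets come from Faceswap's lib.serializer module (where get_serializer returns a serializer instance by name); the file path is purely illustrative.

    from lib.serializer import get_serializer

    serializer = get_serializer("json")
    serializer.save("/tmp/model_state.json", {"iterations": 100, "loss": [0.02, 0.03]})
    restored = serializer.load("/tmp/model_state.json")
    assert restored == {"iterations": 100, "loss": [0.02, 0.03]}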
Code Example #3
    def random_warp(self, image):
        """ get pair of random warped images from aligned face image """
        logger.trace("Randomly warping image")
        height, width = image.shape[0:2]
        coverage = self.get_coverage(image)
        try:
            assert height == width and height % 2 == 0
        except AssertionError as err:
            msg = (
                "Training images should be square with an even number of pixels across each "
                "side. An image was found with width: {}, height: {}."
                "\nMost likely this is a frame rather than a face within your training set. "
                "\nMake sure that the only images within your training set are faces generated "
                "from the Extract process.".format(width, height))
            raise FaceswapError(msg) from err

        range_ = np.linspace(height // 2 - coverage // 2,
                             height // 2 + coverage // 2,
                             5,
                             dtype='float32')
        mapx = np.broadcast_to(range_, (5, 5)).copy()
        mapy = mapx.T
        # mapx, mapy = np.float32(np.meshgrid(range_,range_)) # instead of broadcast

        pad = int(1.25 * self.input_size)
        slices = slice(pad // 10, -pad // 10)
        dst_slices = [
            slice(0, (size + 1), (size // 4)) for size in self.output_sizes
        ]
        interp = np.empty((2, self.input_size, self.input_size),
                          dtype='float32')

        for i, map_ in enumerate([mapx, mapy]):
            map_ = map_ + np.random.normal(size=(5, 5), scale=self.scale)
            interp[i] = cv2.resize(map_, (pad, pad))[slices, slices]  # pylint:disable=no-member

        warped_image = cv2.remap(  # pylint:disable=no-member
            image, interp[0], interp[1], cv2.INTER_LINEAR)  # pylint:disable=no-member
        logger.trace("Warped image shape: %s", warped_image.shape)

        src_points = np.stack([mapx.ravel(), mapy.ravel()], axis=-1)
        dst_points = [
            np.mgrid[dst_slice, dst_slice] for dst_slice in dst_slices
        ]
        mats = [
            umeyama(src_points, True, dst_pts.T.reshape(-1, 2))[0:2]
            for dst_pts in dst_points
        ]

        target_images = [
            cv2.warpAffine(
                image,  # pylint:disable=no-member
                mat,
                (self.output_sizes[idx], self.output_sizes[idx]))
            for idx, mat in enumerate(mats)
        ]

        logger.trace("Target image shapes: %s",
                     [tgt.shape for tgt in target_images])
        return self.compile_images(warped_image, target_images)
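The warp itself boils down to jittering a small control grid and remapping the image through it. Here is a self-contained sketch of that idea using only NumPy and OpenCV; the coverage ratio, grid size and noise scale are illustrative stand-ins for the values Faceswap derives from its configuration.

    import cv2
    import numpy as np

    def simple_random_warp(image, coverage_ratio=0.625, scale=5.0):
        """ Jitter a 5x5 control grid with Gaussian noise and remap the image through it. """
        size = image.shape[0]
        coverage = int(size * coverage_ratio)
        grid = np.linspace(size // 2 - coverage // 2,
                           size // 2 + coverage // 2, 5, dtype="float32")
        mapx = np.broadcast_to(grid, (5, 5)).copy()
        mapy = mapx.T.copy()
        mapx += np.random.normal(size=(5, 5), scale=scale)
        mapy += np.random.normal(size=(5, 5), scale=scale)
        # Upsample the 5x5 control points to a dense per-pixel coordinate map
        interp_x = cv2.resize(mapx, (size, size)).astype("float32")
        interp_y = cv2.resize(mapy, (size, size)).astype("float32")
        return cv2.remap(image, interp_x, interp_y, cv2.INTER_LINEAR)

    # warped = simple_random_warp(cv2.imread("face.png"))  # hypothetical input file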
Code Example #4
    def marshal(self, data):
        """ Serialize an object

        Parameters
        ----------
        data: varies
            The data that is to be serialized

        Returns
        -------
        data: varies
            The data in the serialized format

        Example
        -------
        >>> serializer = get_serializer('json')
        >>> data = ['foo', 'bar']
        >>> json_data = serializer.marshal(data)
        """
        logger.debug("data type: %s", type(data))
        try:
            retval = self._marshal(data)
        except Exception as err:
            msg = f"Error serializing data for type {type(data)}: {str(err)}"
            raise FaceswapError(msg) from err
        logger.debug("returned data type: %s", type(retval))
        return retval
Code Example #5
File: sort.py Project: nonomal/faceswap
    def sort_distance(self):
        """ Sort by comparison of face landmark points to mean face by average distance of core
        landmarks. """
        logger.info("Sorting by average distance of landmarks...")
        filenames = []
        distances = []
        filelist = [os.path.join(self._loader.location, fname)
                    for fname in os.listdir(self._loader.location)
                    if os.path.splitext(fname)[-1] == ".png"]
        for filename, metadata in tqdm(read_image_meta_batch(filelist),
                                       total=len(filelist),
                                       desc="Calculating Distances"):
            if not metadata:
                msg = ("The images to be sorted do not contain alignment data. Images must have "
                       "been generated by Faceswap's Extract process.\nIf you are sorting an "
                       "older faceset, then you should re-extract the faces from your source "
                       "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["itxt"]["alignments"]
            aligned_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"))
            filenames.append(filename)
            distances.append(aligned_face.average_distance)

        logger.info("Sorting...")
        matched_list = list(zip(filenames, distances))
        img_list = sorted(matched_list, key=operator.itemgetter(1))
        return img_list
Code Example #6
    def unmarshal(self, serialized_data):
        """ Unserialize data to its original object type

        Parameters
        ----------
        serialized_data: varies
            Data in serializer format that is to be unmarshalled to its original object

        Returns
        -------
        data: varies
            The data in a python object format

        Example
        -------
        >>> serializer = get_serializer('json')
        >>> json_data = <json object>
        >>> data = serializer.unmarshal(json_data)
        """
        logger.debug("data type: %s", type(serialized_data))
        try:
            retval = self._unmarshal(serialized_data)
        except Exception as err:
            msg = f"Error unserializing data for type {type(serialized_data)}: {str(err)}"
            raise FaceswapError(msg) from err
        logger.debug("returned data type: %s", type(retval))
        return retval
Code Example #7
File: session.py Project: hermioneDeep/dftp
    def _set_session(self, allow_growth):
        """ Sets the session and graph.

        If the backend is AMD then this does nothing and the global ``Keras`` ``Session``
        is used
        """
        if get_backend() == "amd":
            return None

        self.graph = tf.Graph()
        config = tf.ConfigProto()
        if allow_growth and get_backend() == "nvidia":
            config.gpu_options.allow_growth = True
        try:
            session = tf.Session(graph=tf.Graph(), config=config)
        except tf_error.InternalError as err:
            if "driver version is insufficient" in str(err):
                msg = (
                    "Your Nvidia Graphics Driver is insufficient for running Faceswap. "
                    "Please upgrade to the latest version.")
                raise FaceswapError(msg) from err
            raise err
        logger.debug(
            "Created tf.session: (graph: %s, session: %s, config: %s)",
            session.graph, session, config)
        return session
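This snippet targets the TensorFlow 1.x session API. Stripped of the Faceswap wrapper, the allow_growth behaviour it toggles is just the standard ConfigProto option, as in this minimal sketch:

    import tensorflow as tf  # TensorFlow 1.x

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # claim GPU memory on demand instead of all up front
    session = tf.Session(config=config)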
Code Example #8
File: generator.py Project: zueskalare/faceswap
    def _validate_version(self, png_meta, filename):
        """ Validate that there are not a mix of v1.0 extracted faces and v2.x faces.

        Parameters
        ----------
        png_meta: dict
            The information held within the Faceswap PNG Header
        filename: str
            The full path to the file being validated

        Raises
        ------
        FaceswapError
            If a version 1.0 face appears in a 2.x set or vice versa
        """
        alignment_version = png_meta["source"]["alignments_version"]

        if not self._extract_version:
            logger.debug("Setting initial extract version: %s",
                         alignment_version)
            self._extract_version = alignment_version
            if alignment_version == 1.0 and self._centering != "legacy":
                self._reset_cache(True)
            return

        if (self._extract_version == 1.0 and alignment_version > 1.0) or (
                alignment_version == 1.0 and self._extract_version > 1.0):
            raise FaceswapError(
                "Mixing legacy and full head extracted facesets is not supported. "
                "The following folder contains a mix of extracted face types: "
                "{}".format(os.path.dirname(filename)))

        self._extract_version = min(alignment_version, self._extract_version)
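The compatibility rule being enforced is simply that a version 1.0 (legacy) face may not appear alongside any later (full-head) face. A standalone sketch of that check, assuming the versions are plain floats:

    def versions_compatible(existing: float, incoming: float) -> bool:
        """ False if a legacy (1.0) face is mixed with a later extract version. """
        legacy_mix = (existing == 1.0 and incoming > 1.0) or (incoming == 1.0 and existing > 1.0)
        return not legacy_mix

    assert versions_compatible(2.0, 2.2)
    assert not versions_compatible(1.0, 2.0)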
Code Example #9
    def _test_tkinter():
        """ If the user is running the GUI, test whether the tkinter app is available on their
        machine. If not exit gracefully.

        This avoids having to import every tkinter function within the GUI in a wrapper and
        potentially spamming traceback errors to console.

        Raises
        ------
        FaceswapError
            If tkinter cannot be imported
        """
        try:
            import tkinter  # noqa pylint: disable=unused-import,import-outside-toplevel
        except ImportError as err:
            logger.error(
                "It looks like TkInter isn't installed for your OS, so the GUI has been "
                "disabled. To enable the GUI please install the TkInter application. You "
                "can try:")
            logger.info("Anaconda: conda install tk")
            logger.info(
                "Windows/macOS: Install ActiveTcl Community Edition from "
                "http://www.activestate.com")
            logger.info("Ubuntu/Mint/Debian: sudo apt install python3-tk")
            logger.info("Arch: sudo pacman -S tk")
            logger.info("CentOS/Redhat: sudo yum install tkinter")
            logger.info("Fedora: sudo dnf install python3-tkinter")
            raise FaceswapError("TkInter not found") from err
Code Example #10
    def _collate_and_store_loss(self, loss):
        """ Collate the loss into totals for each side.

        The losses are summed into a total for each side. Loss totals are added to
        :attr:`model.state._history` to track the loss drop per save iteration for backup purposes.

        If NaN protection is enabled, checks for NaNs and raises an error if any are detected.

        Parameters
        ----------
        loss: list
            The list of loss ``floats`` for this iteration.

        Returns
        -------
        list
            List of 2 ``floats`` which is the total loss for each side

        Raises
        ------
        FaceswapError
            If a NaN is detected, a :class:`FaceswapError` will be raised
        """
        # NaN protection
        if self._config["nan_protection"] and not all(np.isfinite(val) for val in loss):
            logger.critical("NaN Detected. Loss: %s", loss)
            raise FaceswapError("A NaN was detected and you have NaN protection enabled. Training "
                                "has been terminated.")

        split = len(loss) // 2
        combined_loss = [sum(loss[:split]), sum(loss[split:])]
        self._model.add_history(combined_loss)
        logger.trace("original loss: %s, comibed_loss: %s", loss, combined_loss)
        return combined_loss
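The NaN-protection and per-side totalling logic can be illustrated in isolation. A minimal sketch, using a plain ValueError in place of FaceswapError and omitting the model history update:

    import numpy as np

    def collate_loss(loss, nan_protection=True):
        """ Sum the first and second halves of the loss list, guarding against NaNs. """
        if nan_protection and not all(np.isfinite(val) for val in loss):
            raise ValueError(f"NaN detected in loss: {loss}")
        split = len(loss) // 2
        return [sum(loss[:split]), sum(loss[split:])]

    # collate_loss([0.020, 0.030, 0.025, 0.028]) -> approximately [0.050, 0.053]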
Code Example #11
File: generator.py Project: zueskalare/faceswap
    def pre_fill(self, filenames, side):
        """ When warp to landmarks is enabled, the cache must be pre-filled, as each side needs
        access to the other side's alignments.

        Parameters
        ----------
        filenames: list
            The list of full paths to the images to load the metadata from
        side: str
            `"a"` or `"b"`. The side of the model being cached. Used for info output
        """
        with self._lock:
            for filename, meta in tqdm(
                    read_image_meta_batch(filenames),
                    desc="WTL: Caching Landmarks ({})".format(side.upper()),
                    total=len(filenames),
                    leave=False):
                if "itxt" not in meta or "alignments" not in meta["itxt"]:
                    raise FaceswapError(
                        f"Invalid face image found. Aborting: '{filename}'")

                size = meta["width"]
                meta = meta["itxt"]
                # Version Check
                self._validate_version(meta, filename)
                detected_face = self._add_aligned_face(filename,
                                                       meta["alignments"],
                                                       size)
                self._cache[os.path.basename(
                    filename)]["detected_face"] = detected_face
            self._partial_load = True
Code Example #12
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input_shape = (128, 128, 3)

        self.features = dict(lowmem=0, fair=1, best=2)[self.config["features"]]
        self.encoder_filters = 64 if self.features > 0 else 48

        bonum_fortunam = 128
        self.encoder_dim = {
            0: 512 + bonum_fortunam,
            1: 1024 + bonum_fortunam,
            2: 1536 + bonum_fortunam
        }[self.features]
        self.details = dict(fast=0, good=1)[self.config["details"]]
        try:
            self.upscale_ratio = {
                128: 2,
                256: 4,
                384: 6
            }[self.config["output_size"]]
        except KeyError:
            logger.error(
                "Config error: output_size must be one of: 128, 256, or 384.")
            raise FaceswapError(
                "Config error: output_size must be one of: 128, 256, or 384.")

        logger.debug(
            "output_size: %s, features: %s, encoder_filters: %s, encoder_dim: %s, "
            " details: %s, upscale_ratio: %s", self.config["output_size"],
            self.features, self.encoder_filters, self.encoder_dim,
            self.details, self.upscale_ratio)
Code Example #13
File: losses_plaid.py Project: littlekign/faceswap
    def __call__(self, y_true, y_pred):
        """ Return the Gradient Magnitude Similarity Deviation Loss.

        Parameters
        ----------
        y_true: tensor or variable
            The ground truth value
        y_pred: tensor or variable
            The predicted value

        Returns
        -------
        tensor
            The loss value
        """
        raise FaceswapError(
            "GMSD Loss is not currently compatible with PlaidML. Please select a "
            "different Loss method.")

        true_edge = self._scharr_edges(y_true, True)
        pred_edge = self._scharr_edges(y_pred, True)
        ephsilon = 0.0025
        upper = 2.0 * true_edge * pred_edge
        lower = K.square(true_edge) + K.square(pred_edge)
        gms = (upper + ephsilon) / (lower + ephsilon)
        gmsd = K.std(gms, axis=(1, 2, 3), keepdims=True)
        gmsd = K.squeeze(gmsd, axis=-1)
        return gmsd
Code Example #14
File: _base.py Project: zx110101/faceswap
    def initialize(self, *args, **kwargs):
        """ Inititalize the extractor plugin

            Should be called from :mod:`~plugins.extract.pipeline`
        """
        logger.debug("initialize %s: (args: %s, kwargs: %s)",
                     self.__class__.__name__, args, kwargs)
        p_type = "Detector" if self._plugin_type == "detect" else "Aligner"
        logger.info("Initializing %s %s...", self.name, p_type)
        self.queue_size = 1
        self._add_queues(kwargs["in_queue"], kwargs["out_queue"],
                         ["predict", "post"])
        self._compile_threads()
        try:
            self.init_model()
        except tf_errors.UnknownError as err:
            if "failed to get convolution algorithm" in str(err).lower():
                msg = (
                    "Tensorflow raised an unknown error. This is most likely caused by a "
                    "failure to launch cuDNN which can occur for some GPU/Tensorflow "
                    "combinations. You should enable `allow_growth` to attempt to resolve this "
                    "issue:"
                    "\nGUI: Go to Settings > Extract Plugins > Global and enable the "
                    "`allow_growth` option."
                    "\nCLI: Go to `faceswap/config/extract.ini` and change the `allow_growth "
                    "option to `True`.")
                raise FaceswapError(msg) from err
            raise err
        logger.info("Initialized %s %s with batchsize of %s", self.name,
                    p_type, self.batchsize)
Code Example #15
File: convert.py Project: CarlosGitHub2020/face
    def _get_face_hashes(self):
        """ Check for the existence of an aligned directory for identifying which faces in the
        target frames should be swapped.

        Returns
        -------
        list
            A list of face hashes that exist in the given input aligned directory.
        """
        face_hashes = list()
        input_aligned_dir = self._args.input_aligned_dir

        if input_aligned_dir is None:
            logger.verbose(
                "Aligned directory not specified. All faces listed in the "
                "alignments file will be converted")
        elif not os.path.isdir(input_aligned_dir):
            logger.warning(
                "Aligned directory not found. All faces listed in the "
                "alignments file will be converted")
        else:
            file_list = get_image_paths(input_aligned_dir)
            logger.info("Getting Face Hashes for selected Aligned Images")
            for face in tqdm(file_list, desc="Hashing Faces"):
                face_hashes.append(read_image_hash(face))
            logger.debug("Face Hashes: %s", (len(face_hashes)))
            if not face_hashes:
                raise FaceswapError(
                    "Aligned directory is empty, no faces will be converted!")
            if len(face_hashes) <= len(self._input_images) / 3:
                logger.warning(
                    "Aligned directory contains far fewer images than the input "
                    "directory, are you sure this is the right folder?")
        return face_hashes
Code Example #16
 def get_location(self, folder, filename):
     """ Return the path to alignments file """
     logger.debug("Getting location: (folder: '%s', filename: '%s')",
                  folder, filename)
     extension = os.path.splitext(filename)[1]
     if extension in (".json", ".p", ".pickle", ".yaml", ".yml"):
         # Reformat legacy alignments file
         filename = self.update_file_format(folder, filename)
         logger.debug("Updated legacy alignments. New filename: '%s'",
                      filename)
     elif not extension:
         filename = "{}.{}".format(filename, self.serializer.file_extension)
         logger.debug("File extension set from serializer: '%s'",
                      self.serializer.file_extension)
     elif extension != ".fsa":
         raise FaceswapError(
             "{} is not a valid alignments file".format(filename))
     location = os.path.join(str(folder), filename)
     if not os.path.exists(location):
         # Test for old format alignments files and reformat if they exist
         # This will be executed if an alignments file has not been explicitly provided
         # therefore it will not have been picked up in the extension test
         self.test_for_legacy(location)
     logger.verbose("Alignments filepath: '%s'", location)
     return location
Code Example #17
    def _check_weights_file(cls, weights_file: str) -> Optional[str]:
        """ Validate that we have a valid path to a .h5 file.

        Parameters
        ----------
        weights_file: str
            The full path to a weights file

        Returns
        -------
        str
            The full path to a weights file
        """
        if not weights_file:
            logger.debug("No weights file selected.")
            return None

        msg = ""
        if not os.path.exists(weights_file):
            msg = f"Load weights selected, but the path '{weights_file}' does not exist."
        elif not os.path.splitext(weights_file)[-1].lower() == ".h5":
            msg = (f"Load weights selected, but the path '{weights_file}' is not a valid Keras "
                   f"model (.h5) file.")

        if msg:
            msg += " Please check and try again."
            raise FaceswapError(msg)

        logger.verbose("Using weights file: %s", weights_file)  # type:ignore
        return weights_file
Code Example #18
File: preview.py Project: zhuyin521/faceswap
    def get_filelist(self):
        """ Return a list of files, filtering out those frames which do not contain faces """
        logger.debug("Filtering file list to frames with faces")
        if self.images.is_video:
            filelist = [
                "{}_{:06d}.png".format(
                    os.path.splitext(self.images.input_images)[0], frame_no)
                for frame_no in range(1, self.images.images_found + 1)
            ]
        else:
            filelist = self.images.input_images

        retval = [
            filename for filename in filelist
            if self.alignments.frame_has_faces(os.path.basename(filename))
        ]
        logger.debug("Filtered out frames: %s",
                     self.images.images_found - len(retval))
        try:
            assert retval
        except AssertionError as err:
            msg = (
                "No faces were found in any of the frames passed in. Make sure you are passing "
                "in a frames source rather than extracted faces, and that you have provided "
                "the correct alignments file.")
            raise FaceswapError(msg) from err
        return retval
Code Example #19
    def _get_landmarks(self, filenames, side):
        """ Obtains the 68 Point Landmarks for the images in this batch. This is only called if
        config :attr:`_warp_to_landmarks` is ``True``. If the landmarks for an image cannot be
        found, then an error is raised. """
        logger.trace("Retrieving landmarks: (filenames: %s, side: '%s')",
                     filenames, side)
        src_points = [
            self._landmarks[side].get(filename, None) for filename in filenames
        ]
        # Raise error on missing alignments
        if not all(isinstance(pts, np.ndarray) for pts in src_points):
            missing = [
                filenames[idx] for idx, pts in enumerate(src_points)
                if pts is None
            ]
            msg = (
                "Files missing alignments for this batch: {}"
                "\nAt least one of your images does not have a matching entry in your "
                "alignments file."
                "\nIf you are using 'warp to landmarks' then every "
                "face you intend to train on must exist within the alignments file."
                "\nThe specific files that caused this failure are listed above."
                "\nMost likely there will be more than just these files missing from the "
                "alignments file. You can use the Alignments Tool to help identify missing "
                "alignments".format(missing))
            raise FaceswapError(msg)

        logger.trace("Returning: (src_points: %s)",
                     [str(src) for src in src_points])
        return np.array(src_points)
Code Example #20
    def sort_size(self):
        """ Sort the faces by largest face (in original frame) to smallest """
        logger.info("Sorting by original face size...")
        img_list = []
        for filename, image, metadata in tqdm(self._loader.load(),
                                              desc="Calculating face sizes",
                                              total=self._loader.count,
                                              leave=False):
            if not metadata:
                msg = (
                    "The images to be sorted do not contain alignment data. Images must have "
                    "been generated by Faceswap's Extract process.\nIf you are sorting an "
                    "older faceset, then you should re-extract the faces from your source "
                    "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["alignments"]
            aligned_face = AlignedFace(np.array(alignments["landmarks_xy"],
                                                dtype="float32"),
                                       image=image,
                                       centering="legacy",
                                       is_aligned=True)
            roi = aligned_face.original_roi
            size = ((roi[1][0] - roi[0][0])**2 +
                    (roi[1][1] - roi[0][1])**2)**0.5
            img_list.append((filename, size))

        logger.info("Sorting...")
        return sorted(img_list, key=lambda x: x[1], reverse=True)
Code Example #21
    def _check_location_exists(self):
        """ Check whether the output location exists and is a folder

        Raises
        ------
        FaceswapError
            If the given location does not exist or the location is not a folder
        """
        if not isinstance(self.location, str):
            raise FaceswapError("The output location must be a string not a "
                                "{}".format(type(self.location)))
        super()._check_location_exists()
        if not os.path.isdir(self.location):
            raise FaceswapError(
                "The output location '{}' is not a folder".format(
                    self.location))
Code Example #22
    def sort_face_yaw(self):
        """ Sort by estimated face yaw angle """
        logger.info("Sorting by estimated face yaw angle..")
        filenames = []
        yaws = []
        for filename, image, metadata in tqdm(self._loader.load(),
                                              desc="Classifying Faces",
                                              total=self._loader.count,
                                              leave=False):
            if not metadata:
                msg = (
                    "The images to be sorted do not contain alignment data. Images must have "
                    "been generated by Faceswap's Extract process.\nIf you are sorting an "
                    "older faceset, then you should re-extract the faces from your source "
                    "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["alignments"]
            aligned_face = AlignedFace(np.array(alignments["landmarks_xy"],
                                                dtype="float32"),
                                       image=image,
                                       centering="legacy",
                                       is_aligned=True)
            filenames.append(filename)
            yaws.append(aligned_face.pose.yaw)

        logger.info("Sorting...")
        matched_list = list(zip(filenames, yaws))
        img_list = sorted(matched_list,
                          key=operator.itemgetter(1),
                          reverse=True)
        return img_list
Code Example #23
    def sort_face(self):
        """ Sort by identity similarity """
        logger.info("Sorting by identity similarity...")
        filenames = []
        preds = []
        for filename, image, metadata in tqdm(self._loader.load(),
                                              desc="Classifying Faces",
                                              total=self._loader.count,
                                              leave=False):
            if not metadata:
                msg = (
                    "The images to be sorted do not contain alignment data. Images must have "
                    "been generated by Faceswap's Extract process.\nIf you are sorting an "
                    "older faceset, then you should re-extract the faces from your source "
                    "alignments file to generate this data.")
                raise FaceswapError(msg)
            alignments = metadata["alignments"]
            face = AlignedFace(np.array(alignments["landmarks_xy"],
                                        dtype="float32"),
                               image=image,
                               centering="legacy",
                               size=self._vgg_face.input_size,
                               is_aligned=True).face
            filenames.append(filename)
            preds.append(self._vgg_face.predict(face))

        logger.info("Sorting by ward linkage...")

        indices = self._vgg_face.sorted_similarity(np.array(preds),
                                                   method="ward")
        img_list = np.array(filenames)[indices]
        return img_list
Code Example #24
    def _scharr_edges(
            cls, image: plaidml.tile.Value, magnitude: bool,
            image_shape: Tuple[None, int, int, int]) -> plaidml.tile.Value:
        """ Returns a tensor holding modified Scharr edge maps.

        Parameters
        ----------
        image: :class:`plaidml.tile.Value`
            Image tensor with shape [batch_size, h, w, d] and type float32. The image(s) must be
            2x2 or larger.
        magnitude: bool
            Boolean to determine if the edge magnitude or edge direction is returned
        image_shape: tuple
            The shape of the incoming image

        Returns
        -------
        :class:`plaidml.tile.Value`
            Tensor holding edge maps for each channel. Returns a tensor with shape `[batch_size, h,
            w, d, 2]` where the last two dimensions hold `[[dy[0], dx[0]], [dy[1], dx[1]], ...,
            [dy[d-1], dx[d-1]]]` calculated using the Scharr filter.
        """
        # Define vertical and horizontal Scharr filters.
        # 5x5 modified Scharr kernel ( reshape to (5,5,1,2) )
        matrix = np.array([[[[0.00070, 0.00070]], [[0.00520, 0.00370]],
                            [[0.03700, 0.00000]], [[0.00520, -0.0037]],
                            [[0.00070, -0.0007]]],
                           [[[0.00370, 0.00520]], [[0.11870, 0.11870]],
                            [[0.25890, 0.00000]], [[0.11870, -0.1187]],
                            [[0.00370, -0.0052]]],
                           [[[0.00000, 0.03700]], [[0.00000, 0.25890]],
                            [[0.00000, 0.00000]], [[0.00000, -0.2589]],
                            [[0.00000, -0.0370]]],
                           [[[-0.0037, 0.00520]], [[-0.1187, 0.11870]],
                            [[-0.2589, 0.00000]], [[-0.1187, -0.1187]],
                            [[-0.0037, -0.0052]]],
                           [[[-0.0007, 0.00070]], [[-0.0052, 0.00370]],
                            [[-0.0370, 0.00000]], [[-0.0052, -0.0037]],
                            [[-0.0007, -0.0007]]]])
        # num_kernels = [2]
        kernels = K.constant(matrix, dtype='float32')
        kernels = K.tile(kernels, [1, 1, image_shape[-1], 1])

        # Use depth-wise convolution to calculate edge maps per channel.
        # Output tensor has shape [batch_size, h, w, d * num_kernels].
        pad_sizes = [[0, 0], [2, 2], [2, 2], [0, 0]]
        padded = pad(image, pad_sizes, mode='REFLECT')
        output = K.depthwise_conv2d(padded, kernels)

        # TODO magnitude not implemented for plaidml
        if not magnitude:  # direction of edges
            raise FaceswapError(
                "Magnitude for GMSD Loss is not implemented in PlaidML")
        #    # Reshape to [batch_size, h, w, d, num_kernels].
        #    shape = K.concatenate([image_shape, num_kernels], axis=0)
        #    output = K.reshape(output, shape=shape)
        #    output.set_shape(static_image_shape.concatenate(num_kernels))
        #    output = tf.atan(K.squeeze(output[:, :, :, :, 0] / output[:, :, :, :, 1], axis=None))
        # magnitude of edges -- unified x & y edges don't work well with Neural Networks
        return output
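For reference, the full GMSD computation that this edge extractor feeds into is small enough to sketch with plain NumPy and OpenCV's standard 3x3 Scharr operator (rather than the modified 5x5 kernel above). This is an illustrative, single-channel version, not Faceswap's implementation:

    import cv2
    import numpy as np

    def gmsd(img_true, img_pred, epsilon=0.0025):
        """ Gradient Magnitude Similarity Deviation between two float32 images in [0, 1]. """
        def gradient_magnitude(img):
            grad_x = cv2.Scharr(img, cv2.CV_32F, 1, 0)
            grad_y = cv2.Scharr(img, cv2.CV_32F, 0, 1)
            return np.sqrt(grad_x ** 2 + grad_y ** 2)

        mag_true = gradient_magnitude(img_true)
        mag_pred = gradient_magnitude(img_pred)
        gms = (2.0 * mag_true * mag_pred + epsilon) / (mag_true ** 2 + mag_pred ** 2 + epsilon)
        return float(np.std(gms))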
Code Example #25
File: io.py Project: wei/faceswap
    def _load(self) -> keras.models.Model:
        """ Loads the model from disk

        If the predict function is to be called and the model cannot be found in the model folder
        then an error is logged and the process exits.

        When loading the model, the plugin model folder is scanned for custom layers which are
        added to Keras' custom objects.

        Returns
        -------
        :class:`keras.models.Model`
            The saved model loaded from disk
        """
        logger.debug("Loading model: %s", self._filename)
        if self._is_predict and not self.model_exists:
            logger.error("Model could not be found in folder '%s'. Exiting",
                         self._model_dir)
            sys.exit(1)

        try:
            model = load_model(self._filename, compile=False)
        except RuntimeError as err:
            if "unable to get link info" in str(err).lower():
                msg = (
                    f"Unable to load the model from '{self._filename}'. This may be a "
                    "temporary error but most likely means that your model has corrupted.\n"
                    "You can try to load the model again but if the problem persists you "
                    "should use the Restore Tool to restore your model from backup.\n"
                    f"Original error: {str(err)}")
                raise FaceswapError(msg) from err
            raise err
        except KeyError as err:
            if "unable to open object" in str(err).lower():
                msg = (
                    f"Unable to load the model from '{self._filename}'. This may be a "
                    "temporary error but most likely means that your model has corrupted.\n"
                    "You can try to load the model again but if the problem persists you "
                    "should use the Restore Tool to restore your model from backup.\n"
                    f"Original error: {str(err)}")
                raise FaceswapError(msg) from err
            raise err

        logger.info("Loaded model from disk: '%s'", self._filename)
        return model
Code Example #26
File: _base.py Project: procule/df
 def build(self):
     """ Build the model. Override for custom build methods """
     self.add_networks()
     self.load_models(swapped=False)
     try:
         self.build_autoencoders()
     except ValueError as err:
         if "must be from the same graph" in str(err).lower():
             msg = ("There was an error loading saved weights. This is most likely due to "
                    "model corruption during a previous save."
                    "\nYou should restore weights from a snapshot or from backup files. "
                    "You can use the 'Restore' Tool to restore from backup.")
             raise FaceswapError(msg) from err
         if "multi_gpu_model" in str(err).lower():
             raise FaceswapError(str(err)) from err
         raise err
     self.log_summary()
     self.compile_predictors(initialize=True)
Code Example #27
File: io.py Project: wei/faceswap
    def load(self, model_exists: bool) -> None:
        """ Load weights for newly created models, or output warning for pre-existing models.

        Parameters
        ----------
        model_exists: bool
            ``True`` if a model pre-exists and is being resumed, ``False`` if this is a new model
        """
        if not self._weights_file:
            logger.debug("No weights file provided. Not loading weights.")
            return
        if model_exists and self._weights_file:
            logger.warning(
                "Ignoring weights file '%s' as this model is resuming.",
                self._weights_file)
            return

        weights_models = self._get_weights_model()
        all_models = get_all_sub_models(self._model)

        for model_name in self._load_layers:
            sub_model = next(
                (lyr for lyr in all_models if lyr.name == model_name), None)
            sub_weights = next(
                (lyr for lyr in weights_models if lyr.name == model_name),
                None)

            if not sub_model or not sub_weights:
                msg = f"Skipping layer {model_name} as not in "
                msg += "current_model." if not sub_model else f"weights '{self._weights_file}.'"
                logger.warning(msg)
                continue

            logger.info("Loading weights for layer '%s'", model_name)
            skipped_ops = 0
            loaded_ops = 0
            for layer in sub_model.layers:
                success = self._load_layer_weights(layer, sub_weights,
                                                   model_name)
                if success == 0:
                    skipped_ops += 1
                elif success == 1:
                    loaded_ops += 1

        del weights_models

        if loaded_ops == 0:
            raise FaceswapError(
                f"No weights were succesfully loaded from your weights file: "
                f"'{self._weights_file}'. Please check and try again.")
        if skipped_ops > 0:
            logger.warning(
                "%s weight(s) were unable to be loaded for your model. This is most "
                "likely because the weights you are trying to load were trained with "
                "different settings than you have set for your current model.",
                skipped_ops)
Code Example #28
File: convert.py Project: zxxzzxxz/faceswap
 def validate(self):
     """ Make the output folder if it doesn't exist and check that video flag is
         a valid choice """
     if (self.args.writer == "ffmpeg" and not self.images.is_video
             and self.args.reference_video is None):
         raise FaceswapError(
             "Output as video selected, but using frames as input. You must "
             "provide a reference video ('-ref', '--reference-video').")
     output_dir = get_folder(self.args.output_dir)
     logger.info("Output Directory: %s", output_dir)
Code Example #29
    def _validate(self):
        """ Validate the Command Line Options.

        Ensure that certain cli selections are valid and won't result in an error. Checks:
            * If frames have been passed in with video output, ensure user supplies reference
            video.
            * If a mask-type is selected, ensure it exists in the alignments file.
            * If a predicted mask-type is selected, ensure the model has been trained with a mask,
            otherwise attempt to select the first available mask, or raise an error if none exist.

        Raises
        ------
        FaceswapError
            If an invalid selection has been found.

        """
        if (self._args.writer == "ffmpeg" and
                not self._images.is_video and
                self._args.reference_video is None):
            raise FaceswapError("Output as video selected, but using frames as input. You must "
                                "provide a reference video ('-ref', '--reference-video').")
        if (self._args.mask_type not in ("none", "predicted") and
                not self._alignments.mask_is_valid(self._args.mask_type)):
            msg = ("You have selected the Mask Type `{}` but at least one face does not have this "
                   "mask stored in the Alignments File.\nYou should generate the required masks "
                   "with the Mask Tool or set the Mask Type option to an existing Mask Type.\nA "
                   "summary of existing masks is as follows:\nTotal faces: {}, Masks: "
                   "{}".format(self._args.mask_type, self._alignments.faces_count,
                               self._alignments.mask_summary))
            raise FaceswapError(msg)
        if self._args.mask_type == "predicted" and not self._predictor.has_predicted_mask:
            available_masks = [k for k, v in self._alignments.mask_summary.items()
                               if k != "none" and v == self._alignments.faces_count]
            if not available_masks:
                msg = ("Predicted Mask selected, but the model was not trained with a mask and no "
                       "masks are stored in the Alignments File.\nYou should generate the "
                       "required masks with the Mask Tool or set the Mask Type to `none`.")
                raise FaceswapError(msg)
            mask_type = available_masks[0]
            logger.warning("Predicted Mask selected, but the model was not trained with a "
                           "mask. Selecting first available mask: '%s'", mask_type)
            self._args.mask_type = mask_type
Code Example #30
File: train.py Project: ohshyuk5/DeepBackend
    def _set_timelapse(self):
        """ Set time-lapse paths if requested.

        Returns
        -------
        dict
            The time-lapse keyword arguments for passing to the trainer

        """
        if (not self._args.timelapse_input_a
                and not self._args.timelapse_input_b
                and not self._args.timelapse_output):
            return None
        if (not self._args.timelapse_input_a
                or not self._args.timelapse_input_b
                or not self._args.timelapse_output):
            raise FaceswapError(
                "To enable the timelapse, you have to supply all the parameters "
                "(--timelapse-input-A, --timelapse-input-B and "
                "--timelapse-output).")

        timelapse_output = str(get_folder(self._args.timelapse_output))

        for folder in (self._args.timelapse_input_a,
                       self._args.timelapse_input_b):
            if folder is not None and not os.path.isdir(folder):
                raise FaceswapError(
                    "The Timelapse path '{}' does not exist".format(folder))
            exts = [
                os.path.splitext(fname)[-1] for fname in os.listdir(folder)
            ]
            if not any(ext in _image_extensions for ext in exts):
                raise FaceswapError(
                    "The Timelapse path '{}' does not contain any valid "
                    "images".format(folder))
        kwargs = {
            "input_a": self._args.timelapse_input_a,
            "input_b": self._args.timelapse_input_b,
            "output": timelapse_output
        }
        logger.debug("Timelapse enabled: %s", kwargs)
        return kwargs
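The "does this folder contain any usable images" test reduces to an extension check. A small sketch, using an illustrative extension whitelist in place of Faceswap's _image_extensions list:

    import os

    def has_images(folder, extensions=(".png", ".jpg", ".jpeg")):
        """ True if the folder contains at least one file with an image extension. """
        return any(os.path.splitext(fname)[-1].lower() in extensions
                   for fname in os.listdir(folder))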