Code example #1
File: generator.py Project: zueskalare/faceswap
    def cache_metadata(self, filenames):
        """ Obtain the batch with metadata for items that need caching and cache them to
        :attr:`_cache`.

        Parameters
        ----------
        filenames: list
            List of full paths to image file names

        Returns
        -------
        :class:`numpy.ndarray`
            The batch of face images loaded from disk
        """
        keys = [os.path.basename(filename) for filename in filenames]
        with self._lock:
            if _check_reset(self):
                self._reset_cache(False)

            needs_cache = [
                filename for filename, key in zip(filenames, keys)
                if not self._cache[key]["cached"]
            ]
            logger.trace("Needs cache: %s", needs_cache)

            if not needs_cache:
                # Don't bother reading the metadata if no images in this batch need caching
                logger.debug("All metadata already cached for: %s", keys)
                return read_image_batch(filenames)

            batch, metadata = read_image_batch(filenames, with_metadata=True)

            # Populate items into cache
            for filename in needs_cache:
                key = os.path.basename(filename)
                meta = metadata[filenames.index(filename)]

                # Version Check
                self._validate_version(meta, filename)
                if self._partial_load:  # Faces already loaded for Warp-to-landmarks
                    detected_face = self._cache[key]["detected_face"]
                else:
                    detected_face = self._add_aligned_face(
                        filename, meta["alignments"], batch.shape[1])

                self._add_mask(filename, detected_face)
                for area in ("eye", "mouth"):
                    self._add_localized_mask(filename, detected_face, area)

                self._cache[key]["cached"] = True
            # Update the :attr:`cache_full` attribute
            cache_full = all(item["cached"] for item in self._cache.values())
            if cache_full:
                logger.verbose("Cache filled: '%s'",
                               os.path.dirname(filenames[0]))
                self._cache_full = cache_full

        return batch
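
The pattern above is worth noting: a `threading.Lock` guards a cache keyed on file basename, and a per-entry "cached" flag ensures the expensive metadata read happens at most once per file. A minimal, self-contained sketch of that pattern (the class and method names here are illustrative, not part of the faceswap API):

    import os
    import threading

    class MetadataCache:
        """ Hypothetical cache: one entry per file, guarded by a lock. """

        def __init__(self, filenames):
            self._lock = threading.Lock()
            self._cache = {os.path.basename(fname): dict(cached=False, meta=None)
                           for fname in filenames}

        def ensure_cached(self, filenames, load_meta):
            """ Run ``load_meta`` only for files not yet cached. """
            with self._lock:
                needs_cache = [fname for fname in filenames
                               if not self._cache[os.path.basename(fname)]["cached"]]
                for fname in needs_cache:
                    entry = self._cache[os.path.basename(fname)]
                    entry["meta"] = load_meta(fname)  # expensive read happens once
                    entry["cached"] = True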
Code example #2
    def _process_batch(self, filenames, side):
        """ Performs the augmentation and compiles target images and samples. See
        :func:`minibatch_ab` for more details on the output. """
        logger.trace("Process batch: (filenames: '%s', side: '%s')", filenames,
                     side)
        batch = read_image_batch(filenames)
        batch = self._apply_mask(filenames, batch, side)
        processed = dict()

        # Initialize processing training size on first image
        if not self._processing.initialized:
            self._processing.initialize(batch.shape[1])

        # Get Landmarks prior to manipulating the image
        if self._warp_to_landmarks:
            batch_src_pts = self._get_landmarks(filenames, side)
            batch_dst_pts = self._get_closest_match(filenames, side,
                                                    batch_src_pts)
            warp_kwargs = dict(batch_src_points=batch_src_pts,
                               batch_dst_points=batch_dst_pts)
        else:
            warp_kwargs = dict()

        # Color Augmentation of the image only
        if self._augment_color:
            batch[..., :3] = self._processing.color_adjust(batch[..., :3])

        # Random Transform and flip
        batch = self._processing.transform(batch)
        if not self._no_flip:
            batch = self._processing.random_flip(batch)

        # Add samples to output if this is for display
        if self._processing.is_display:
            processed["samples"] = batch[..., :3].astype("float32") / 255.0

        # Get Targets
        processed.update(self._processing.get_targets(batch))

        # Random Warp # TODO change masks to have an input mask and a warped target mask
        if not self._config["disable_warp"]:
            processed["feed"] = [
                self._processing.warp(batch[..., :3], self._warp_to_landmarks,
                                      **warp_kwargs)
            ]
        else:
            size = (self._model_input_size, self._model_input_size)
            processed["feed"] = [
                np.array([cv2.resize(img, size)
                          for img in batch[..., :3]]).astype("float32") / 255.0
            ]

        logger.trace(
            "Processed batch: (filenames: %s, side: '%s', processed: %s)",
            filenames, side,
            {k: v.shape if isinstance(v, np.ndarray) else [i.shape for i in v]
             for k, v in processed.items()})
        return processed
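
When `disable_warp` is set, the example above falls back to a plain per-image resize for the feed. The list-comprehension-into-`np.array` idiom it uses can be reproduced in isolation (the shapes below are arbitrary):

    import cv2
    import numpy as np

    batch = np.random.randint(0, 255, size=(4, 256, 256, 3), dtype="uint8")
    size = (64, 64)  # cv2.resize expects (width, height)

    # Resize each face, restack to one (N, H, W, C) array, then normalize to [0, 1]
    feed = np.array([cv2.resize(img, size) for img in batch]).astype("float32") / 255.0
    assert feed.shape == (4, 64, 64, 3)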
Code example #3
    def _process_batch(self, filenames, side):
        """ Performs the augmentation and compiles target images and samples. See
        :func:`minibatch_ab` for more details on the output. """
        logger.trace("Process batch: (filenames: '%s', side: '%s')", filenames,
                     side)
        batch = read_image_batch(filenames)
        processed = dict()
        to_landmarks = self._training_opts["warp_to_landmarks"]

        # Initialize processing training size on first image
        if not self._processing.initialized:
            self._processing.initialize(batch.shape[1])

        # Get Landmarks prior to manipulating the image
        if self._mask_class or to_landmarks:
            batch_src_pts = self._get_landmarks(filenames, batch, side)

        # Color augmentation before mask is added
        if self._training_opts["augment_color"]:
            batch = self._processing.color_adjust(batch)

        # Add mask to batch prior to transforms and warps
        if self._mask_class:
            batch = np.array([
                self._mask_class(src_pts, image, channels=4).mask
                for src_pts, image in zip(batch_src_pts, batch)
            ])

        # Random Transform and flip
        batch = self._processing.transform(batch)
        if not self._training_opts["no_flip"]:
            batch = self._processing.random_flip(batch)

        # Add samples to output if this is for display
        if self._processing.is_display:
            processed["samples"] = batch[..., :3].astype("float32") / 255.0

        # Get Targets
        processed.update(self._processing.get_targets(batch))

        # Random Warp
        if to_landmarks:
            warp_kwargs = dict(batch_src_points=batch_src_pts,
                               batch_dst_points=self._get_closest_match(
                                   filenames, side, batch_src_pts))
        else:
            warp_kwargs = dict()
        processed["feed"] = self._processing.warp(batch[..., :3], to_landmarks,
                                                  **warp_kwargs)

        logger.trace(
            "Processed batch: (filenames: %s, side: '%s', processed: %s)",
            filenames, side,
            {k: v.shape if isinstance(v, np.ndarray) else [i.shape for i in v]
             for k, v in processed.items()})

        return processed
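
This older variant folds the mask into the batch as a fourth channel (`channels=4`) before the transforms, so geometric augmentation stays in sync between image and mask; later code then slices `[..., :3]` to recover the image. The same idea with a plain numpy concatenation standing in for the project's mask class:

    import numpy as np

    image = np.random.randint(0, 255, size=(128, 128, 3), dtype="uint8")  # BGR face
    mask = np.zeros((128, 128, 1), dtype="uint8")  # stand-in for a landmark-derived mask

    # With the mask as channel 4, any flip or transform applied to ``stacked``
    # moves image and mask together
    stacked = np.concatenate([image, mask], axis=-1)
    assert stacked.shape == (128, 128, 4)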
Code example #4
    def _process_batch(self, filenames, side):
        """ Performs the augmentation and compiles target images and samples.

        If this is the first time a face has been loaded, then its metadata is extracted
        from the PNG header and added to :attr:`_face_cache`.

        See :func:`minibatch_ab` for more details on the output.

        Parameters
        ----------
        filenames: list
            List of full paths to image file names
        side: str
            The side of the model being trained on (`a` or `b`)
        """
        logger.trace("Process batch: (filenames: '%s', side: '%s')", filenames,
                     side)

        if not self._face_cache.cache_full:
            batch = self._face_cache.cache_metadata(filenames)
        else:
            batch = read_image_batch(filenames)

        cache = self._face_cache.get_items(filenames)
        batch, landmarks = self._crop_to_center(filenames, cache, batch, side)
        batch = self._apply_mask(filenames, cache, batch, side)
        processed = dict()

        # Initialize processing training size on first image
        if not self._processing.initialized:
            self._processing.initialize(batch.shape[1])

        # Get Landmarks prior to manipulating the image
        if self._warp_to_landmarks:
            batch_dst_pts = self._get_closest_match(filenames, side, landmarks)
            warp_kwargs = dict(batch_src_points=landmarks,
                               batch_dst_points=batch_dst_pts)
        else:
            warp_kwargs = dict()

        # Color Augmentation of the image only
        if self._augment_color:
            batch[..., :3] = self._processing.color_adjust(batch[..., :3])

        # Random Transform and flip
        batch = self._processing.transform(batch)
        if not self._no_flip:
            batch = self._processing.random_flip(batch)

        # Switch color order for RGB models
        if self._color_order == "rgb":
            batch = batch[..., [2, 1, 0, 3]]

        # Add samples to output if this is for display
        if self._processing.is_display:
            processed["samples"] = batch[..., :3].astype("float32") / 255.0

        # Get Targets
        processed.update(self._processing.get_targets(batch))

        # Random Warp # TODO change masks to have an input mask and a warped target mask
        if self._no_warp:
            processed["feed"] = [self._processing.skip_warp(batch[..., :3])]
        else:
            processed["feed"] = [
                self._processing.warp(batch[..., :3], self._warp_to_landmarks,
                                      **warp_kwargs)
            ]

        logger.trace(
            "Processed batch: (filenames: %s, side: '%s', processed: %s)",
            filenames, side,
            {k: v.shape if isinstance(v, np.ndarray) else [i.shape for i in v]
             for k, v in processed.items()})
        return processed
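
The `batch[..., [2, 1, 0, 3]]` line above swaps the first three channels from OpenCV's BGR order to RGB while leaving the fourth (mask) channel in place. The fancy-indexing trick in isolation:

    import numpy as np

    batch = np.random.randint(0, 255, size=(2, 64, 64, 4), dtype="uint8")  # BGR + mask

    # Indexing the channel axis with a list reorders it: BGR -> RGB, mask stays last
    rgb = batch[..., [2, 1, 0, 3]]
    assert np.array_equal(rgb[..., 0], batch[..., 2])  # red channel now first
    assert np.array_equal(rgb[..., 3], batch[..., 3])  # mask untouched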
Code example #5
    def cache_metadata(self, filenames):
        """ Obtain the batch with metadata for items that need caching and cache them to
        :attr:`_cache`.

        Parameters
        ----------
        filenames: list
            List of full paths to image file names

        Returns
        -------
        :class:`numpy.ndarray`
            The batch of face images loaded from disk
        """
        keys = [os.path.basename(filename) for filename in filenames]
        with self._lock:
            if _check_reset(self):
                self._reset_cache(False)

            needs_cache = [
                filename for filename, key in zip(filenames, keys)
                if not self._cache[key]["cached"]
            ]
            logger.trace("Needs cache: %s", needs_cache)

            if not needs_cache:
                # Don't bother reading the metadata if no images in this batch need caching
                logger.debug("All metadata already cached for: %s", keys)
                return read_image_batch(filenames)

            batch, metadata = read_image_batch(filenames, with_metadata=True)

            if len(batch.shape) == 1:
                folder = os.path.dirname(filenames[0])
                details = [
                    f"{key} ({img.shape[1]}px)" if isinstance(img, np.ndarray)
                    else f"{key} ({type(img)})"
                    for key, img in zip(keys, batch)
                ]
                msg = (
                    f"There are mismatched image sizes in the folder '{folder}'. All training "
                    "images for each side must have the same dimensions.\nThe batch that "
                    f"failed contains the following files:\n{details}.")
                raise FaceswapError(msg)

            # Populate items into cache
            for filename in needs_cache:
                key = os.path.basename(filename)
                meta = metadata[filenames.index(filename)]

                # Version Check
                self._validate_version(meta, filename)
                if self._partial_load:  # Faces already loaded for Warp-to-landmarks
                    detected_face = self._cache[key]["detected_face"]
                else:
                    detected_face = self._add_aligned_face(
                        filename, meta["alignments"], batch.shape[1])

                self._add_mask(filename, detected_face)
                for area in ("eye", "mouth"):
                    self._add_localized_mask(filename, detected_face, area)

                self._cache[key]["cached"] = True
            # Update the :attr:`cache_full` attribute
            cache_full = all(item["cached"] for item in self._cache.values())
            if cache_full:
                logger.verbose("Cache filled: '%s'",
                               os.path.dirname(filenames[0]))
                self._cache_full = cache_full

        return batch
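
The `len(batch.shape) == 1` test above works because images of different sizes cannot be stacked into a regular (N, H, W, C) array; the batch degrades to a 1-D array of objects. A sketch of that failure mode, built explicitly with an object array since recent numpy raises on implicit ragged stacking:

    import numpy as np

    images = [np.zeros((64, 64, 3)), np.zeros((128, 128, 3))]  # mismatched sizes

    # A ragged batch can only be held as a 1-D object array
    batch = np.empty(len(images), dtype=object)
    batch[:] = images

    assert len(batch.shape) == 1  # the condition the size check fires on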