Example #1
0
    def _get_extractor(self, exclude_gpus):
        """ Launch and return a mask :class:`~plugins.extract.pipeline.Extractor` plugin.

        Parameters
        ----------
        exclude_gpus: list or ``None``
            Indices of connected GPUs that Tensorflow should not use. ``None`` to
            allow all GPUs to be used.

        Returns
        -------
        :class:`plugins.extract.pipeline.Extractor` or ``None``
            The launched Extractor, or ``None`` when only existing output is being updated
        """
        if self._update_type == "output":
            # Nothing needs extracting when we are only re-outputting existing data
            logger.debug("Update type `output` selected. Not launching extractor")
            return None
        logger.debug("masker: %s", self._mask_type)
        masker = Extractor(None,
                           None,
                           self._mask_type,
                           exclude_gpus=exclude_gpus,
                           image_is_aligned=self._input_is_faces)
        masker.launch()
        logger.debug(masker)
        return masker
Example #2
0
    def __init__(self, arguments):
        """ Set up the extraction process.

        Parameters
        ----------
        arguments: argparse.Namespace
            The command line arguments passed to the extraction process
        """
        # Fixed: format string was missing its closing parenthesis ("(args: %s")
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments
        # No output folder is required when face saving is skipped
        self._output_dir = None if self._args.skip_saving_faces else get_folder(
            self._args.output_dir)

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        # `configfile` is not present for all invocations of the extract process
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        normalization = None if self._args.normalization == "none" else self._args.normalization

        # Landmark based maskers are always run in addition to any user selected maskers
        maskers = ["components", "extended"]
        maskers += self._args.masker if self._args.masker else []
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    maskers,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    exclude_gpus=self._args.exclude_gpus,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization,
                                    re_feed=self._args.re_feed)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)
Example #3
0
    def _load_extractor(self):
        """ Load the cv2-dnn based face extraction chain.

        A CPU based extractor is used for On-The-Fly conversion so that the GPU is
        not stacked. The quality of the results will be poor.

        Returns
        -------
        :class:`plugins.extract.Pipeline.Extractor`
            The face extraction chain to be used for on-the-fly conversion
        """
        logger.debug("Loading extractor")
        logger.warning("On-The-Fly conversion selected. This will use the inferior "
                       "cv2-dnn for extraction and will produce poor results.")
        logger.warning("It is recommended to generate an alignments file for your "
                       "destination video with Extract first for superior results.")
        chain = Extractor(detector="cv2-dnn",
                          aligner="cv2-dnn",
                          masker="none",
                          multiprocess=True,
                          rotate_images=None,
                          min_size=20)
        chain.launch()
        logger.debug("Loaded extractor")
        return chain
Example #4
0
 def __init__(self, arguments):
     """ Set up the extraction process.

     Parameters
     ----------
     arguments: argparse.Namespace
         The command line arguments passed to the extraction process
     """
     # Fixed: format string was missing its closing parenthesis ("(args: %s")
     logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                  arguments)
     self.args = arguments
     Utils.set_verbosity(self.args.loglevel)
     self.output_dir = get_folder(self.args.output_dir)
     logger.info("Output Directory: %s", self.args.output_dir)
     self.images = Images(self.args)
     self.alignments = Alignments(self.args, True, self.images.is_video)
     self.post_process = PostProcess(arguments)
     # `configfile` is not present for all invocations of the extract process
     configfile = self.args.configfile if hasattr(self.args,
                                                  "configfile") else None
     normalization = None if self.args.normalization == "none" else self.args.normalization
     self.extractor = Extractor(self.args.detector,
                                self.args.aligner,
                                self.args.loglevel,
                                configfile=configfile,
                                multiprocess=not self.args.singleprocess,
                                rotate_images=self.args.rotate_images,
                                min_size=self.args.min_size,
                                normalize_method=normalization)
     self.save_queue = queue_manager.get_queue("extract_save")
     self.verify_output = False
     # Periodic alignments saving is only enabled when requested
     self.save_interval = None
     if hasattr(self.args, "save_interval"):
         self.save_interval = self.args.save_interval
     logger.debug("Initialized %s", self.__class__.__name__)
Example #5
0
    def __init__(self, arguments):
        """ Set up the extraction process.

        Parameters
        ----------
        arguments: argparse.Namespace
            The command line arguments passed to the extraction process
        """
        # Fixed: format string was missing its closing parenthesis ("(args: %s")
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._output_dir = str(get_folder(self._args.output_dir))

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir,
                                    load_with_hash=False,
                                    fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        # `configfile` is not present for all invocations of the extract process
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        normalization = None if self._args.normalization == "none" else self._args.normalization
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    self._args.masker,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)
Example #6
0
 def align_faces(self, detector_name, aligner_name, multiprocess):
     """ Run the requested detector and aligner over the filter images to retrieve
     their facial landmarks. """
     pipeline = Extractor(detector_name,
                          aligner_name,
                          multiprocess=multiprocess)
     self.run_extractor(pipeline)
     # Release the pipeline before loading the aligned faces
     del pipeline
     self.load_aligned_face()
Example #7
0
    def _get_extractor(self):
        """ Launch and return a mask :class:`~plugins.extract.pipeline.Extractor` plugin.

        Returns
        -------
        :class:`plugins.extract.pipeline.Extractor` or ``None``
            The launched Extractor, or ``None`` when only existing output is being updated
        """
        if self._update_type == "output":
            # Nothing needs extracting when we are only re-outputting existing data
            logger.debug("Update type `output` selected. Not launching extractor")
            return None
        logger.debug("masker: %s", self._mask_type)
        masker = Extractor(None,
                           None,
                           self._mask_type,
                           image_is_aligned=self._input_is_faces)
        masker.launch()
        logger.debug(masker)
        return masker
Example #8
0
    def _load_extractor(self):
        """ Load the cv2-dnn based face extraction chain.

        A CPU based extractor is used for On-The-Fly conversion so that the GPU is
        not stacked. The quality of the results will be poor.

        Exits the process when neither an alignments file nor on-the-fly conversion
        is available.

        Returns
        -------
        :class:`plugins.extract.Pipeline.Extractor` or ``None``
            The face extraction chain to be used for on-the-fly conversion, or
            ``None`` when a pre-existing alignments file will be used instead
        """
        have_alignments = self._alignments.have_alignments_file
        if not have_alignments and not self._args.on_the_fly:
            # No way to obtain face data: neither stored alignments nor on-the-fly
            logger.error("No alignments file found. Please provide an alignments file "
                         "for your destination video (recommended) or enable "
                         "on-the-fly conversion (not recommended).")
            sys.exit(1)
        if have_alignments:
            if self._args.on_the_fly:
                logger.info("On-The-Fly conversion selected, but an alignments file "
                            "was found. Using pre-existing alignments file: '%s'",
                            self._alignments.file)
            else:
                logger.debug("Alignments file found: '%s'", self._alignments.file)
            return None

        logger.debug("Loading extractor")
        logger.warning("On-The-Fly conversion selected. This will use the inferior "
                       "cv2-dnn for extraction and will produce poor results.")
        logger.warning("It is recommended to generate an alignments file for your "
                       "destination video with Extract first for superior results.")
        chain = Extractor(detector="cv2-dnn",
                          aligner="cv2-dnn",
                          masker=self._args.mask_type,
                          multiprocess=True,
                          rotate_images=None,
                          min_size=20)
        chain.launch()
        logger.debug("Loaded extractor")
        return chain
Example #9
0
    def load_extractor(self):
        """ Load an on-the-fly extraction pipeline when no alignments file exists.

        Returns
        -------
        :class:`plugins.extract.pipeline.Extractor` or ``None``
            The launched cv2-dnn extraction pipeline, or ``None`` when a
            pre-existing alignments file can be used instead
        """
        if self.alignments.have_alignments_file:
            return None

        logger.debug("Loading extractor")
        logger.warning("No Alignments file found. Extracting on the fly.")
        # Fixed user-facing message: "perfom" typo and doubled space
        logger.warning(
            "NB: This will use the inferior cv2-dnn for extraction "
            "and landmarks. It is recommended to perform Extract first for "
            "superior results")
        extractor = Extractor(detector="cv2-dnn",
                              aligner="cv2-dnn",
                              multiprocess=False,
                              rotate_images=None,
                              min_size=20)
        extractor.launch()
        logger.debug("Loaded extractor")
        return extractor
Example #10
0
    def load_extractor(self):
        """ Load an on-the-fly extraction pipeline when no alignments file exists.

        Returns
        -------
        :class:`plugins.extract.pipeline.Extractor` or ``None``
            The launched cv2-dnn extraction pipeline, or ``None`` when a
            pre-existing alignments file can be used instead
        """
        if self.alignments.have_alignments_file:
            return None

        logger.debug("Loading extractor")
        logger.warning("No Alignments file found. Extracting on the fly.")
        # Fixed user-facing message: "perfom" typo and doubled space
        logger.warning("NB: This will use the inferior cv2-dnn for extraction "
                       "and landmarks. It is recommended to perform Extract first for "
                       "superior results")
        extractor = Extractor(detector="cv2-dnn",
                              aligner="cv2-dnn",
                              loglevel=self.args.loglevel,
                              multiprocess=False,
                              rotate_images=None,
                              min_size=20)
        extractor.launch()
        logger.debug("Loaded extractor")
        return extractor
Example #11
0
 def launch_aligner(self):
     """ Launch an extraction pipeline running only the FAN aligner plugin to
     retrieve facial landmarks. """
     aligner = Extractor(None, "fan", None,
                         normalize_method="hist",
                         exclude_gpus=self._args.exclude_gpus)
     # Landmarks are processed one face at a time
     aligner.set_batchsize("align", 1)
     aligner.launch()
     return aligner
Example #12
0
 def init_extractor(self):
     """ Set up and launch the manual-detector/FAN-aligner extraction pipeline. """
     logger.debug("Initialize Extractor")
     pipeline = Extractor("manual", "fan",
                          multiprocess=True,
                          normalize_method="hist")
     self.queues["in"] = pipeline.input_queue
     # Process a single face at a time through both phases
     pipeline.set_batchsize("detector", 1)
     pipeline.set_batchsize("aligner", 1)
     pipeline.launch()
     logger.debug("Initialized Extractor")
     return pipeline
Example #13
0
    def _legacy_check(self):
        """ Check whether the alignments file came from the legacy extraction method.

        When any options have been specified the user is forced to re-extract all
        faces, otherwise the appropriate warnings are raised and the legacy mask
        pipeline is launched.
        """
        if self._arguments.large or self._arguments.extract_every_n != 1:
            logger.warning("This alignments file was generated with the legacy "
                           "extraction method.")
            logger.warning("You should run this extraction job, but with 'large' "
                           "deselected and 'extract-every-n' set to 1 to update "
                           "the alignments file.")
            logger.warning("You can then re-run this extraction job with your "
                           "chosen options.")
            sys.exit(0)

        maskers = ["components", "extended"]
        # Any masks beyond the landmark based ones were generated by neural nets
        nn_masks = [mask for mask in list(self._alignments.mask_summary)
                    if mask not in maskers]
        logtype = logger.warning if nn_masks else logger.info
        logtype("This alignments file was created with the legacy extraction "
                "method and will be updated.")
        logtype("Faces will be extracted using the new method and landmarks "
                "based masks will be regenerated.")
        if nn_masks:
            logtype("However, the NN based masks '%s' will be cropped to the "
                    "legacy extraction method, so you may want to run the mask "
                    "tool to regenerate these masks.", "', '".join(nn_masks))
        self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True)
        self._mask_pipeline.launch()
        # Update alignments versioning
        self._alignments._version = _VERSION  # pylint:disable=protected-access
Example #14
0
 def _init_aligner(self):
     """ Initialize Aligner in a background thread, and set it to :attr:`_aligner`. """
     logger.debug("Initialize Aligner")
     # The non-GPU (mask only) pipeline must be allocated before the GPU aligners
     for model in ("mask", "cv2-dnn", "FAN"):
         logger.debug("Initializing aligner: %s", model)
         align_plugin = None if model == "mask" else model
         pipeline = Extractor(None,
                              align_plugin,
                              ["components", "extended"],
                              multiprocess=True,
                              normalize_method="hist")
         if align_plugin:
             # Landmarks are obtained for a single face at a time
             pipeline.set_batchsize("align", 1)
         pipeline.launch()
         logger.debug("Initialized %s Extractor", model)
         self._aligners[model] = pipeline
Example #15
0
    def __init__(self, arguments):
        """ Set up the extraction process.

        Parameters
        ----------
        arguments: argparse.Namespace
            The command line arguments passed to the extraction process
        """
        # Fixed: format string was missing its closing parenthesis ("(args: %s")
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        # Periodic alignments saving is only enabled when requested
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)
Example #16
0
class Extract():  # pylint:disable=too-few-public-methods
    """ The Faceswap Face Extraction Process.

    The extraction process is responsible for detecting faces in a series of images/video, aligning
    these faces and then generating a mask.

    It leverages a series of user selected plugins, chained together using
    :mod:`plugins.extract.pipeline`.

    The extract process is self contained and should not be referenced by any other scripts, so it
    contains no public properties.

    Parameters
    ----------
    arguments: argparse.Namespace
        The arguments to be passed to the extraction process as generated from Faceswap's command
        line arguments
    """
    def __init__(self, arguments):
        # Fixed: format string was missing its closing parenthesis ("(args: %s")
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._output_dir = str(get_folder(self._args.output_dir))

        logger.info("Output Directory: %s", self._args.output_dir)
        self._images = ImagesLoader(self._args.input_dir,
                                    load_with_hash=False,
                                    fast_count=True)
        self._alignments = Alignments(self._args, True, self._images.is_video)

        self._existing_count = 0
        self._set_skip_list()

        self._post_process = PostProcess(arguments)
        # `configfile` is not present for all invocations of the extract process
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        normalization = None if self._args.normalization == "none" else self._args.normalization
        self._extractor = Extractor(self._args.detector,
                                    self._args.aligner,
                                    self._args.masker,
                                    configfile=configfile,
                                    multiprocess=not self._args.singleprocess,
                                    rotate_images=self._args.rotate_images,
                                    min_size=self._args.min_size,
                                    normalize_method=normalization)
        self._threads = list()
        self._verify_output = False
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def _save_interval(self):
        """ int: The number of frames to be processed between each saving of the alignments file if
        it has been provided, otherwise ``None`` """
        if hasattr(self._args, "save_interval"):
            return self._args.save_interval
        return None

    @property
    def _skip_num(self):
        """ int: Number of frames to skip if extract_every_n has been provided """
        return self._args.extract_every_n if hasattr(self._args,
                                                     "extract_every_n") else 1

    def _set_skip_list(self):
        """ Add the skip list to the image loader

        Checks against `extract_every_n` and the existence of alignments data (can exist if
        `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame
        indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`.
        """
        if self._skip_num == 1 and not self._alignments.data:
            logger.debug("No frames to be skipped")
            return
        skip_list = []
        for idx, filename in enumerate(self._images.file_list):
            if idx % self._skip_num != 0:
                logger.trace(
                    "Adding image '%s' to skip list due to extract_every_n = %s",
                    filename, self._skip_num)
                skip_list.append(idx)
            # Items may be in the alignments file if skip-existing[-faces] is selected
            elif os.path.basename(filename) in self._alignments.data:
                self._existing_count += 1
                logger.trace("Removing image: '%s' due to previously existing",
                             filename)
                skip_list.append(idx)
        if self._existing_count != 0:
            logger.info(
                "Skipping %s frames due to skip_existing/skip_existing_faces.",
                self._existing_count)
        logger.debug("Adding skip list: %s", skip_list)
        self._images.add_skip_list(skip_list)

    def process(self):
        """ The entry point for triggering the Extraction Process.

        Should only be called from  :class:`lib.cli.ScriptExecutor`
        """
        logger.info('Starting, this may take a while...')
        # from lib.queue_manager import queue_manager ; queue_manager.debug_monitor(3)
        self._threaded_redirector("load")
        self._run_extraction()
        for thread in self._threads:
            thread.join()
        self._alignments.save()
        finalize(self._images.process_count + self._existing_count,
                 self._alignments.faces_count, self._verify_output)

    def _threaded_redirector(self, task, io_args=None):
        """ Redirect image input/output tasks to relevant queues in background thread

        Parameters
        ----------
        task: str
            The name of the task to be put into a background thread
        io_args: tuple, optional
            Any arguments that need to be provided to the background function
        """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        func = getattr(self, "_{}".format(task))
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        self._threads.append(io_thread)

    def _load(self):
        """ Load the images

        Loads images from :class:`lib.image.ImagesLoader`, formats them into a dict compatible
        with :class:`plugins.extract.Pipeline.Extractor` and passes them into the extraction queue.
        """
        logger.debug("Load Images: Start")
        load_queue = self._extractor.input_queue
        for filename, image in self._images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            # Strip any alpha channel before feeding the pipeline
            item = ExtractMedia(filename, image[..., :3])
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def _reload(self, detected_faces):
        """ Reload the images and pair to detected face

        When the extraction pipeline is running in serial mode, images are reloaded from disk,
        paired with their extraction data and passed back into the extraction queue

        Parameters
        ----------
        detected_faces: dict
            Dictionary of :class:`plugins.extract.pipeline.ExtractMedia` with the filename as the
            key for repopulating the image attribute.
        """
        logger.debug("Reload Images: Start. Detected Faces Count: %s",
                     len(detected_faces))
        load_queue = self._extractor.input_queue
        for filename, image in self._images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            extract_media = detected_faces.pop(filename, None)
            if not extract_media:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            extract_media.set_image(image)
            load_queue.put(extract_media)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def _run_extraction(self):
        """ The main Faceswap Extraction process

        Receives items from :class:`plugins.extract.Pipeline.Extractor` and either saves out the
        faces and data (if on the final pass) or reprocesses data through the pipeline for serial
        processing.
        """
        size = self._args.size if hasattr(self._args, "size") else 256
        saver = ImagesSaver(self._output_dir, as_bytes=True)
        exception = False
        phase_desc = "Extraction"

        for phase in range(self._extractor.passes):
            if exception:
                break
            is_final = self._extractor.final_pass
            detected_faces = dict()
            self._extractor.launch()
            self._check_thread_error()
            if self._args.singleprocess:
                phase_desc = self._extractor.phase.title()
            desc = "Running pass {} of {}: {}".format(phase + 1,
                                                      self._extractor.passes,
                                                      phase_desc)
            status_bar = tqdm(self._extractor.detected_faces(),
                              total=self._images.process_count,
                              file=sys.stdout,
                              desc=desc)
            for idx, extract_media in enumerate(status_bar):
                self._check_thread_error()
                if is_final:
                    self._output_processing(extract_media, size)
                    self._output_faces(saver, extract_media)
                    if self._save_interval and (idx +
                                                1) % self._save_interval == 0:
                        self._alignments.save()
                else:
                    extract_media.remove_image()
                    # cache extract_media for next run
                    detected_faces[extract_media.filename] = extract_media
                status_bar.update(1)

            if not is_final:
                logger.debug("Reloading images")
                self._threaded_redirector("reload", detected_faces)
        saver.close()

    def _check_thread_error(self):
        """ Check if any errors have occurred in the running threads and their errors """
        for thread in self._threads:
            thread.check_and_raise_error()

    def _output_processing(self, extract_media, size):
        """ Prepare faces for output

        Loads the aligned face, perform any processing actions and verify the output.

        Parameters
        ----------
        extract_media: :class:`plugins.extract.pipeline.ExtractMedia`
            Output from :class:`plugins.extract.pipeline.Extractor`
        size: int
            The size that the aligned face should be created at
        """
        for face in extract_media.detected_faces:
            face.load_aligned(extract_media.image, size=size)

        self._post_process.do_actions(extract_media)
        extract_media.remove_image()

        faces_count = len(extract_media.detected_faces)
        if faces_count == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(extract_media.filename))

        if not self._verify_output and faces_count > 1:
            self._verify_output = True

    def _output_faces(self, saver, extract_media):
        """ Output faces to save thread

        Set the face filename based on the frame name and put the face to the
        :class:`~lib.image.ImagesSaver` save queue and add the face information to the alignments
        data.

        Parameters
        ----------
        saver: lib.images.ImagesSaver
            The background saver for saving the image
        extract_media: :class:`~plugins.extract.pipeline.ExtractMedia`
            The output from :class:`~plugins.extract.Pipeline.Extractor`
        """
        logger.trace("Outputting faces for %s", extract_media.filename)
        final_faces = list()
        filename, extension = os.path.splitext(
            os.path.basename(extract_media.filename))
        for idx, face in enumerate(extract_media.detected_faces):
            output_filename = "{}_{}{}".format(filename, str(idx), extension)
            face.hash, image = encode_image_with_hash(face.aligned_face,
                                                      extension)

            saver.save(output_filename, image)
            final_faces.append(face.to_alignment())
        self._alignments.data[os.path.basename(
            extract_media.filename)] = dict(faces=final_faces)
        del extract_media
Example #17
0
class Extract():
    """ The extract process.

    Loads frames in a background thread, runs them through the detector/aligner
    pipeline, then saves the extracted faces and their alignments data.
    """
    def __init__(self, arguments):
        """ Parameters
        ----------
        arguments: :class:`argparse.Namespace`
            The command line arguments for the extract process
        """
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector,
                                   self.args.aligner,
                                   self.args.loglevel,
                                   self.args.multiprocess,
                                   self.args.rotate_images,
                                   self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        # Alignments are only saved mid-run when save_interval is passed on the cli
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def skip_num(self):
        """ int: Number of frames to skip if extract_every_n is passed, otherwise 1 """
        return self.args.extract_every_n if hasattr(self.args, "extract_every_n") else 1

    def process(self):
        """ Perform the extraction process.

        Loading and saving run in background threads whilst extraction runs in the
        main thread. The save thread is joined before the alignments file is written.
        """
        logger.info('Starting, this may take a while...')
        # queue_manager.debug_monitor(3)
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction()
        save_thread.join()
        self.alignments.save()
        Utils.finalize(self.images.images_found // self.skip_num,
                       self.alignments.faces_count,
                       self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Perform I/O task in a background thread.

        ``task`` must be one of "load", "save" or "reload"; any other value leaves
        ``func`` unbound and raises UnboundLocalError. Returns the started thread.
        """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Load the images and feed them to the extractor's input queue """
        logger.debug("Load Images: Start")
        load_queue = self.extractor.input_queue
        idx = 0
        for filename, image in self.images.load():
            idx += 1
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if idx % self.skip_num != 0:
                logger.trace("Skipping image '%s' due to extract_every_n = %s",
                             filename, self.skip_num)
                continue
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'", filename)
                continue
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                # Frame already exists in the alignments file, so skip re-extraction
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {"filename": filename,
                    "image": image}
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the images and pair each to its previously detected faces """
        logger.debug("Reload Images: Start. Detected Faces Count: %s", len(detected_faces))
        load_queue = self.extractor.input_queue
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Save the generated faces from the save queue until EOF is received """
        logger.debug("Save Faces: Start")
        while True:
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            logger.trace(item)
            if item == "EOF":
                break
            filename, face = item

            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, "wb") as out_file:
                    out_file.write(face)
            except Exception as err:  # pylint: disable=broad-except
                # A single failed write should not kill the save thread
                logger.error("Failed to save image '%s'. Original Error: %s", filename, err)
                continue
        logger.debug("Save Faces: Complete")

    def process_item_count(self):
        """ Return the number of items to be processed """
        processed = sum(os.path.basename(frame) in self.alignments.data.keys()
                        for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = (self.images.images_found - processed) // self.skip_num
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            # sys.exit rather than the site built-in `exit`, which is only
            # guaranteed to exist in interactive sessions
            sys.exit(0)
        return to_process

    def run_extraction(self):
        """ Run Face Detection """
        to_process = self.process_item_count()
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args, "align_eyes") else False
        exception = False

        for phase in range(self.extractor.passes):
            if exception:
                break
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            for idx, faces in enumerate(tqdm(self.extractor.detected_faces(),
                                             total=to_process,
                                             file=sys.stdout,
                                             desc="Running pass {} of {}: {}".format(
                                                 phase + 1,
                                                 self.extractor.passes,
                                                 self.extractor.phase.title()))):

                exception = faces.get("exception", False)
                if exception:
                    break
                filename = faces["filename"]

                if self.extractor.final_pass:
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    # (idx + 1) must be parenthesized: `idx + 1 % interval` binds the
                    # modulo first, so the periodic save would never have triggered
                    if self.save_interval and (idx + 1) % self.save_interval == 0:
                        self.alignments.save()
                else:
                    del faces["image"]
                    detected_faces[filename] = faces

            if is_final:
                logger.debug("Putting EOF to save")
                self.save_queue.put("EOF")
            else:
                logger.debug("Reloading images")
                self.threaded_io("reload", detected_faces)

    def output_processing(self, faces, align_eyes, size, filename):
        """ Prepare faces for output and run post-processing actions """
        self.align_face(faces, align_eyes, size, filename)
        self.post_process.do_actions(faces)

        faces_count = len(faces["detected_faces"])
        if faces_count == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(filename))

        if not self.verify_output and faces_count > 1:
            self.verify_output = True

    def align_face(self, faces, align_eyes, size, filename):
        """ Align the detected face and add the destination file path """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_bounding_box(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.load_aligned(image, size=size, align_eyes=align_eyes)
            final_faces.append({"file_location": self.output_dir / Path(filename).stem,
                                "face": detected_face})
        faces["detected_faces"] = final_faces

    def output_faces(self, filename, faces):
        """ Output faces to save thread and record their alignment data """
        final_faces = list()
        for idx, detected_face in enumerate(faces["detected_faces"]):
            output_file = detected_face["file_location"]
            extension = Path(filename).suffix
            out_filename = "{}_{}{}".format(str(output_file), str(idx), extension)

            face = detected_face["face"]
            resized_face = face.aligned_face

            face.hash, img = hash_encode_image(resized_face, extension)
            self.save_queue.put((out_filename, img))
            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
Example #18
0
class Extract():
    """ The extract process.

    Loads frames in a background thread, passes them through the detector/aligner
    pipeline and saves the extracted faces and their alignments data.
    """
    def __init__(self, arguments):
        """ Parameters
        ----------
        arguments: :class:`argparse.Namespace`
            The command line arguments for the extract process
        """
        logger.debug("Initializing %s: (args: %s", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        self.extractor = Extractor(self.args.detector, self.args.aligner,
                                   self.args.loglevel, self.args.multiprocess,
                                   self.args.rotate_images, self.args.min_size)

        self.save_queue = queue_manager.get_queue("extract_save")
        self.verify_output = False
        # Alignments are only saved mid-run when save_interval is passed on the cli
        self.save_interval = None
        if hasattr(self.args, "save_interval"):
            self.save_interval = self.args.save_interval
        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def skip_num(self):
        """ int: Number of frames to skip if extract_every_n is passed, otherwise 1 """
        return self.args.extract_every_n if hasattr(self.args,
                                                    "extract_every_n") else 1

    def process(self):
        """ Perform the extraction process.

        Loading and saving run in background threads whilst extraction runs in the
        main thread. The save thread is joined before the alignments file is written.
        """
        logger.info('Starting, this may take a while...')
        # queue_manager.debug_monitor(3)
        self.threaded_io("load")
        save_thread = self.threaded_io("save")
        self.run_extraction()
        save_thread.join()
        self.alignments.save()
        Utils.finalize(self.images.images_found // self.skip_num,
                       self.alignments.faces_count, self.verify_output)

    def threaded_io(self, task, io_args=None):
        """ Perform I/O task in a background thread.

        ``task`` must be one of "load", "save" or "reload"; any other value leaves
        ``func`` unbound and raises UnboundLocalError. Returns the started thread.
        """
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = tuple() if io_args is None else (io_args, )
        if task == "load":
            func = self.load_images
        elif task == "save":
            func = self.save_faces
        elif task == "reload":
            func = self.reload_images
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        return io_thread

    def load_images(self):
        """ Load the images and feed them to the extractor's input queue """
        logger.debug("Load Images: Start")
        load_queue = self.extractor.input_queue
        idx = 0
        for filename, image in self.images.load():
            idx += 1
            if load_queue.shutdown.is_set():
                logger.debug("Load Queue: Stop signal received. Terminating")
                break
            if idx % self.skip_num != 0:
                logger.trace("Skipping image '%s' due to extract_every_n = %s",
                             filename, self.skip_num)
                continue
            if image is None or not image.any():
                logger.warning("Unable to open image. Skipping: '%s'",
                               filename)
                continue
            imagename = os.path.basename(filename)
            if imagename in self.alignments.data.keys():
                # Frame already exists in the alignments file, so skip re-extraction
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {"filename": filename, "image": image}
            load_queue.put(item)
        load_queue.put("EOF")
        logger.debug("Load Images: Complete")

    def reload_images(self, detected_faces):
        """ Reload the images and pair each to its previously detected faces """
        logger.debug("Reload Images: Start. Detected Faces Count: %s",
                     len(detected_faces))
        load_queue = self.extractor.input_queue
        for filename, image in self.images.load():
            if load_queue.shutdown.is_set():
                logger.debug("Reload Queue: Stop signal received. Terminating")
                break
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if not detect_item:
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            detect_item["image"] = image
            load_queue.put(detect_item)
        load_queue.put("EOF")
        logger.debug("Reload Images: Complete")

    def save_faces(self):
        """ Save the generated faces from the save queue until EOF is received """
        logger.debug("Save Faces: Start")
        while True:
            if self.save_queue.shutdown.is_set():
                logger.debug("Save Queue: Stop signal received. Terminating")
                break
            item = self.save_queue.get()
            logger.trace(item)
            if item == "EOF":
                break
            filename, face = item

            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, "wb") as out_file:
                    out_file.write(face)
            except Exception as err:  # pylint: disable=broad-except
                # A single failed write should not kill the save thread
                logger.error("Failed to save image '%s'. Original Error: %s",
                             filename, err)
                continue
        logger.debug("Save Faces: Complete")

    def process_item_count(self):
        """ Return the number of items to be processed """
        processed = sum(
            os.path.basename(frame) in self.alignments.data.keys()
            for frame in self.images.input_images)
        logger.debug("Items already processed: %s", processed)

        if processed != 0 and self.args.skip_existing:
            logger.info("Skipping previously extracted frames: %s", processed)
        if processed != 0 and self.args.skip_faces:
            logger.info("Skipping frames with detected faces: %s", processed)

        to_process = (self.images.images_found - processed) // self.skip_num
        logger.debug("Items to be Processed: %s", to_process)
        if to_process == 0:
            logger.error("No frames to process. Exiting")
            queue_manager.terminate_queues()
            # NOTE(review): `exit` is the site-module helper; sys.exit(0) would be safer
            exit(0)
        return to_process

    def run_extraction(self):
        """ Run Face Detection """
        to_process = self.process_item_count()
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args,
                                                     "align_eyes") else False
        exception = False

        for phase in range(self.extractor.passes):
            if exception:
                break
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            for idx, faces in enumerate(
                    tqdm(self.extractor.detected_faces(),
                         total=to_process,
                         file=sys.stdout,
                         desc="Running pass {} of {}: {}".format(
                             phase + 1, self.extractor.passes,
                             self.extractor.phase.title()))):

                exception = faces.get("exception", False)
                if exception:
                    break
                filename = faces["filename"]

                if self.extractor.final_pass:
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    # Periodically flush alignments to disk when save_interval is set
                    if self.save_interval and (idx +
                                               1) % self.save_interval == 0:
                        self.alignments.save()
                else:
                    # Intermediate pass: drop the image to save memory; it is
                    # reloaded and re-paired by reload_images on the next pass
                    del faces["image"]
                    detected_faces[filename] = faces

            if is_final:
                logger.debug("Putting EOF to save")
                self.save_queue.put("EOF")
            else:
                logger.debug("Reloading images")
                self.threaded_io("reload", detected_faces)

    def output_processing(self, faces, align_eyes, size, filename):
        """ Prepare faces for output and run post-processing actions """
        self.align_face(faces, align_eyes, size, filename)
        self.post_process.do_actions(faces)

        faces_count = len(faces["detected_faces"])
        if faces_count == 0:
            logger.verbose("No faces were detected in image: %s",
                           os.path.basename(filename))

        if not self.verify_output and faces_count > 1:
            self.verify_output = True

    def align_face(self, faces, align_eyes, size, filename):
        """ Align the detected face and add the destination file path """
        final_faces = list()
        image = faces["image"]
        landmarks = faces["landmarks"]
        detected_faces = faces["detected_faces"]
        for idx, face in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_bounding_box(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.load_aligned(image, size=size, align_eyes=align_eyes)
            final_faces.append({
                "file_location":
                self.output_dir / Path(filename).stem,
                "face":
                detected_face
            })
        faces["detected_faces"] = final_faces

    def output_faces(self, filename, faces):
        """ Output faces to save thread and record their alignment data """
        final_faces = list()
        for idx, detected_face in enumerate(faces["detected_faces"]):
            output_file = detected_face["file_location"]
            extension = Path(filename).suffix
            out_filename = "{}_{}{}".format(str(output_file), str(idx),
                                            extension)

            face = detected_face["face"]
            resized_face = face.aligned_face

            face.hash, img = hash_encode_image(resized_face, extension)
            self.save_queue.put((out_filename, img))
            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
Example #19
0
class Extract():  # pylint:disable=too-few-public-methods
    """ Re-extract faces from source frames based on Alignment data

    Parameters
    ----------
    alignments: :class:`tools.lib_alignments.media.AlignmentData`
        The alignments data loaded from an alignments file for this rename job
    arguments: :class:`argparse.Namespace`
        The :mod:`argparse` arguments as passed in from :mod:`tools.py`
    """
    def __init__(self, alignments, arguments):
        logger.debug("Initializing %s: (arguments: %s)",
                     self.__class__.__name__, arguments)
        self._arguments = arguments
        self._alignments = alignments
        self._is_legacy = self._alignments.version == 1.0  # pylint:disable=protected-access
        self._mask_pipeline = None
        self._faces_dir = arguments.faces_dir
        self._frames = Frames(arguments.frames_dir)
        self._extracted_faces = ExtractedFaces(self._frames,
                                               self._alignments,
                                               size=arguments.size)
        self._saver = None
        logger.debug("Initialized %s", self.__class__.__name__)

    def process(self):
        """ Run the re-extraction from Alignments file process"""
        logger.info("[EXTRACT FACES]")  # Tidy up cli output
        self._check_folder()
        if self._is_legacy:
            self._legacy_check()
        self._saver = ImagesSaver(self._faces_dir, as_bytes=True)
        self._export_faces()

    def _check_folder(self):
        """ Check that the faces folder doesn't pre-exist and create. """
        err = None
        if not self._faces_dir:
            err = "ERROR: Output faces folder not provided."
        elif not os.path.isdir(self._faces_dir):
            logger.debug("Creating folder: '%s'", self._faces_dir)
            os.makedirs(self._faces_dir)
        elif os.listdir(self._faces_dir):
            err = "ERROR: Output faces folder should be empty: '{}'".format(
                self._faces_dir)
        if err:
            logger.error(err)
            sys.exit(0)
        logger.verbose("Creating output folder at '%s'", self._faces_dir)

    def _legacy_check(self):
        """ Check whether the alignments file was created with the legacy extraction method.

        If so, force user to re-extract all faces if any options have been specified, otherwise
        raise the appropriate warnings and set the legacy options.
        """
        if self._arguments.large or self._arguments.extract_every_n != 1:
            logger.warning(
                "This alignments file was generated with the legacy extraction method."
            )
            logger.warning(
                "You should run this extraction job, but with 'large' deselected and "
                "'extract-every-n' set to 1 to update the alignments file.")
            logger.warning(
                "You can then re-run this extraction job with your chosen options."
            )
            sys.exit(0)

        # Landmarks based masks are regenerated by the pipeline; NN based masks can only
        # be padded/shifted (see _pad_legacy_masks)
        maskers = ["components", "extended"]
        nn_masks = [
            mask for mask in list(self._alignments.mask_summary)
            if mask not in maskers
        ]
        logtype = logger.warning if nn_masks else logger.info
        logtype(
            "This alignments file was created with the legacy extraction method and will be "
            "updated.")
        logtype(
            "Faces will be extracted using the new method and landmarks based masks will be "
            "regenerated.")
        if nn_masks:
            logtype(
                "However, the NN based masks '%s' will be cropped to the legacy extraction "
                "method, so you may want to run the mask tool to regenerate these "
                "masks.", "', '".join(nn_masks))
        self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True)
        self._mask_pipeline.launch()
        # Update alignments versioning
        self._alignments._version = _VERSION  # pylint:disable=protected-access

    def _export_faces(self):
        """ Export the faces to the output folder. """
        extracted_faces = 0
        skip_list = self._set_skip_list()
        count = self._frames.count if skip_list is None else self._frames.count - len(
            skip_list)
        for filename, image in tqdm(self._frames.stream(skip_list=skip_list),
                                    total=count,
                                    desc="Saving extracted faces"):
            frame_name = os.path.basename(filename)
            if not self._alignments.frame_exists(frame_name):
                logger.verbose("Skipping '%s' - Alignments not found",
                               frame_name)
                continue
            extracted_faces += self._output_faces(frame_name, image)
        # Close the saver once, after ALL frames have been processed, so the background
        # save thread is joined and every queued image is flushed to disk. Closing
        # inside _output_faces (per frame) would kill the saver after the first frame.
        self._saver.close()
        if self._is_legacy and extracted_faces != 0 and not self._arguments.large:
            self._alignments.save()
        logger.info("%s face(s) extracted", extracted_faces)

    def _set_skip_list(self):
        """ Set the indices for frames that should be skipped based on the `extract_every_n`
        command line option.

        Returns
        -------
        list or ``None``
            A list of indices to be skipped if extract_every_n is not `1` otherwise
            returns ``None``
        """
        skip_num = self._arguments.extract_every_n
        if skip_num == 1:
            logger.debug("Not skipping any frames")
            return None
        skip_list = []
        for idx, item in enumerate(self._frames.file_list_sorted):
            if idx % skip_num != 0:
                logger.trace(
                    "Adding image '%s' to skip list due to extract_every_n = %s",
                    item["frame_fullname"], skip_num)
                skip_list.append(idx)
        logger.debug("Adding skip list: %s", skip_list)
        return skip_list

    def _output_faces(self, filename, image):
        """ For each frame save out the faces

        Parameters
        ----------
        filename: str
            The filename (without the full path) of the current frame
        image: :class:`numpy.ndarray`
            The full frame that faces are to be extracted from

        Returns
        -------
        int
            The total number of faces that have been extracted
        """
        logger.trace("Outputting frame: %s", filename)
        face_count = 0
        frame_name = os.path.splitext(filename)[0]
        faces = self._select_valid_faces(filename, image)
        if not faces:
            return face_count
        if self._is_legacy:
            faces = self._process_legacy(filename, image, faces)

        for idx, face in enumerate(faces):
            output = "{}_{}.png".format(frame_name, str(idx))
            meta = dict(alignments=face.to_png_meta(),
                        source=dict(
                            alignments_version=self._alignments.version,
                            original_filename=output,
                            face_index=idx,
                            source_filename=filename,
                            source_is_video=self._frames.is_video))
            self._saver.save(
                output, encode_image(face.aligned.face, ".png", metadata=meta))
            if not self._arguments.large and self._is_legacy:
                face.thumbnail = generate_thumbnail(face.aligned.face,
                                                    size=96,
                                                    quality=60)
                self._alignments.data[filename]["faces"][
                    idx] = face.to_alignment()
            face_count += 1
        # NB: the saver is deliberately NOT closed here. This method runs once per
        # frame; the saver is closed in _export_faces once all frames are complete.
        return face_count

    def _select_valid_faces(self, frame, image):
        """ Return the aligned faces from a frame that meet the selection criteria,

        Parameters
        ----------
        frame: str
            The filename (without the full path) of the current frame
        image: :class:`numpy.ndarray`
            The full frame that faces are to be extracted from

        Returns
        -------
        list:
            List of valid :class:`lib,align.DetectedFace` objects
        """
        faces = self._extracted_faces.get_faces_in_frame(frame, image=image)
        if not self._arguments.large:
            valid_faces = faces
        else:
            # 'large' only keeps faces whose original ROI is at least the extract size
            sizes = self._extracted_faces.get_roi_size_for_frame(frame)
            valid_faces = [
                faces[idx] for idx, size in enumerate(sizes)
                if size >= self._extracted_faces.size
            ]
        logger.trace("frame: '%s', total_faces: %s, valid_faces: %s", frame,
                     len(faces), len(valid_faces))
        return valid_faces

    def _process_legacy(self, filename, image, detected_faces):
        """ Process legacy face extractions to new extraction method.

        Updates stored masks to new extract size

        Parameters
        ----------
        filename: str
            The current frame filename
        image: :class:`numpy.ndarray`
            The current image the contains the faces
        detected_faces: list
            list of :class:`lib.align.DetectedFace` objects for the current frame

        Returns
        -------
        list
            The updated list of :class:`lib.align.DetectedFace` objects for the frame
        """
        # Update landmarks based masks for face centering
        mask_item = ExtractMedia(filename,
                                 image,
                                 detected_faces=detected_faces)
        self._mask_pipeline.input_queue.put(mask_item)
        faces = next(self._mask_pipeline.detected_faces()).detected_faces

        # Pad and shift Neural Network based masks to face centering
        for face in faces:
            self._pad_legacy_masks(face)
        return faces

    @classmethod
    def _pad_legacy_masks(cls, detected_face):
        """ Recenter legacy Neural Network based masks from legacy centering to face centering
        and pad accordingly.

        Update the masks back into the detected face objects.

        Parameters
        ----------
        detected_face: :class:`lib.align.DetectedFace`
            The detected face to update the masks for
        """
        offset = detected_face.aligned.pose.offset["face"]
        for name, mask in detected_face.mask.items():  # Re-center mask and pad to face size
            if name in ("components", "extended"):
                continue  # Landmarks based masks have already been regenerated
            old_mask = mask.mask.astype("float32") / 255.0
            size = old_mask.shape[0]
            new_size = int(size + (size * _EXTRACT_RATIOS["face"]) / 2)

            shift = np.rint(offset *
                            (size -
                             (size * _EXTRACT_RATIOS["face"]))).astype("int32")
            pos = np.array([(new_size // 2 - size // 2) - shift[1],
                            (new_size // 2) + (size // 2) - shift[1],
                            (new_size // 2 - size // 2) - shift[0],
                            (new_size // 2) + (size // 2) - shift[0]])
            # Clamp the target window to the new mask's bounds
            bounds = np.array([
                max(0, pos[0]),
                min(new_size, pos[1]),
                max(0, pos[2]),
                min(new_size, pos[3])
            ])

            slice_in = [
                slice(0 - (pos[0] - bounds[0]), size - (pos[1] - bounds[1])),
                slice(0 - (pos[2] - bounds[2]), size - (pos[3] - bounds[3]))
            ]
            slice_out = [
                slice(bounds[0], bounds[1]),
                slice(bounds[2], bounds[3])
            ]

            new_mask = np.zeros((new_size, new_size, 1), dtype="float32")
            new_mask[slice_out[0], slice_out[1], :] = old_mask[slice_in[0],
                                                               slice_in[1], :]

            mask.replace_mask(new_mask)
            # Get the affine matrix from recently generated components mask
            # pylint:disable=protected-access
            mask._affine_matrix = detected_face.mask[
                "components"].affine_matrix
Example #20
0
 def launch_aligner():
     """ Load the aligner plugin to retrieve landmarks """
     # Aligner-only pipeline: no detector or masker, 'fan' aligner with
     # histogram normalization and a batch size of 1
     aligner = Extractor(None, "fan", None, normalize_method="hist")
     aligner.set_batchsize("align", 1)
     aligner.launch()
     return aligner