Example #1
 def start_thread(self):
     """ Put the training process in a thread so we can keep control """
     logger.debug("Launching Trainer thread")
     thread = MultiThread(target=self.training)
     thread.start()
     logger.debug("Launched Trainer thread")
     return thread
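
The pattern above recurs throughout this page: wrap a callable in MultiThread, start it, and keep the handle so the caller can supervise it. Below is a minimal, illustrative sketch of such a caller; it uses only methods that appear in the other examples on this page (is_alive, check_and_raise_error, join), and the one-second polling interval is an assumption.

import time

def supervise(thread):
    # Illustrative only: poll the MultiThread handle returned by start_thread(),
    # re-raising any exception from the worker and joining once it finishes.
    while thread.is_alive():             # is_alive() as used in Example #10
        thread.check_and_raise_error()   # re-raise worker exceptions (Examples #8/#9)
        time.sleep(1)                    # polling interval is an assumption
    thread.join()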
Example #2
 def detect_faces(self, *args, **kwargs):
     """ Detect faces in Multiple Threads """
     super().detect_faces(*args, **kwargs)
     workers = MultiThread(target=self.detect_thread, thread_count=self.batch_size)
     workers.start()
     workers.join()
     sentinel = self.queues["in"].get()
     self.queues["out"].put(sentinel)
     logger.debug("Detecting Faces complete")
Example #3
 def start_thread(self, task):
     """ Start the DiskIO thread """
     logger.debug("Starting thread: '%s'", task)
     args = self.completion_event if task == "save" else None
     func = getattr(self, task)
     io_thread = MultiThread(func, args, thread_count=1)
     io_thread.start()
     setattr(self, "{}_thread".format(task), io_thread)
     logger.debug("Started thread: '%s'", task)
Example #4
 def threaded_io(self, task, io_args=None):
     """ Perform I/O task in a background thread """
     logger.debug("Threading task: (Task: '%s')", task)
     io_args = tuple() if io_args is None else (io_args, )
     if task == "load":
         func = self.load_images
     elif task == "save":
         func = self.save_faces
     elif task == "reload":
         func = self.reload_images
     io_thread = MultiThread(func, *io_args, thread_count=1)
     io_thread.start()
     return io_thread
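
threaded_io dispatches one of three bound methods and wraps a single optional argument into a one-element tuple before unpacking it into MultiThread. A hypothetical call site might look like the following; the save_queue argument is an assumption for illustration.

# Hypothetical call sites for threaded_io(); argument names are illustrative.
load_thread = self.threaded_io("load")               # no extra argument passed through
save_thread = self.threaded_io("save", save_queue)   # wrapped into (save_queue, )
load_thread.join()
save_thread.join()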
Example #5
    def __init__(self, in_queue, queue_size, arguments):
        logger.debug("Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
                     self.__class__.__name__, arguments, queue_size, in_queue)
        self.batchsize = min(queue_size, 16)
        self.args = arguments
        self.in_queue = in_queue
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False
        self.model = self.load_model()
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)", self.__class__.__name__, self.out_queue)
Example #6
class Predict():
    """ Predict faces from incoming queue """
    def __init__(self, in_queue, queue_size, arguments):
        logger.debug(
            "Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
            self.__class__.__name__, arguments, queue_size, in_queue)
        self.batchsize = self.get_batchsize(queue_size)
        self.args = arguments
        self.in_queue = in_queue
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False
        self.model = self.load_model()
        self.output_indices = {
            "face": self.model.largest_face_index,
            "mask": self.model.largest_mask_index
        }
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)",
                     self.__class__.__name__, self.out_queue)

    @property
    def coverage_ratio(self):
        """ Return coverage ratio from training options """
        return self.model.training_opts["coverage_ratio"]

    @property
    def input_size(self):
        """ Return the model input size """
        return self.model.input_shape[0]

    @property
    def output_size(self):
        """ Return the model output size """
        return self.model.output_shape[0]

    @property
    def input_mask(self):
        """ Return the input mask """
        mask = np.zeros((1, ) + self.model.state.mask_shapes[0],
                        dtype="float32")
        return mask

    @property
    def has_predicted_mask(self):
        """ Return whether this model has a predicted mask """
        return bool(self.model.state.mask_shapes)

    @staticmethod
    def get_batchsize(queue_size):
        """ Get the batchsize """
        logger.debug("Getting batchsize")
        is_cpu = GPUStats().device_count == 0
        batchsize = 1 if is_cpu else 16
        batchsize = min(queue_size, batchsize)
        logger.debug("Batchsize: %s", batchsize)
        logger.debug("Got batchsize: %s", batchsize)
        return batchsize

    def load_model(self):
        """ Load the model requested for conversion """
        logger.debug("Loading Model")
        model_dir = get_folder(self.args.model_dir, make_folder=False)
        if not model_dir:
            logger.error("%s does not exist.", self.args.model_dir)
            exit(1)
        trainer = self.get_trainer(model_dir)
        gpus = 1 if not hasattr(self.args, "gpus") else self.args.gpus
        model = PluginLoader.get_model(trainer)(model_dir, gpus, predict=True)
        logger.debug("Loaded Model")
        return model

    def get_trainer(self, model_dir):
        """ Return the trainer name if provided, or read from state file """
        if hasattr(self.args, "trainer") and self.args.trainer:
            logger.debug("Trainer name provided: '%s'", self.args.trainer)
            return self.args.trainer

        statefile = [
            fname for fname in os.listdir(str(model_dir))
            if fname.endswith("_state.json")
        ]
        if len(statefile) != 1:
            logger.error(
                "There should be 1 state file in your model folder. %s were found. "
                "Specify a trainer with the '-t', '--trainer' option.",
                len(statefile))
            exit(1)
        statefile = os.path.join(str(model_dir), statefile[0])

        with open(statefile, "rb") as inp:
            state = self.serializer.unmarshal(inp.read().decode("utf-8"))
            trainer = state.get("name", None)

        if not trainer:
            logger.error(
                "Trainer name could not be read from state file. "
                "Specify a trainer with the '-t', '--trainer' option.")
            exit(1)
        logger.debug("Trainer from state file: '%s'", trainer)
        return trainer

    def predict_faces(self):
        """ Get detected faces from images """
        faces_seen = 0
        consecutive_no_faces = 0
        batch = list()
        is_plaidml = GPUStats().is_plaidml
        while True:
            item = self.in_queue.get()
            if item != "EOF":
                logger.trace("Got from queue: '%s'", item["filename"])
                faces_count = len(item["detected_faces"])

                # Safety measure. If a large stream of frames appear that do not have faces,
                # these will stack up into RAM. Keep a count of consecutive frames with no faces.
                # If self.batchsize number of frames appear, force the current batch through
                # to clear RAM.
                consecutive_no_faces = consecutive_no_faces + 1 if faces_count == 0 else 0
                self.faces_count += faces_count
                if faces_count > 1:
                    self.verify_output = True
                    logger.verbose(
                        "Found more than one face in an image! '%s'",
                        os.path.basename(item["filename"]))

                self.load_aligned(item)

                faces_seen += faces_count
                batch.append(item)

            if item != "EOF" and (faces_seen < self.batchsize
                                  and consecutive_no_faces < self.batchsize):
                logger.trace(
                    "Continuing. Current batchsize: %s, consecutive_no_faces: %s",
                    faces_seen, consecutive_no_faces)
                continue

            if batch:
                logger.trace("Batching to predictor. Frames: %s, Faces: %s",
                             len(batch), faces_seen)
                detected_batch = [
                    detected_face for item in batch
                    for detected_face in item["detected_faces"]
                ]
                if faces_seen != 0:
                    feed_faces = self.compile_feed_faces(detected_batch)
                    batch_size = None
                    if is_plaidml and feed_faces.shape[0] != self.batchsize:
                        logger.verbose("Fallback to BS=1")
                        batch_size = 1
                    predicted = self.predict(feed_faces, batch_size)
                else:
                    predicted = list()

                self.queue_out_frames(batch, predicted)

            consecutive_no_faces = 0
            faces_seen = 0
            batch = list()
            if item == "EOF":
                logger.debug("EOF Received")
                break
        logger.debug("Putting EOF")
        self.out_queue.put("EOF")
        logger.debug("Load queue complete")

    def load_aligned(self, item):
        """ Load the feed faces and reference output faces """
        logger.trace("Loading aligned faces: '%s'", item["filename"])
        for detected_face in item["detected_faces"]:
            detected_face.load_feed_face(item["image"],
                                         size=self.input_size,
                                         coverage_ratio=self.coverage_ratio,
                                         dtype="float32")
            if self.input_size == self.output_size:
                detected_face.reference = detected_face.feed
            else:
                detected_face.load_reference_face(
                    item["image"],
                    size=self.output_size,
                    coverage_ratio=self.coverage_ratio,
                    dtype="float32")
        logger.trace("Loaded aligned faces: '%s'", item["filename"])

    @staticmethod
    def compile_feed_faces(detected_faces):
        """ Compile the faces for feeding into the predictor """
        logger.trace("Compiling feed face. Batchsize: %s", len(detected_faces))
        feed_faces = np.stack(
            [detected_face.feed_face for detected_face in detected_faces])
        logger.trace("Compiled Feed faces. Shape: %s", feed_faces.shape)
        return feed_faces

    def predict(self, feed_faces, batch_size=None):
        """ Perform inference on the feed """
        logger.trace("Predicting: Batchsize: %s", len(feed_faces))
        feed = [feed_faces]
        if self.has_predicted_mask:
            feed.append(np.repeat(self.input_mask, feed_faces.shape[0],
                                  axis=0))
        logger.trace("Input shape(s): %s", [item.shape for item in feed])

        predicted = self.predictor(feed, batch_size=batch_size)
        predicted = predicted if isinstance(predicted, list) else [predicted]
        logger.trace("Output shape(s): %s",
                     [predict.shape for predict in predicted])

        predicted = self.filter_multi_out(predicted)

        # Compile masks into alpha channel or keep raw faces
        predicted = np.concatenate(
            predicted, axis=-1) if len(predicted) == 2 else predicted[0]
        predicted = predicted.astype("float32")

        logger.trace("Final shape: %s", predicted.shape)
        return predicted

    def filter_multi_out(self, predicted):
        """ Filter the predicted output to the final output """
        if not predicted:
            return predicted
        face = predicted[self.output_indices["face"]]
        mask_idx = self.output_indices["mask"]
        mask = predicted[mask_idx] if mask_idx is not None else None
        predicted = [face, mask] if mask is not None else [face]
        logger.trace("Filtered output shape(s): %s",
                     [predict.shape for predict in predicted])
        return predicted

    def queue_out_frames(self, batch, swapped_faces):
        """ Compile the batch back to original frames and put to out_queue """
        logger.trace("Queueing out batch. Batchsize: %s", len(batch))
        pointer = 0
        for item in batch:
            num_faces = len(item["detected_faces"])
            if num_faces == 0:
                item["swapped_faces"] = np.array(list())
            else:
                item["swapped_faces"] = swapped_faces[pointer:pointer +
                                                      num_faces]

            logger.trace(
                "Putting to queue. ('%s', detected_faces: %s, swapped_faces: %s)",
                item["filename"], len(item["detected_faces"]),
                item["swapped_faces"].shape[0])
            pointer += num_faces
        self.out_queue.put(batch)
        logger.trace("Queued out batch. Batchsize: %s", len(batch))
Example #7
 def in_thread(self, action):
     """ Perform selected action inside a thread """
     logger.debug("Performing help action: %s", action)
     thread = MultiThread(getattr(self, action), thread_count=1)
     thread.start()
     logger.debug("Performed help action: %s", action)
Example #8
class Convert():  # pylint:disable=too-few-public-methods
    """ The Faceswap Face Conversion Process.

    The conversion process is responsible for swapping the faces on source frames with the output
    from a trained model.

    It leverages a series of user selected post-processing plugins, executed from
    :class:`lib.convert.Converter`.

    The convert process is self contained and should not be referenced by any other scripts, so it
    contains no public properties.

    Parameters
    ----------
    arguments: :class:`argparse.Namespace`
        The arguments to be passed to the convert process as generated from Faceswap's command
        line arguments
    """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._patch_threads = None
        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
        self._alignments = Alignments(self._args, False, self._images.is_video)

        self._opts = OptionalActions(self._args, self._images.file_list,
                                     self._alignments)

        self._add_queues()
        self._disk_io = DiskIO(self._alignments, self._images, arguments)
        self._predictor = Predict(self._disk_io.load_queue, self._queue_size,
                                  arguments)
        self._validate()
        get_folder(self._args.output_dir)

        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        self._converter = Converter(self._predictor.output_size,
                                    self._predictor.coverage_ratio,
                                    self._disk_io.draw_transparent,
                                    self._disk_io.pre_encode,
                                    arguments,
                                    configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def _queue_size(self):
        """ int: Size of the converter queues. 16 for single process otherwise 32 """
        if self._args.singleprocess:
            retval = 16
        else:
            retval = 32
        logger.debug(retval)
        return retval

    @property
    def _pool_processes(self):
        """ int: The number of threads to run in parallel. Based on user options and number of
        available processors. """
        if self._args.singleprocess:
            retval = 1
        elif self._args.jobs > 0:
            retval = min(self._args.jobs, total_cpus(), self._images.count)
        else:
            retval = min(total_cpus(), self._images.count)
        retval = 1 if retval == 0 else retval
        logger.debug(retval)
        return retval

    def _validate(self):
        """ Validate the Command Line Options.

        Ensure that certain cli selections are valid and won't result in an error. Checks:
            * If frames have been passed in with video output, ensure user supplies reference
            video.
            * If a mask-type is selected, ensure it exists in the alignments file.
            * If a predicted mask-type is selected, ensure model has been trained with a mask
            otherwise attempt to select first available masks, otherwise raise error.

        Raises
        ------
        FaceswapError
            If an invalid selection has been found.

        """
        if (self._args.writer == "ffmpeg" and not self._images.is_video
                and self._args.reference_video is None):
            raise FaceswapError(
                "Output as video selected, but using frames as input. You must "
                "provide a reference video ('-ref', '--reference-video').")
        if (self._args.mask_type not in ("none", "predicted")
                and not self._alignments.mask_is_valid(self._args.mask_type)):
            msg = (
                "You have selected the Mask Type `{}` but at least one face does not have this "
                "mask stored in the Alignments File.\nYou should generate the required masks "
                "with the Mask Tool or set the Mask Type option to an existing Mask Type.\nA "
                "summary of existing masks is as follows:\nTotal faces: {}, Masks: "
                "{}".format(self._args.mask_type, self._alignments.faces_count,
                            self._alignments.mask_summary))
            raise FaceswapError(msg)
        if self._args.mask_type == "predicted" and not self._predictor.has_predicted_mask:
            available_masks = [
                k for k, v in self._alignments.mask_summary.items()
                if k != "none" and v == self._alignments.faces_count
            ]
            if not available_masks:
                msg = (
                    "Predicted Mask selected, but the model was not trained with a mask and no "
                    "masks are stored in the Alignments File.\nYou should generate the "
                    "required masks with the Mask Tool or set the Mask Type to `none`."
                )
                raise FaceswapError(msg)
            mask_type = available_masks[0]
            logger.warning(
                "Predicted Mask selected, but the model was not trained with a "
                "mask. Selecting first available mask: '%s'", mask_type)
            self._args.mask_type = mask_type

    def _add_queues(self):
        """ Add the queues for in, patch and out. """
        logger.debug("Adding queues. Queue size: %s", self._queue_size)
        for qname in ("convert_in", "convert_out", "patch"):
            queue_manager.add_queue(qname, self._queue_size)

    def process(self):
        """ The entry point for triggering the Conversion Process.

        Should only be called from :class:`lib.cli.launcher.ScriptExecutor`
        """
        logger.debug("Starting Conversion")
        # queue_manager.debug_monitor(5)
        try:
            self._convert_images()
            self._disk_io.save_thread.join()
            queue_manager.terminate_queues()

            finalize(self._images.count, self._predictor.faces_count,
                     self._predictor.verify_output)
            logger.debug("Completed Conversion")
        except MemoryError as err:
            msg = (
                "Faceswap ran out of RAM running convert. Conversion is very system RAM "
                "heavy, so this can happen in certain circumstances when you have a lot of "
                "cpus but not enough RAM to support them all."
                "\nYou should lower the number of processes in use by either setting the "
                "'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j)."
            )
            raise FaceswapError(msg) from err

    def _convert_images(self):
        """ Start the multi-threaded patching process, monitor all threads for errors and join on
        completion. """
        logger.debug("Converting images")
        save_queue = queue_manager.get_queue("convert_out")
        patch_queue = queue_manager.get_queue("patch")
        self._patch_threads = MultiThread(self._converter.process,
                                          patch_queue,
                                          save_queue,
                                          thread_count=self._pool_processes,
                                          name="patch")

        self._patch_threads.start()
        while True:
            self._check_thread_error()
            if self._disk_io.completion_event.is_set():
                logger.debug("DiskIO completion event set. Joining Pool")
                break
            if self._patch_threads.completed():
                logger.debug("All patch threads completed")
                break
            sleep(1)
        self._patch_threads.join()

        logger.debug("Putting EOF")
        save_queue.put("EOF")
        logger.debug("Converted images")

    def _check_thread_error(self):
        """ Monitor all running threads for errors, and raise accordingly. """
        for thread in (self._predictor.thread, self._disk_io.load_thread,
                       self._disk_io.save_thread, self._patch_threads):
            thread.check_and_raise_error()
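
Example #8 wires the whole pipeline together and then supervises it from _convert_images(). The monitoring idea generalizes to any collection of MultiThread handles; the sketch below condenses it, keeping only the calls the example itself makes (check_and_raise_error, completed, join) and treating the thread collection as a placeholder.

from time import sleep

def monitor(worker_threads, patch_threads, completion_event):
    # Condensed sketch of the loop in Convert._convert_images() above.
    while True:
        for thread in worker_threads:         # surface worker exceptions promptly
            thread.check_and_raise_error()
        if completion_event.is_set():         # DiskIO has finished
            break
        if patch_threads.completed():         # all patch threads exited
            break
        sleep(1)
    patch_threads.join()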
Example #9
class Convert():
    """ The convert process. """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)

        self.patch_threads = None
        self.images = Images(self.args)
        self.validate()
        self.alignments = Alignments(self.args, False, self.images.is_video)
        # Update Legacy alignments
        Legacy(self.alignments, self.images.input_images,
               arguments.input_aligned_dir)
        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)

        self.add_queues()
        self.disk_io = DiskIO(self.alignments, self.images, arguments)
        self.predictor = Predict(self.disk_io.load_queue, self.queue_size,
                                 arguments)

        configfile = self.args.configfile if hasattr(self.args,
                                                     "configfile") else None
        self.converter = Converter(get_folder(self.args.output_dir),
                                   self.predictor.output_size,
                                   self.predictor.has_predicted_mask,
                                   self.disk_io.draw_transparent,
                                   self.disk_io.pre_encode,
                                   arguments,
                                   configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def queue_size(self):
        """ Set 16 for singleprocess otherwise 32 """
        if self.args.singleprocess:
            retval = 16
        else:
            retval = 32
        logger.debug(retval)
        return retval

    @property
    def pool_processes(self):
        """ return the maximum number of pooled processes to use """
        if self.args.singleprocess:
            retval = 1
        elif self.args.jobs > 0:
            retval = min(self.args.jobs, total_cpus(),
                         self.images.images_found)
        else:
            retval = min(total_cpus(), self.images.images_found)
        retval = 1 if retval == 0 else retval
        logger.debug(retval)
        return retval

    def validate(self):
        """ Make the output folder if it doesn't exist and check that video flag is
            a valid choice """
        if (self.args.writer == "ffmpeg" and not self.images.is_video
                and self.args.reference_video is None):
            raise FaceswapError(
                "Output as video selected, but using frames as input. You must "
                "provide a reference video ('-ref', '--reference-video').")
        output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", output_dir)

    def add_queues(self):
        """ Add the queues for convert """
        logger.debug("Adding queues. Queue size: %s", self.queue_size)
        for qname in ("convert_in", "convert_out", "patch"):
            queue_manager.add_queue(qname,
                                    self.queue_size,
                                    multiprocessing_queue=False)

    def process(self):
        """ Process the conversion """
        logger.debug("Starting Conversion")
        # queue_manager.debug_monitor(5)
        try:
            self.convert_images()
            self.disk_io.save_thread.join()
            queue_manager.terminate_queues()

            Utils.finalize(self.images.images_found,
                           self.predictor.faces_count,
                           self.predictor.verify_output)
            logger.debug("Completed Conversion")
        except MemoryError as err:
            msg = (
                "Faceswap ran out of RAM running convert. Conversion is very system RAM "
                "heavy, so this can happen in certain circumstances when you have a lot of "
                "cpus but not enough RAM to support them all."
                "\nYou should lower the number of processes in use by either setting the "
                "'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j)."
            )
            raise FaceswapError(msg) from err

    def convert_images(self):
        """ Convert the images """
        logger.debug("Converting images")
        save_queue = queue_manager.get_queue("convert_out")
        patch_queue = queue_manager.get_queue("patch")
        self.patch_threads = MultiThread(self.converter.process,
                                         patch_queue,
                                         save_queue,
                                         thread_count=self.pool_processes,
                                         name="patch")

        self.patch_threads.start()
        while True:
            self.check_thread_error()
            if self.disk_io.completion_event.is_set():
                logger.debug("DiskIO completion event set. Joining Pool")
                break
            sleep(1)
        self.patch_threads.join()

        logger.debug("Putting EOF")
        save_queue.put("EOF")
        logger.debug("Converted images")

    def check_thread_error(self):
        """ Check and raise thread errors """
        for thread in (self.predictor.thread, self.disk_io.load_thread,
                       self.disk_io.save_thread, self.patch_threads):
            thread.check_and_raise_error()
Example #10
class ImageIO():
    """ Perform disk IO for images or videos in a background thread.

    This is the parent thread for :class:`ImagesLoader` and :class:`ImagesSaver` and should not
    be called directly.

    Parameters
    ----------
    path: str or list
        The path to load or save images to/from. For loading this can be a folder which contains
        images, video file or a list of image files. For saving this must be an existing folder.
    queue_size: int
        The number of images to hold in the internal buffer.
    args: tuple, optional
        The arguments to be passed to the loader or saver thread. Default: ``None``

    See Also
    --------
    lib.image.ImagesLoader : Background Image Loader inheriting from this class.
    lib.image.ImagesSaver : Background Image Saver inheriting from this class.
    """
    def __init__(self, path, queue_size, args=None):
        logger.debug("Initializing %s: (path: %s, queue_size: %s, args: %s)",
                     self.__class__.__name__, path, queue_size, args)

        self._args = tuple() if args is None else args

        self._location = path
        self._check_location_exists()

        self._queue = queue_manager.get_queue(name=self.__class__.__name__,
                                              maxsize=queue_size)
        self._thread = None

    @property
    def location(self):
        """ str: The folder or video that was passed in as the :attr:`path` parameter. """
        return self._location

    def _check_location_exists(self):
        """ Check whether the input location exists.

        Raises
        ------
        FaceswapError
            If the given location does not exist
        """
        if isinstance(self.location,
                      str) and not os.path.exists(self.location):
            raise FaceswapError("The location '{}' does not exist".format(
                self.location))
        if isinstance(self.location, (list, tuple)) and not all(
                os.path.exists(location) for location in self.location):
            raise FaceswapError("Not all locations in the input list exist")

    def _set_thread(self):
        """ Set the background thread for the load and save iterators and launch it. """
        logger.debug("Setting thread")
        if self._thread is not None and self._thread.is_alive():
            logger.debug("Thread pre-exists and is alive: %s", self._thread)
            return
        self._thread = MultiThread(self._process,
                                   self._queue,
                                   name=self.__class__.__name__,
                                   thread_count=1)
        logger.debug("Set thread: %s", self._thread)
        self._thread.start()

    def _process(self, queue):
        """ Image IO process to be run in a thread. Override for loader/saver process.

        Parameters
        ----------
        queue: queue.Queue()
            The ImageIO Queue
        """
        raise NotImplementedError

    def close(self):
        """ Closes down and joins the internal threads """
        logger.debug("Received Close")
        if self._thread is not None:
            self._thread.join()
        self._thread = None
        logger.debug("Closed")
Example #11
class Patch():
    """ The patch pipeline
        To be run within its own thread """
    def __init__(self, arguments, samples, display, lock, trigger,
                 config_tools, tk_vars):
        logger.debug(
            "Initializing %s: (arguments: '%s', samples: %s: display: %s, lock: %s,"
            " trigger: %s, config_tools: %s, tk_vars %s)",
            self.__class__.__name__, arguments, samples, display, lock,
            trigger, config_tools, tk_vars)
        self.samples = samples
        self.queue_patch_in = queue_manager.get_queue("preview_patch_in")
        self.display = display
        self.lock = lock
        self.trigger = trigger
        self.current_config = config_tools.config
        self.converter_arguments = None  # Updated converter arguments dict

        configfile = arguments.configfile if hasattr(arguments,
                                                     "configfile") else None
        self.converter = Converter(
            output_dir=None,
            output_size=self.samples.predictor.output_size,
            output_has_mask=self.samples.predictor.has_predicted_mask,
            draw_transparent=False,
            pre_encode=None,
            configfile=configfile,
            arguments=self.generate_converter_arguments(arguments))

        self.shutdown = Event()

        self.thread = MultiThread(self.process,
                                  self.trigger,
                                  self.shutdown,
                                  self.queue_patch_in,
                                  self.samples,
                                  tk_vars,
                                  thread_count=1,
                                  name="patch_thread")
        self.thread.start()

    @staticmethod
    def generate_converter_arguments(arguments):
        """ Get the default converter arguments """
        converter_arguments = ConvertArgs(None,
                                          "convert").get_optional_arguments()
        for item in converter_arguments:
            value = item.get("default", None)
            # Skip options without a default value
            if value is None:
                continue
            option = item.get("dest", item["opts"][1].replace("--", ""))
            # Skip options already in arguments
            if hasattr(arguments, option):
                continue
            # Add option to arguments
            setattr(arguments, option, value)
        logger.debug(arguments)
        return arguments

    def process(self, trigger_event, shutdown_event, patch_queue_in, samples,
                tk_vars):
        """ Wait for event trigger and run when process when set """
        patch_queue_out = queue_manager.get_queue("preview_patch_out")
        while True:
            trigger = trigger_event.wait(1)
            if shutdown_event.is_set():
                logger.debug("Shutdown received")
                break
            if not trigger:
                continue
            # Clear trigger so calling process can set it during this run
            trigger_event.clear()
            tk_vars["busy"].set(True)
            queue_manager.flush_queue("preview_patch_in")
            self.feed_swapped_faces(patch_queue_in, samples)
            with self.lock:
                self.update_converter_arguments()
                self.converter.reinitialize(config=self.current_config)
            swapped = self.patch_faces(patch_queue_in, patch_queue_out,
                                       samples.sample_size)
            with self.lock:
                self.display.destination = swapped
            tk_vars["refresh"].set(True)
            tk_vars["busy"].set(False)

    def update_converter_arguments(self):
        """ Update the converter arguments """
        logger.debug("Updating Converter cli arguments")
        if self.converter_arguments is None:
            logger.debug("No arguments to update")
            return
        for key, val in self.converter_arguments.items():
            logger.debug("Updating %s to %s", key, val)
            setattr(self.converter.args, key, val)
        logger.debug("Updated Converter cli arguments")

    @staticmethod
    def feed_swapped_faces(patch_queue_in, samples):
        """ Feed swapped faces to the converter and trigger a run """
        logger.trace("feeding swapped faces to converter")
        for item in samples.predicted_images:
            patch_queue_in.put(item)
        logger.trace("fed %s swapped faces to converter",
                     len(samples.predicted_images))
        logger.trace("Putting EOF to converter")
        patch_queue_in.put("EOF")

    def patch_faces(self, queue_in, queue_out, sample_size):
        """ Patch faces """
        logger.trace("Patching faces")
        self.converter.process(queue_in, queue_out)
        swapped = list()
        idx = 0
        while idx < sample_size:
            logger.trace("Patching image %s of %s", idx + 1, sample_size)
            item = queue_out.get()
            swapped.append(item[1])
            logger.trace("Patched image %s of %s", idx + 1, sample_size)
            idx += 1
        logger.trace("Patched faces")
        return swapped
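
The Patch worker in Example #11 is controlled entirely through two events: the caller sets trigger to request a re-patch and shutdown to stop the thread, while the worker clears trigger itself once it starts a run. A hedged sketch of that calling side, assuming standard threading.Event objects as the example implies; the tk_vars handling is omitted.

from threading import Event, Lock

# Illustrative driver for the Patch worker above.
trigger, shutdown, lock = Event(), Event(), Lock()
# patch = Patch(arguments, samples, display, lock, trigger, config_tools, tk_vars)

def request_repatch():
    trigger.set()             # process() wakes, clears the trigger and runs one pass

def close_down(patch):
    shutdown.set()            # process() sees the shutdown event and breaks out
    patch.thread.join()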
Example #12
class Predict():
    """ Predict faces from incoming queue """
    def __init__(self, in_queue, queue_size, arguments):
        logger.debug(
            "Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
            self.__class__.__name__, arguments, queue_size, in_queue)
        self.batchsize = min(queue_size, 16)
        self.args = arguments
        self.in_queue = in_queue
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False
        self.model = self.load_model()
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)",
                     self.__class__.__name__, self.out_queue)

    @property
    def coverage_ratio(self):
        """ Return coverage ratio from training options """
        return self.model.training_opts["coverage_ratio"]

    @property
    def input_size(self):
        """ Return the model input size """
        return self.model.input_shape[0]

    @property
    def output_size(self):
        """ Return the model output size """
        return self.model.output_shape[0]

    @property
    def input_mask(self):
        """ Return the input mask """
        mask = np.zeros(self.model.state.mask_shapes[0], dtype="float32")
        retval = np.expand_dims(mask, 0)
        return retval

    @property
    def has_predicted_mask(self):
        """ Return whether this model has a predicted mask """
        return bool(self.model.state.mask_shapes)

    def load_model(self):
        """ Load the model requested for conversion """
        logger.debug("Loading Model")
        model_dir = get_folder(self.args.model_dir, make_folder=False)
        if not model_dir:
            logger.error("%s does not exist.", self.args.model_dir)
            exit(1)
        trainer = self.get_trainer(model_dir)
        model = PluginLoader.get_model(trainer)(model_dir,
                                                self.args.gpus,
                                                predict=True)
        logger.debug("Loaded Model")
        return model

    def get_trainer(self, model_dir):
        """ Return the trainer name if provided, or read from state file """
        if self.args.trainer:
            logger.debug("Trainer name provided: '%s'", self.args.trainer)
            return self.args.trainer

        statefile = [
            fname for fname in os.listdir(str(model_dir))
            if fname.endswith("_state.json")
        ]
        if len(statefile) != 1:
            logger.error(
                "There should be 1 state file in your model folder. %s were found. "
                "Specify a trainer with the '-t', '--trainer' option.")
            exit(1)
        statefile = os.path.join(str(model_dir), statefile[0])

        with open(statefile, "rb") as inp:
            state = self.serializer.unmarshal(inp.read().decode("utf-8"))
            trainer = state.get("name", None)

        if not trainer:
            logger.error(
                "Trainer name could not be read from state file. "
                "Specify a trainer with the '-t', '--trainer' option.")
            exit(1)
        logger.debug("Trainer from state file: '%s'", trainer)
        return trainer

    def predict_faces(self):
        """ Get detected faces from images """
        faces_seen = 0
        batch = list()
        while True:
            item = self.in_queue.get()
            if item != "EOF":
                logger.trace("Got from queue: '%s'", item["filename"])
                faces_count = len(item["detected_faces"])
                self.faces_count += faces_count
                if faces_count > 1:
                    self.verify_output = True
                    logger.verbose(
                        "Found more than one face in an image! '%s'",
                        os.path.basename(item["filename"]))

                self.load_aligned(item)

                faces_seen += faces_count
                batch.append(item)

            if faces_seen < self.batchsize and item != "EOF":
                logger.trace("Continuing. Current batchsize: %s", faces_seen)
                continue

            if batch:
                logger.trace("Batching to predictor. Frames: %s, Faces: %s",
                             len(batch), faces_seen)
                detected_batch = [
                    detected_face for item in batch
                    for detected_face in item["detected_faces"]
                ]
                if faces_seen != 0:
                    feed_faces = self.compile_feed_faces(detected_batch)
                    predicted = self.predict(feed_faces)
                else:
                    predicted = list()

                self.queue_out_frames(batch, predicted)

            faces_seen = 0
            batch = list()
            if item == "EOF":
                logger.debug("Load queue complete")
                break
        self.out_queue.put("EOF")

    def load_aligned(self, item):
        """ Load the feed faces and reference output faces """
        logger.trace("Loading aligned faces: '%s'", item["filename"])
        for detected_face in item["detected_faces"]:
            detected_face.load_feed_face(item["image"],
                                         size=self.input_size,
                                         coverage_ratio=self.coverage_ratio,
                                         dtype="float32")
            if self.input_size == self.output_size:
                detected_face.reference = detected_face.feed
            else:
                detected_face.load_reference_face(
                    item["image"],
                    size=self.output_size,
                    coverage_ratio=self.coverage_ratio,
                    dtype="float32")
        logger.trace("Loaded aligned faces: '%s'", item["filename"])

    @staticmethod
    def compile_feed_faces(detected_faces):
        """ Compile the faces for feeding into the predictor """
        logger.trace("Compiling feed face. Batchsize: %s", len(detected_faces))
        feed_faces = np.stack(
            [detected_face.feed_face for detected_face in detected_faces])
        logger.trace("Compiled Feed faces. Shape: %s", feed_faces.shape)
        return feed_faces

    def predict(self, feed_faces):
        """ Perform inference on the feed """
        logger.trace("Predicting: Batchsize: %s", len(feed_faces))
        feed = [feed_faces]
        if self.has_predicted_mask:
            feed.append(np.repeat(self.input_mask, feed_faces.shape[0],
                                  axis=0))
        logger.trace("Input shape(s): %s", [item.shape for item in feed])

        predicted = self.predictor(feed)
        predicted = predicted if isinstance(predicted, list) else [predicted]
        logger.trace("Output shape(s): %s",
                     [predict.shape for predict in predicted])

        # Compile masks into alpha channel or keep raw faces
        predicted = np.concatenate(
            predicted, axis=-1) if len(predicted) == 2 else predicted[0]
        predicted = predicted.astype("float32")

        logger.trace("Final shape: %s", predicted.shape)
        return predicted

    def queue_out_frames(self, batch, swapped_faces):
        """ Compile the batch back to original frames and put to out_queue """
        logger.trace("Queueing out batch. Batchsize: %s", len(batch))
        pointer = 0
        for item in batch:
            num_faces = len(item["detected_faces"])
            if num_faces == 0:
                item["swapped_faces"] = np.array(list())
            else:
                item["swapped_faces"] = swapped_faces[pointer:pointer +
                                                      num_faces]

            logger.trace(
                "Putting to queue. ('%s', detected_faces: %s, swapped_faces: %s)",
                item["filename"], len(item["detected_faces"]),
                item["swapped_faces"].shape[0])
            self.out_queue.put(item)
            pointer += num_faces
        logger.trace("Queued out batch. Batchsize: %s", len(batch))
Example #13
class Predict():
    """ Predict faces from incoming queue """
    def __init__(self, in_queue, queue_size, arguments):
        logger.debug("Initializing %s: (args: %s, queue_size: %s, in_queue: %s)",
                     self.__class__.__name__, arguments, queue_size, in_queue)
        self.batchsize = min(queue_size, 16)
        self.args = arguments
        self.in_queue = in_queue
        self.out_queue = queue_manager.get_queue("patch")
        self.serializer = Serializer.get_serializer("json")
        self.faces_count = 0
        self.verify_output = False
        self.model = self.load_model()
        self.predictor = self.model.converter(self.args.swap_model)
        self.queues = dict()

        self.thread = MultiThread(self.predict_faces, thread_count=1)
        self.thread.start()
        logger.debug("Initialized %s: (out_queue: %s)", self.__class__.__name__, self.out_queue)

    @property
    def coverage_ratio(self):
        """ Return coverage ratio from training options """
        return self.model.training_opts["coverage_ratio"]

    @property
    def input_size(self):
        """ Return the model input size """
        return self.model.input_shape[0]

    @property
    def output_size(self):
        """ Return the model output size """
        return self.model.output_shape[0]

    @property
    def input_mask(self):
        """ Return the input mask """
        mask = np.zeros(self.model.state.mask_shapes[0], dtype="float32")
        retval = np.expand_dims(mask, 0)
        return retval

    @property
    def has_predicted_mask(self):
        """ Return whether this model has a predicted mask """
        return bool(self.model.state.mask_shapes)

    def load_model(self):
        """ Load the model requested for conversion """
        logger.debug("Loading Model")
        model_dir = get_folder(self.args.model_dir, make_folder=False)
        if not model_dir:
            logger.error("%s does not exist.", self.args.model_dir)
            exit(1)
        trainer = self.get_trainer(model_dir)
        model = PluginLoader.get_model(trainer)(model_dir, self.args.gpus, predict=True)
        logger.debug("Loaded Model")
        return model

    def get_trainer(self, model_dir):
        """ Return the trainer name if provided, or read from state file """
        if self.args.trainer:
            logger.debug("Trainer name provided: '%s'", self.args.trainer)
            return self.args.trainer

        statefile = [fname for fname in os.listdir(str(model_dir))
                     if fname.endswith("_state.json")]
        if len(statefile) != 1:
            logger.error("There should be 1 state file in your model folder. %s were found. "
                         "Specify a trainer with the '-t', '--trainer' option.")
            exit(1)
        statefile = os.path.join(str(model_dir), statefile[0])

        with open(statefile, "rb") as inp:
            state = self.serializer.unmarshal(inp.read().decode("utf-8"))
            trainer = state.get("name", None)

        if not trainer:
            logger.error("Trainer name could not be read from state file. "
                         "Specify a trainer with the '-t', '--trainer' option.")
            exit(1)
        logger.debug("Trainer from state file: '%s'", trainer)
        return trainer

    def predict_faces(self):
        """ Get detected faces from images """
        faces_seen = 0
        batch = list()
        while True:
            item = self.in_queue.get()
            if item != "EOF":
                logger.trace("Got from queue: '%s'", item["filename"])
                faces_count = len(item["detected_faces"])
                self.faces_count += faces_count
                if faces_count > 1:
                    self.verify_output = True
                    logger.verbose("Found more than one face in an image! '%s'",
                                   os.path.basename(item["filename"]))

                self.load_aligned(item)

                faces_seen += faces_count
                batch.append(item)

            if faces_seen < self.batchsize and item != "EOF":
                logger.trace("Continuing. Current batchsize: %s", faces_seen)
                continue

            if batch:
                logger.trace("Batching to predictor. Frames: %s, Faces: %s",
                             len(batch), faces_seen)
                detected_batch = [detected_face for item in batch
                                  for detected_face in item["detected_faces"]]
                if faces_seen != 0:
                    feed_faces = self.compile_feed_faces(detected_batch)
                    predicted = self.predict(feed_faces)
                else:
                    predicted = list()

                self.queue_out_frames(batch, predicted)

            faces_seen = 0
            batch = list()
            if item == "EOF":
                logger.debug("Load queue complete")
                break
        self.out_queue.put("EOF")

    def load_aligned(self, item):
        """ Load the feed faces and reference output faces """
        logger.trace("Loading aligned faces: '%s'", item["filename"])
        for detected_face in item["detected_faces"]:
            detected_face.load_feed_face(item["image"],
                                         size=self.input_size,
                                         coverage_ratio=self.coverage_ratio,
                                         dtype="float32")
            if self.input_size == self.output_size:
                detected_face.reference = detected_face.feed
            else:
                detected_face.load_reference_face(item["image"],
                                                  size=self.output_size,
                                                  coverage_ratio=self.coverage_ratio,
                                                  dtype="float32")
        logger.trace("Loaded aligned faces: '%s'", item["filename"])

    @staticmethod
    def compile_feed_faces(detected_faces):
        """ Compile the faces for feeding into the predictor """
        logger.trace("Compiling feed face. Batchsize: %s", len(detected_faces))
        feed_faces = np.stack([detected_face.feed_face for detected_face in detected_faces])
        logger.trace("Compiled Feed faces. Shape: %s", feed_faces.shape)
        return feed_faces

    def predict(self, feed_faces):
        """ Perform inference on the feed """
        logger.trace("Predicting: Batchsize: %s", len(feed_faces))
        feed = [feed_faces]
        if self.has_predicted_mask:
            feed.append(np.repeat(self.input_mask, feed_faces.shape[0], axis=0))
        logger.trace("Input shape(s): %s", [item.shape for item in feed])

        predicted = self.predictor(feed)
        predicted = predicted if isinstance(predicted, list) else [predicted]
        logger.trace("Output shape(s): %s", [predict.shape for predict in predicted])

        # Compile masks into alpha channel or keep raw faces
        predicted = np.concatenate(predicted, axis=-1) if len(predicted) == 2 else predicted[0]
        predicted = predicted.astype("float32")

        logger.trace("Final shape: %s", predicted.shape)
        return predicted

    def queue_out_frames(self, batch, swapped_faces):
        """ Compile the batch back to original frames and put to out_queue """
        logger.trace("Queueing out batch. Batchsize: %s", len(batch))
        pointer = 0
        for item in batch:
            num_faces = len(item["detected_faces"])
            if num_faces == 0:
                item["swapped_faces"] = np.array(list())
            else:
                item["swapped_faces"] = swapped_faces[pointer:pointer + num_faces]

            logger.trace("Putting to queue. ('%s', detected_faces: %s, swapped_faces: %s)",
                         item["filename"], len(item["detected_faces"]),
                         item["swapped_faces"].shape[0])
            self.out_queue.put(item)
            pointer += num_faces
        logger.trace("Queued out batch. Batchsize: %s", len(batch))