Example #1
    def _end_thread(self, thread: MultiThread, err: bool) -> None:
        """ Output message and join thread back to main on termination.

        Parameters
        ----------
        thread: :class:`lib.multithreading.MultiThread`
            The background training thread
        err: bool
            Whether an error has been detected in :func:`_monitor`
        """
        logger.debug("Ending Training thread")
        if err:
            msg = "Error caught! Exiting..."
            log = logger.critical
        else:
            msg = ("Exit requested! The trainer will complete its current cycle, "
                   "save the models and quit (This can take a couple of minutes "
                   "depending on your training speed).")
            if not self._args.redirect_gui:
                msg += " If you want to kill it now, press Ctrl + c"
            log = logger.info
        log(msg)
        self._stop = True
        thread.join()
        sys.stdout.flush()
        logger.debug("Ended training thread")
Example #2
    def process(self):
        """
            main function of this class, IO thread terminate when 
            the main function finished
        """
        IOthread = MultiThread(self.IOthread)
        IOthread.start()
        for rawfile, filterfile, bamfile in tqdm(self.files.load()):
            logger.info("read file {}".format(rawfile))
            vcf1, vcf2 = VCF(rawfile), VCF_filter(filterfile)
            Alignmentfile = pysam.AlignmentFile(bamfile)

            detail = Detail(Alignmentfile)
            Positive_sites, Negative_sites = self.diff(vcf1.get_sites, vcf2.get_sites)
            assert len(Negative_sites) != 0, filterfile
            assert len(Positive_sites) != 0, rawfile
            for site in Positive_sites:
                site['confidence'] = 1
                s = self.serialize(site, detail)
                self.IOqueue.put(s)

            for site in Negative_sites:
                site['confidence'] = 0
                s = self.serialize(site, detail)
                self.IOqueue.put(s)
        self.IOqueue.put('EOF')
        IOthread.join()
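Here the string 'EOF' acts as a sentinel telling the IO thread that the producer is done. A self-contained sketch of the same producer/consumer pattern using only the standard library (the writer function, the records and the output filename are illustrative assumptions):

import queue
import threading


def writer(out_queue, path):
    """ Consume serialized records until the 'EOF' sentinel arrives. """
    with open(path, "w") as handle:
        while True:
            item = out_queue.get()
            if item == "EOF":  # sentinel: the producer has finished
                break
            handle.write(item + "\n")


io_queue = queue.Queue(maxsize=1024)
io_thread = threading.Thread(target=writer, args=(io_queue, "sites.txt"))
io_thread.start()

for record in ("chr1:1000\t1", "chr2:2000\t0"):  # stand-in for serialized sites
    io_queue.put(record)
io_queue.put("EOF")  # signal completion
io_thread.join()     # wait for the writer to drain the queue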
Example #3
    def detect_faces(self, *args, **kwargs):
        """ Detect faces in Multiple Threads """
        super().detect_faces(*args, **kwargs)
        workers = MultiThread(target=self.detect_thread, thread_count=self.batch_size)
        workers.start()
        workers.join()
        sentinel = self.queues["in"].get()
        self.queues["out"].put(sentinel)
        logger.debug("Detecting Faces complete")
Example #4
class Convert():
    """ The convert process. """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)

        self.patch_threads = None
        self.images = Images(self.args)
        self.validate()
        self.alignments = Alignments(self.args, False, self.images.is_video)
        self.opts = OptionalActions(self.args, self.images.input_images,
                                    self.alignments)

        self.add_queues()
        self.disk_io = DiskIO(self.alignments, self.images, arguments)
        self.predictor = Predict(self.disk_io.load_queue, self.queue_size,
                                 arguments)

        configfile = self.args.configfile if hasattr(self.args,
                                                     "configfile") else None
        self.converter = Converter(get_folder(self.args.output_dir),
                                   self.predictor.output_size,
                                   self.predictor.has_predicted_mask,
                                   self.disk_io.draw_transparent,
                                   self.disk_io.pre_encode,
                                   arguments,
                                   configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def queue_size(self):
        """ Set 16 for singleprocess otherwise 32 """
        if self.args.singleprocess:
            retval = 16
        else:
            retval = 32
        logger.debug(retval)
        return retval

    @property
    def pool_processes(self):
        """ return the maximum number of pooled processes to use """
        if self.args.singleprocess:
            retval = 1
        elif self.args.jobs > 0:
            retval = min(self.args.jobs, total_cpus(),
                         self.images.images_found)
        else:
            retval = min(total_cpus(), self.images.images_found)
        retval = 1 if retval == 0 else retval
        logger.debug(retval)
        return retval

    def validate(self):
        """ Make the output folder if it doesn't exist and check that video flag is
            a valid choice """
        if (self.args.writer == "ffmpeg" and not self.images.is_video
                and self.args.reference_video is None):
            raise FaceswapError(
                "Output as video selected, but using frames as input. You must "
                "provide a reference video ('-ref', '--reference-video').")
        output_dir = get_folder(self.args.output_dir)
        logger.info("Output Directory: %s", output_dir)

    def add_queues(self):
        """ Add the queues for convert """
        logger.debug("Adding queues. Queue size: %s", self.queue_size)
        for qname in ("convert_in", "convert_out", "patch"):
            queue_manager.add_queue(qname, self.queue_size)

    def process(self):
        """ Process the conversion """
        logger.debug("Starting Conversion")
        # queue_manager.debug_monitor(5)
        try:
            self.convert_images()
            self.disk_io.save_thread.join()
            queue_manager.terminate_queues()

            Utils.finalize(self.images.images_found,
                           self.predictor.faces_count,
                           self.predictor.verify_output)
            logger.debug("Completed Conversion")
        except MemoryError as err:
            msg = (
                "Faceswap ran out of RAM running convert. Conversion is very system RAM "
                "heavy, so this can happen in certain circumstances when you have a lot of "
                "cpus but not enough RAM to support them all."
                "\nYou should lower the number of processes in use by either setting the "
                "'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j)."
            )
            raise FaceswapError(msg) from err

    def convert_images(self):
        """ Convert the images """
        logger.debug("Converting images")
        save_queue = queue_manager.get_queue("convert_out")
        patch_queue = queue_manager.get_queue("patch")
        self.patch_threads = MultiThread(self.converter.process,
                                         patch_queue,
                                         save_queue,
                                         thread_count=self.pool_processes,
                                         name="patch")

        self.patch_threads.start()
        while True:
            self.check_thread_error()
            if self.disk_io.completion_event.is_set():
                logger.debug("DiskIO completion event set. Joining Pool")
                break
            if self.patch_threads.completed():
                logger.debug("All patch threads completed")
                break
            sleep(1)
        self.patch_threads.join()

        logger.debug("Putting EOF")
        save_queue.put("EOF")
        logger.debug("Converted images")

    def check_thread_error(self):
        """ Check and raise thread errors """
        for thread in (self.predictor.thread, self.disk_io.load_thread,
                       self.disk_io.save_thread, self.patch_threads):
            thread.check_and_raise_error()
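convert_images above polls two conditions roughly once a second, a completion event set by the disk IO and the patch threads themselves finishing, raising any worker error on each pass. A minimal sketch of that monitoring loop with threading.Event (the work function is a hypothetical stand-in for the patching workers, and completion_event stands in for the DiskIO completion event):

import threading
import time

completion_event = threading.Event()


def work():
    """ Hypothetical stand-in for patching frames. """
    time.sleep(3)
    completion_event.set()


worker = threading.Thread(target=work)
worker.start()

while True:
    # check_thread_error() would be called here to surface worker exceptions
    if completion_event.is_set():  # upstream signalled that saving is done
        break
    if not worker.is_alive():      # all patch threads have completed
        break
    time.sleep(1)                  # poll roughly once per second
worker.join()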
Example #5
class ImageIO():
    """ Perform disk IO for images or videos in a background thread.

    This is the parent thread for :class:`ImagesLoader` and :class:`ImagesSaver` and should not
    be called directly.

    Parameters
    ----------
    path: str or list
        The path to load or save images to/from. For loading this can be a folder containing
        images, a video file or a list of image files. For saving this must be an existing folder.
    queue_size: int
        The number of images to hold in the internal buffer.
    args: tuple, optional
        The arguments to be passed to the loader or saver thread. Default: ``None``

    See Also
    --------
    lib.image.ImagesLoader : Background Image Loader inheriting from this class.
    lib.image.ImagesSaver : Background Image Saver inheriting from this class.
    """

    def __init__(self, path, queue_size, args=None):
        logger.debug("Initializing %s: (path: %s, queue_size: %s, args: %s)",
                     self.__class__.__name__, path, queue_size, args)

        self._args = tuple() if args is None else args

        self._location = path
        self._check_location_exists()

        self._queue = queue_manager.get_queue(name=self.__class__.__name__, maxsize=queue_size)
        self._thread = None

    @property
    def location(self):
        """ str: The folder or video that was passed in as the :attr:`path` parameter. """
        return self._location

    def _check_location_exists(self):
        """ Check whether the input location exists.

        Raises
        ------
        FaceswapError
            If the given location does not exist
        """
        if isinstance(self.location, str) and not os.path.exists(self.location):
            raise FaceswapError("The location '{}' does not exist".format(self.location))
        if isinstance(self.location, (list, tuple)) and not all(os.path.exists(location)
                                                                for location in self.location):
            raise FaceswapError("Not all locations in the input list exist")

    def _set_thread(self):
        """ Set the load/save thread """
        logger.debug("Setting thread")
        if self._thread is not None and self._thread.is_alive():
            logger.debug("Thread pre-exists and is alive: %s", self._thread)
            return
        self._thread = MultiThread(self._process,
                                   self._queue,
                                   name=self.__class__.__name__,
                                   thread_count=1)
        logger.debug("Set thread: %s", self._thread)
        self._thread.start()

    def _process(self, queue):
        """ Image IO process to be run in a thread. Override for loader/saver process.

        Parameters
        ----------
        queue: queue.Queue()
            The ImageIO Queue
        """
        raise NotImplementedError

    def close(self):
        """ Closes down and joins the internal threads """
        logger.debug("Received Close")
        if self._thread is not None:
            self._thread.join()
        logger.debug("Closed")
Example #6
class Convert():  # pylint:disable=too-few-public-methods
    """ The Faceswap Face Conversion Process.

    The conversion process is responsible for swapping the faces on source frames with the output
    from a trained model.

    It leverages a series of user selected post-processing plugins, executed from
    :class:`lib.convert.Converter`.

    The convert process is self contained and should not be referenced by any other scripts, so it
    contains no public properties.

    Parameters
    ----------
    arguments: :class:`argparse.Namespace`
        The arguments to be passed to the convert process as generated from Faceswap's command
        line arguments
    """
    def __init__(self, arguments):
        logger.debug("Initializing %s: (args: %s)", self.__class__.__name__,
                     arguments)
        self._args = arguments

        self._patch_threads = None
        self._images = ImagesLoader(self._args.input_dir, fast_count=True)
        self._alignments = Alignments(self._args, False, self._images.is_video)
        if self._alignments.version == 1.0:
            logger.error(
                "The alignments file format has been updated since the given alignments "
                "file was generated. You need to update the file to proceed.")
            logger.error(
                "To do this run the 'Alignments Tool' > 'Extract' Job.")
            sys.exit(1)

        self._opts = OptionalActions(self._args, self._images.file_list,
                                     self._alignments)

        self._add_queues()
        self._disk_io = DiskIO(self._alignments, self._images, arguments)
        self._predictor = Predict(self._disk_io.load_queue, self._queue_size,
                                  arguments)
        self._validate()
        get_folder(self._args.output_dir)

        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        self._converter = Converter(self._predictor.output_size,
                                    self._predictor.coverage_ratio,
                                    self._predictor.centering,
                                    self._disk_io.draw_transparent,
                                    self._disk_io.pre_encode,
                                    arguments,
                                    configfile=configfile)

        logger.debug("Initialized %s", self.__class__.__name__)

    @property
    def _queue_size(self):
        """ int: Size of the converter queues. 16 for single process otherwise 32 """
        if self._args.singleprocess:
            retval = 16
        else:
            retval = 32
        logger.debug(retval)
        return retval

    @property
    def _pool_processes(self):
        """ int: The number of threads to run in parallel. Based on user options and number of
        available processors. """
        if self._args.singleprocess:
            retval = 1
        elif self._args.jobs > 0:
            retval = min(self._args.jobs, total_cpus(), self._images.count)
        else:
            retval = min(total_cpus(), self._images.count)
        retval = 1 if retval == 0 else retval
        logger.debug(retval)
        return retval

    def _validate(self):
        """ Validate the Command Line Options.

        Ensure that certain cli selections are valid and won't result in an error. Checks:
            * If frames have been passed in with video output, ensure user supplies reference
            video.
            * If "on-the-fly" and an NN mask is selected, output warning and switch to 'extended'
            * If a mask-type is selected, ensure it exists in the alignments file.
            * If a predicted mask-type is selected, ensure model has been trained with a mask
            otherwise attempt to select first available masks, otherwise raise error.

        Raises
        ------
        FaceswapError
            If an invalid selection has been found.

        """
        if (self._args.writer == "ffmpeg" and not self._images.is_video
                and self._args.reference_video is None):
            raise FaceswapError(
                "Output as video selected, but using frames as input. You must "
                "provide a reference video ('-ref', '--reference-video').")

        if (self._args.on_the_fly and self._args.mask_type
                not in ("none", "extended", "components")):
            logger.warning(
                "You have selected an incompatible mask type ('%s') for On-The-Fly "
                "conversion. Switching to 'extended'", self._args.mask_type)
            self._args.mask_type = "extended"

        if (not self._args.on_the_fly
                and self._args.mask_type not in ("none", "predicted")
                and not self._alignments.mask_is_valid(self._args.mask_type)):
            msg = (
                "You have selected the Mask Type `{}` but at least one face does not have this "
                "mask stored in the Alignments File.\nYou should generate the required masks "
                "with the Mask Tool or set the Mask Type option to an existing Mask Type.\nA "
                "summary of existing masks is as follows:\nTotal faces: {}, Masks: "
                "{}".format(self._args.mask_type, self._alignments.faces_count,
                            self._alignments.mask_summary))
            raise FaceswapError(msg)

        if self._args.mask_type == "predicted" and not self._predictor.has_predicted_mask:
            available_masks = [
                k for k, v in self._alignments.mask_summary.items()
                if k != "none" and v == self._alignments.faces_count
            ]
            if not available_masks:
                msg = (
                    "Predicted Mask selected, but the model was not trained with a mask and no "
                    "masks are stored in the Alignments File.\nYou should generate the "
                    "required masks with the Mask Tool or set the Mask Type to `none`."
                )
                raise FaceswapError(msg)
            mask_type = available_masks[0]
            logger.warning(
                "Predicted Mask selected, but the model was not trained with a "
                "mask. Selecting first available mask: '%s'", mask_type)
            self._args.mask_type = mask_type

    def _add_queues(self):
        """ Add the queues for in, patch and out. """
        logger.debug("Adding queues. Queue size: %s", self._queue_size)
        for qname in ("convert_in", "convert_out", "patch"):
            queue_manager.add_queue(qname, self._queue_size)

    def process(self):
        """ The entry point for triggering the Conversion Process.

        Should only be called from :class:`lib.cli.launcher.ScriptExecutor`
        """
        logger.debug("Starting Conversion")
        # queue_manager.debug_monitor(5)
        try:
            self._convert_images()
            self._disk_io.save_thread.join()
            queue_manager.terminate_queues()

            finalize(self._images.count, self._predictor.faces_count,
                     self._predictor.verify_output)
            logger.debug("Completed Conversion")
        except MemoryError as err:
            msg = (
                "Faceswap ran out of RAM running convert. Conversion is very system RAM "
                "heavy, so this can happen in certain circumstances when you have a lot of "
                "cpus but not enough RAM to support them all."
                "\nYou should lower the number of processes in use by either setting the "
                "'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j)."
            )
            raise FaceswapError(msg) from err

    def _convert_images(self):
        """ Start the multi-threaded patching process, monitor all threads for errors and join on
        completion. """
        logger.debug("Converting images")
        save_queue = queue_manager.get_queue("convert_out")
        patch_queue = queue_manager.get_queue("patch")
        self._patch_threads = MultiThread(self._converter.process,
                                          patch_queue,
                                          save_queue,
                                          thread_count=self._pool_processes,
                                          name="patch")

        self._patch_threads.start()
        while True:
            self._check_thread_error()
            if self._disk_io.completion_event.is_set():
                logger.debug("DiskIO completion event set. Joining Pool")
                break
            if self._patch_threads.completed():
                logger.debug("All patch threads completed")
                break
            sleep(1)
        self._patch_threads.join()

        logger.debug("Putting EOF")
        save_queue.put("EOF")
        logger.debug("Converted images")

    def _check_thread_error(self):
        """ Monitor all running threads for errors, and raise accordingly. """
        for thread in (self._predictor.thread, self._disk_io.load_thread,
                       self._disk_io.save_thread, self._patch_threads):
            thread.check_and_raise_error()
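Every monitoring loop above ends in check_and_raise_error(), which re-raises an exception captured inside a worker thread on the main thread. A hedged sketch of one way such a wrapper can be built with the standard library (this illustrates the idea only; it is not faceswap's MultiThread implementation):

import sys
import threading


class ErrorAwareThread(threading.Thread):
    """ Thread that stores any exception so the caller can re-raise it. """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.err = None

    def run(self):
        try:
            super().run()
        except Exception:  # capture rather than dying silently
            self.err = sys.exc_info()

    def check_and_raise_error(self):
        """ Re-raise a stored worker exception in the calling thread. """
        if self.err is not None:
            raise self.err[1].with_traceback(self.err[2])


def fail():
    raise ValueError("boom")


worker = ErrorAwareThread(target=fail)
worker.start()
worker.join()
worker.check_and_raise_error()  # the ValueError surfaces here, in the main thread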