Example #1
    def init_extractor(self, loglevel):
        """ Initialize FAN """
        logger.debug("Initialize Extractor")
        out_queue = queue_manager.get_queue("out")

        d_kwargs = {"in_queue": queue_manager.get_queue("in"),
                    "out_queue": queue_manager.get_queue("align")}
        a_kwargs = {"in_queue": queue_manager.get_queue("align"),
                    "out_queue": out_queue}

        detector = PluginLoader.get_detector("manual")(loglevel=loglevel)
        detect_process = SpawnProcess(detector.run, **d_kwargs)
        d_event = detect_process.event
        detect_process.start()

        for plugin in ("fan", "cv2_dnn"):
            aligner = PluginLoader.get_aligner(plugin)(loglevel=loglevel)
            align_process = SpawnProcess(aligner.run, **a_kwargs)
            a_event = align_process.event
            align_process.start()

            # Wait for Aligner to initialize
            # The first ever load of the model for FAN has reportedly taken
            # up to 3-4 minutes, hence high timeout.
            a_event.wait(300)
            if not a_event.is_set():
                if plugin == "fan":
                    align_process.join()
                    logger.error("Error initializing FAN. Trying CV2-DNN")
                    continue
                else:
                    raise ValueError("Error inititalizing Aligner")
            if plugin == "cv2_dnn":
                break

            try:
                err = None
                err = out_queue.get(True, 1)
            except QueueEmpty:
                pass
            if not err:
                break
            align_process.join()
            logger.error("Error initializing FAN. Trying CV2-DNN")

        d_event.wait(10)
        if not d_event.is_set():
            raise ValueError("Error inititalizing Detector")

        self.extractor["detect"] = detector
        self.extractor["align"] = aligner
        logger.debug("Initialized Extractor")
Example #2
    def launch_aligner(self):
        """ Load the aligner plugin to retrieve landmarks """
        out_queue = queue_manager.get_queue("out")
        kwargs = {"in_queue": queue_manager.get_queue("in"),
                  "out_queue": out_queue}

        for plugin in ("fan", "cv2_dnn"):
            aligner = PluginLoader.get_aligner(plugin)(loglevel=self.args.loglevel)
            process = SpawnProcess(aligner.run, **kwargs)
            event = process.event
            process.start()
            # Wait for Aligner to initialize
            # The first ever load of the model for FAN has reportedly taken
            # up to 3-4 minutes, hence high timeout.
            event.wait(300)

            if not event.is_set():
                if plugin == "fan":
                    process.join()
                    logger.error("Error initializing FAN. Trying CV2-DNN")
                    continue
                else:
                    raise ValueError("Error inititalizing Aligner")
            if plugin == "cv2_dnn":
                return

            try:
                err = None
                err = out_queue.get(True, 1)
            except QueueEmpty:
                pass
            if not err:
                break
            process.join()
            logger.error("Error initializing FAN. Trying CV2-DNN")
Example #3
 def load_detector(detector, loglevel, rotation, min_size):
     """ Set global arguments and load detector plugin """
     detector_name = detector.replace("-", "_").lower()
     logger.debug("Loading Detector: '%s'", detector_name)
     detector = PluginLoader.get_detector(detector_name)(loglevel=loglevel,
                                                         rotation=rotation,
                                                         min_size=min_size)
     return detector
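
For illustration, a hypothetical call to the helper above; the hyphenated CLI name is normalized to the plugin module name before loading:

    # "CV2-DNN" is lowered and converted to "cv2_dnn" inside load_detector.
    detector = load_detector("CV2-DNN", "INFO", rotation=None, min_size=20)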
Example #4
 def load_trainer(self, model):
     """ Load the trainer requested for training """
     logger.debug("Loading Trainer")
     trainer = PluginLoader.get_trainer(model.trainer)
     trainer = trainer(model,
                       self.images,
                       self.args.batch_size)
     logger.debug("Loaded Trainer")
     return trainer
Example #5
 def load_model(self):
     """ Load the model requested for conversion """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir, make_folder=False)
     if not model_dir:
         logger.error("%s does not exist.", self.args.model_dir)
         exit(1)
     trainer = self.get_trainer(model_dir)
     model = PluginLoader.get_model(trainer)(model_dir, self.args.gpus, predict=True)
     logger.debug("Loaded Model")
     return model
Example #6
 def get_writer(self):
     """ Return the writer plugin """
     args = [self.args.output_dir]
     if self.args.writer in ("ffmpeg", "gif"):
         args.append(self.total_count)
     if self.args.writer == "ffmpeg":
         if self.images.is_video:
             args.append(self.args.input_dir)
         else:
             args.append(self.args.reference_video)
     logger.debug("Writer args: %s", args)
     return PluginLoader.get_converter("writer", self.args.writer)(*args)
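
A hedged sketch of driving the returned writer; the write/close method names and arguments are assumptions based on typical writer plugins, not confirmed by this snippet:

    writer = self.get_writer()
    # Assumed interface: one call per converted frame, then finalize.
    writer.write("frame_0001.png", converted_image)  # hypothetical API
    writer.close()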
Example #7
    def _load_model(self):
        """ Load the Faceswap model.

        Returns
        -------
        :mod:`plugins.train.model` plugin
            The trained model in the specified model folder
        """
        logger.debug("Loading Model")
        model_dir = get_folder(self._args.model_dir, make_folder=False)
        if not model_dir:
            raise FaceswapError("{} does not exist.".format(self._args.model_dir))
        trainer = self._get_model_name(model_dir)
        gpus = 1 if not hasattr(self._args, "gpus") else self._args.gpus
        model = PluginLoader.get_model(trainer)(model_dir, gpus, predict=True)
        logger.debug("Loaded Model")
        return model
Example #8
 def add_comboboxes(self, parent, defaults):
     """ Add the comboboxes to the Action Frame """
     for opt in self.options:
         if opt == "mask_type":
             choices = get_available_masks() + ["predicted"]
         else:
             choices = PluginLoader.get_available_convert_plugins(opt, True)
         choices = [self.format_to_display(choice) for choice in choices]
         ctl = ControlBuilder(parent,
                              opt,
                              str,
                              defaults[opt],
                              choices=choices,
                              is_radio=False,
                              label_width=10,
                              control_width=12)
         self.tk_vars[opt] = ctl.tk_var
Example #9
    def _load_model(self):
        """ Load the model requested for training.

        Returns
        -------
        :mod:`plugins.train.model` plugin
            The requested model plugin
        """
        logger.debug("Loading Model")
        model_dir = str(get_folder(self._args.model_dir))
        model = PluginLoader.get_model(self._args.trainer)(
            model_dir,
            self._args,
            predict=False)
        model.build()
        logger.debug("Loaded Model")
        return model
Example #10
    def load_detector(self):
        """ Set global arguments and load detector plugin """
        if not self.converter_args:
            detector_name = self.args.detector.replace("-", "_").lower()
        else:
            detector_name = self.converter_args["detector"]
        logger.debug("Loading Detector: '%s'", detector_name)
        # Rotation
        rotation = None
        if hasattr(self.args, "rotate_images"):
            rotation = self.args.rotate_images

        detector = PluginLoader.get_detector(detector_name)(
            loglevel=self.loglevel,
            rotation=rotation)

        return detector
Example #11
    def load_aligner(self):
        """ Set global arguments and load aligner plugin """
        aligner_name = self.args.aligner.replace("-", "_").lower()

        # Align Eyes
        align_eyes = False
        if hasattr(self.args, 'align_eyes'):
            align_eyes = self.args.align_eyes

        # Extracted Face Size
        size = 256
        if hasattr(self.args, 'size'):
            size = self.args.size

        aligner = PluginLoader.get_aligner(aligner_name)(
            verbose=self.args.verbose, align_eyes=align_eyes, size=size)

        return aligner
Example #12
 def load_model(self):
     """ Load the model requested for training """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir)
     model = PluginLoader.get_model(self.trainer_name)(
         model_dir,
         self.args.gpus,
         no_logs=self.args.no_logs,
         warp_to_landmarks=self.args.warp_to_landmarks,
         no_flip=self.args.no_flip,
         training_image_size=self.image_size,
         alignments_paths=self.alignments_paths,
         preview_scale=self.args.preview_scale,
         pingpong=self.args.pingpong,
         memory_saving_gradients=self.args.memory_saving_gradients,
         predict=False)
     logger.debug("Loaded Model")
     return model
Example #13
    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(
            model.converter(False),
            trainer=args.trainer,
            blur_size=args.blur_size,
            seamless_clone=args.seamless_clone,
            sharpen_image=args.sharpen_image,
            mask_type=args.mask_type,
            erosion_kernel_size=args.erosion_kernel_size,
            match_histogram=args.match_histogram,
            smooth_mask=args.smooth_mask,
            avg_color_adjust=args.avg_color_adjust,
            draw_transparent=args.draw_transparent)

        return converter
Example #14
    def _load_trainer(self, model):
        """ Load the trainer requested for training.

        Parameters
        ----------
        model: :mod:`plugins.train.model` plugin
            The requested model plugin

        Returns
        -------
        :mod:`plugins.train.trainer` plugin
            The requested model trainer plugin
        """
        logger.debug("Loading Trainer")
        trainer = PluginLoader.get_trainer(model.trainer)
        trainer = trainer(model, self._images, self._args.batch_size,
                          self._args.configfile)
        logger.debug("Loaded Trainer")
        return trainer
Example #15
    def _get_writer(self):
        """ Load the selected writer plugin.

        Returns
        -------
        :mod:`plugins.convert.writer` plugin
            The requested writer plugin
        """
        args = [self._args.output_dir]
        if self._args.writer in ("ffmpeg", "gif"):
            args.extend([self._total_count, self._frame_ranges])
        if self._args.writer == "ffmpeg":
            if self._images.is_video:
                args.append(self._args.input_dir)
            else:
                args.append(self._args.reference_video)
        logger.debug("Writer args: %s", args)
        configfile = self._args.configfile if hasattr(self._args, "configfile") else None
        return PluginLoader.get_converter("writer", self._args.writer)(*args,
                                                                       configfile=configfile)
Example #16
    def launch_aligner(self):
        """ Load the aligner plugin to retrieve landmarks """
        out_queue = queue_manager.get_queue("out")
        kwargs = {
            "in_queue": queue_manager.get_queue("in"),
            "out_queue": out_queue
        }

        for plugin in ("fan", "dlib"):
            aligner = PluginLoader.get_aligner(plugin)(
                loglevel=self.args.loglevel)
            process = SpawnProcess(aligner.run, **kwargs)
            event = process.event
            process.start()
            # Wait for Aligner to initialize
            # The first ever load of the model for FAN has reportedly taken
            # up to 3-4 minutes, hence high timeout.
            event.wait(300)

            if not event.is_set():
                if plugin == "fan":
                    process.join()
                    logger.error("Error initializing FAN. Trying Dlib")
                    continue
                else:
                    raise ValueError("Error inititalizing Aligner")
            if plugin == "dlib":
                return

            try:
                err = None
                err = out_queue.get(True, 1)
            except QueueEmpty:
                pass
            if not err:
                break
            process.join()
            logger.error("Error initializing FAN. Trying Dlib")
Example #17
 def load_model(self):
     """ Load the model requested for training """
     logger.debug("Loading Model")
     model_dir = get_folder(self.args.model_dir)
     configfile = self.args.configfile if hasattr(self.args, "configfile") else None
     augment_color = not self.args.no_augment_color
     model = PluginLoader.get_model(self.trainer_name)(
         model_dir,
         self.args.gpus,
         configfile=configfile,
         snapshot_interval=self.args.snapshot_interval,
         no_logs=self.args.no_logs,
         warp_to_landmarks=self.args.warp_to_landmarks,
         augment_color=augment_color,
         no_flip=self.args.no_flip,
         training_image_size=self.image_size,
         alignments_paths=self.alignments_paths,
         preview_scale=self.args.preview_scale,
         pingpong=self.args.pingpong,
         memory_saving_gradients=self.args.memory_saving_gradients,
         predict=False)
     logger.debug("Loaded Model")
     return model
Example #18
    def _load_model(self):
        """ Load the model requested for training.

        Returns
        -------
        :mod:`plugins.train.model` plugin
            The requested model plugin
        """
        logger.debug("Loading Model")
        model_dir = get_folder(self._args.model_dir)
        configfile = self._args.configfile if hasattr(self._args,
                                                      "configfile") else None
        augment_color = not self._args.no_augment_color
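        # TPU setup: resolve the cluster, connect to it, initialize the TPU
        # system, then build the model under a TPUStrategy scope so its
        # variables are placed on the TPU replicas.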
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
        tf.config.experimental_connect_to_host(resolver.master())
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)
        with strategy.scope():
            model = PluginLoader.get_model(self.trainer_name)(
                model_dir,
                gpus=self._args.gpus,
                configfile=configfile,
                snapshot_interval=self._args.snapshot_interval,
                no_logs=self._args.no_logs,
                warp_to_landmarks=self._args.warp_to_landmarks,
                augment_color=augment_color,
                no_flip=self._args.no_flip,
                training_image_size=self._image_size,
                alignments_paths=self._alignments_paths,
                preview_scale=self._args.preview_scale,
                pingpong=self._args.pingpong,
                memory_saving_gradients=self._args.memory_saving_gradients,
                optimizer_savings=self._args.optimizer_savings,
                predict=False)
        logger.debug("Loaded Model")
        return model
Example #19
    def get_argument_list(self):
        argument_list = list()
        argument_list.append({
            "opts": ("-a", "--alignments"),
            "action": FileFullPaths,
            "type": str,
            "group": "data",
            "required": True,
            "filetypes": "alignments",
            "help": "Full path to the alignments file to add the mask to. NB: if the mask already "
                    "exists in the alignments file it will be overwritten."})
        argument_list.append({
            "opts": ("-i", "--input"),
            "action": DirOrFileFullPaths,
            "type": str,
            "group": "data",
            "required": True,
            "help": "Directory containing extracted faces, source frames, or a video file."})
        argument_list.append({
            "opts": ("-it", "--input-type"),
            "action": Radio,
            "type": str.lower,
            "choices": ("faces", "frames"),
            "dest": "input_type",
            "group": "data",
            "default": "frames",
            "help": "R|Whether the `input` is a folder of faces or a folder frames/video"
                    "\nL|faces: The input is a folder containing extracted faces."
                    "\nL|frames: The input is a folder containing frames or is a video"})
        argument_list.append({
            "opts": ("-M", "--masker"),
            "action": Radio,
            "type": str.lower,
            "choices": PluginLoader.get_available_extractors("mask"),
            "default": "extended",
            "group": "process",
            "help": "R|Masker to use."
                    "\nL|components: Mask designed to provide facial segmentation based on the "
                    "positioning of landmark locations. A convex hull is constructed around the "
                    "exterior of the landmarks to create a mask."
                    "\nL|extended: Mask designed to provide facial segmentation based on the "
                    "positioning of landmark locations. A convex hull is constructed around the "
                    "exterior of the landmarks and the mask is extended upwards onto the forehead."
                    "\nL|vgg-clear: Mask designed to provide smart segmentation of mostly frontal "
                    "faces clear of obstructions. Profile faces and obstructions may result in "
                    "sub-par performance."
                    "\nL|vgg-obstructed: Mask designed to provide smart segmentation of mostly "
                    "frontal faces. The mask model has been specifically trained to recognize "
                    "some facial obstructions (hands and eyeglasses). Profile faces may result in "
                    "sub-par performance."
                    "\nL|unet-dfl: Mask designed to provide smart segmentation of mostly frontal "
                    "faces. The mask model has been trained by community members and will need "
                    "testing for further description. Profile faces may result in sub-par "
                    "performance."})
        argument_list.append({
            "opts": ("-p", "--processing"),
            "action": Radio,
            "type": str.lower,
            "choices": ("all", "missing", "output"),
            "default": "missing",
            "group": "process",
            "help": "R|Whether to update all masks in the alignments files, only those faces "
                    "that do not already have a mask of the given `mask type` or just to output "
                    "the masks to the `output` location."
                    "\nL|all: Update the mask for all faces in the alignments file."
                    "\nL|missing: Create a mask for all faces in the alignments file where a mask "
                    "does not previously exist."
                    "\nL|output: Don't update the masks, just output them for review in the given "
                    "output folder."})
        argument_list.append({
            "opts": ("-o", "--output-folder"),
            "action": DirFullPaths,
            "dest": "output",
            "type": str,
            "group": "output",
            "help": "Optional output location. If provided, a preview of the masks created will "
                    "be output in the given folder."})
        argument_list.append({
            "opts": ("-b", "--blur_kernel"),
            "action": Slider,
            "type": int,
            "group": "output",
            "min_max": (0, 9),
            "default": 3,
            "rounding": 1,
            "help": "Apply gaussian blur to the mask output. Has the effect of smoothing the "
                    "edges of the mask giving less of a hard edge. the size is in pixels. This "
                    "value should be odd, if an even number is passed in then it will be rounded "
                    "to the next odd number. NB: Only effects the output preview. Set to 0 for "
                    "off"})
        argument_list.append({
            "opts": ("-t", "--threshold"),
            "action": Slider,
            "type": int,
            "group": "output",
            "min_max": (0, 50),
            "default": 4,
            "rounding": 1,
            "help": "Helps reduce 'blotchiness' on some masks by making light shades white "
                    "and dark shades black. Higher values will impact more of the mask. NB: "
                    "Only effects the output preview. Set to 0 for off"})

        return argument_list
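
These option dictionaries target Faceswap's own CLI/GUI wrapper, but for illustration here is a minimal sketch of replaying them through plain argparse (a hypothetical harness; the custom action classes and GUI-only keys that standard argparse rejects are stripped first):

    import argparse

    # `argument_list` is the value returned by get_argument_list() above.
    parser = argparse.ArgumentParser()
    for arg in argument_list:
        opts = arg.pop("opts")
        # "action" (custom classes), "group", "filetypes", "min_max" and
        # "rounding" are Faceswap/GUI extensions, so only standard keys pass.
        kwargs = {key: val for key, val in arg.items()
                  if key in ("type", "choices", "dest", "default",
                             "required", "nargs", "help")}
        parser.add_argument(*opts, **kwargs)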
Example #20
    def set_globals(self):
        """
        Set the global options for training

        Loss Documentation
        MAE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        MSE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        LogCosh https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        Smooth L1 https://arxiv.org/pdf/1701.03077.pdf
        L_inf_norm https://medium.com/@montjoile/l0-norm-l1-norm-l2-norm-l-infinity-norm-7a7d18a4f40c
        SSIM http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf
        GMSD https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
        """
        logger.debug("Setting global config")
        section = "global"
        self.add_section(title=section,
                         info="Options that apply to all models" +
                         ADDITIONAL_INFO)
        self.add_item(
            section=section,
            title="coverage",
            datatype=float,
            default=68.75,
            min_max=(62.5, 100.0),
            rounding=2,
            fixed=True,
            group="face",
            info=
            "How much of the extracted image to train on. A lower coverage will limit the "
            "model's scope to a zoomed-in central area while higher amounts can include the "
            "entire face. A trade-off exists between lower amounts given more detail "
            "versus higher amounts avoiding noticeable swap transitions. Sensible values to "
            "use are:"
            "\n\t62.5%% spans from eyebrow to eyebrow."
            "\n\t75.0%% spans from temple to temple."
            "\n\t87.5%% spans from ear to ear."
            "\n\t100.0%% is a mugshot.")
        self.add_item(
            section=section,
            title="mask_type",
            datatype=str,
            default="extended",
            choices=PluginLoader.get_available_extractors("mask",
                                                          add_none=True),
            group="mask",
            gui_radio=True,
            info=
            "The mask to be used for training. If you have selected 'Learn Mask' or "
            "'Penalized Mask Loss' you must select a value other than 'none'. The required "
            "mask should have been selected as part of the Extract process. If it does not "
            "exist in the alignments file then it will be generated prior to training "
            "commencing."
            "\n\tnone: Don't use a mask."
            "\n\tcomponents: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks to create a mask."
            "\n\textended: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks and the mask is extended upwards onto the forehead."
            "\n\tvgg-clear: Mask designed to provide smart segmentation of mostly frontal "
            "faces clear of obstructions. Profile faces and obstructions may result in "
            "sub-par performance."
            "\n\tvgg-obstructed: Mask designed to provide smart segmentation of mostly "
            "frontal faces. The mask model has been specifically trained to recognize "
            "some facial obstructions (hands and eyeglasses). Profile faces may result in "
            "sub-par performance."
            "\n\tunet-dfl: Mask designed to provide smart segmentation of mostly frontal "
            "faces. The mask model has been trained by community members and will need "
            "testing for further description. Profile faces may result in sub-par "
            "performance.")
        self.add_item(
            section=section,
            title="mask_blur_kernel",
            datatype=int,
            min_max=(0, 9),
            rounding=1,
            default=3,
            group="mask",
            info=
            "Apply gaussian blur to the mask input. This has the effect of smoothing the "
            "edges of the mask, which can help with poorly calculated masks and give less "
            "of a hard edge to the predicted mask. The size is in pixels (calculated from "
            "a 128px mask). Set to 0 to not apply gaussian blur. This value should be odd, "
            "if an even number is passed in then it will be rounded to the next odd number."
        )
        self.add_item(
            section=section,
            title="mask_threshold",
            datatype=int,
            default=4,
            min_max=(0, 50),
            rounding=1,
            group="mask",
            info=
            "Sets pixels that are near white to white and near black to black. Set to 0 for "
            "off.")
        self.add_item(
            section=section,
            title="learn_mask",
            datatype=bool,
            default=False,
            group="mask",
            info=
            "Dedicate a portion of the model to learning how to duplicate the input "
            "mask. Increases VRAM usage in exchange for learning a quick ability to try "
            "to replicate more complex mask models.")
        self.add_item(
            section=section,
            title="penalized_mask_loss",
            datatype=bool,
            default=True,
            group="loss",
            info=
            "Image loss function is weighted by mask presence. For areas of "
            "the image without the facial mask, reconstuction errors will be "
            "ignored while the masked face area is prioritized. May increase "
            "overall quality by focusing attention on the core face area.")
        self.add_item(
            section=section,
            title="loss_function",
            datatype=str,
            group="loss",
            default="mae",
            choices=[
                "mae", "mse", "logcosh", "smooth_loss", "l_inf_norm", "ssim",
                "gmsd", "pixel_gradient_diff"
            ],
            info="The loss function to use."
            "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
            "towards its median value in the training dataset. Robust to outliers but as "
            "a median, it can potentially ignore some infrequent image types in the dataset."
            "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
            "towards its average value in the training dataset. As an avg, it will be "
            "suspectible to outliers and typically produces slightly blurrier results."
            "\n\t LogCosh - log(cosh(x)) acts similiar to MSE for small errors and to "
            "MAE for large errors. Like MSE, it is very stable and prevents overshoots "
            "when errors are near zero. Like MAE, it is robust to outliers. NB: Due to a bug "
            "in PlaidML, this loss does not work on AMD cards."
            "\n\t Smooth_L1 --- Modification of the MAE loss to correct two of its "
            "disadvantages. This loss has improved stability and guidance for small errors."
            "\n\t L_inf_norm --- The L_inf norm will reduce the largest individual pixel "
            "error in an image. As each largest error is minimized sequentially, the "
            "overall error is improved. This loss will be extremely focused on outliers."
            "\n\t SSIM - Structural Similarity Index Metric is a perception-based "
            "loss that considers changes in texture, luminance, contrast, and local spatial "
            "statistics of an image. Potentially delivers more realistic looking images."
            "\n\t GMSD - Gradient Magnitude Similarity Deviation seeks to match "
            "the global standard deviation of the pixel to pixel differences between two "
            "images. Similiar in approach to SSIM. NB: This loss does not currently work on "
            "AMD cards."
            "\n\t Pixel_Gradient_Difference - Instead of minimizing the difference between "
            "the absolute value of each pixel in two reference images, compute the pixel to "
            "pixel spatial difference in each image and then minimize that difference "
            "between two images. Allows for large color shifts,but maintains the structure "
            "of the image.\n")
        self.add_item(
            section=section,
            title="icnr_init",
            datatype=bool,
            default=False,
            group="initialization",
            info=
            "Use ICNR to tile the default initializer in a repeating pattern. "
            "This strategy is designed for pairing with sub-pixel / pixel shuffler "
            "to reduce the 'checkerboard effect' in image reconstruction. "
            "\n\t https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf")
        self.add_item(
            section=section,
            title="conv_aware_init",
            datatype=bool,
            default=False,
            group="initialization",
            info=
            "Use Convolution Aware Initialization for convolutional layers. "
            "This can help eradicate the vanishing and exploding gradient problem "
            "as well as lead to higher accuracy, lower loss and faster convergence.\nNB:"
            "\n\t This can use more VRAM when creating a new model so you may want to "
            "lower the batch size for the first run. The batch size can be raised "
            "again when reloading the model. "
            "\n\t Multi-GPU is not supported for this option, so you should start the model "
            "on a single GPU. Once training has started, you can stop training, enable "
            "multi-GPU and resume."
            "\n\t Building the model will likely take several minutes as the calculations "
            "for this initialization technique are expensive. This will only impact starting "
            "a new model.")
        self.add_item(
            section=section,
            title="optimizer",
            datatype=str,
            gui_radio=True,
            group="optimizer",
            default="adam",
            choices=["adam", "nadam", "rms-prop"],
            info="The optimizer to use."
            "\n\t adam - Adaptive Moment Optimization. A stochastic gradient descent method "
            "that is based on adaptive estimation of first-order and second-order moments."
            "\n\t nadam - Adaptive Moment Optimization with Nesterov Momentum. Much like "
            "Adam but uses a different formula for calculating momentum."
            "\n\t rms-prop - Root Mean Square Propogation. Maintains a moving (discounted) "
            "average of the square of the gradients. Divides the gradient by the root of "
            "this average.")
        self.add_item(
            section=section,
            title="learning_rate",
            datatype=float,
            default=5e-5,
            min_max=(1e-6, 1e-4),
            rounding=6,
            fixed=False,
            group="optimizer",
            info=
            "Learning rate - how fast your network will learn (how large are the "
            "modifications to the model weights after one batch of training). Values that "
            "are too large might result in model crashes and the inability of the model to "
            "find the best solution. Values that are too small might be unable to escape "
            "from dead-ends and find the best global minimum.")
        self.add_item(
            section=section,
            title="reflect_padding",
            datatype=bool,
            default=False,
            group="network",
            info=
            "Use reflection padding rather than zero padding with convolutions. "
            "Each convolution must pad the image boundaries to maintain the proper "
            "sizing. More complex padding schemes can reduce artifacts at the "
            "border of the image."
            "\n\t http://www-cs.engr.ccny.cuny.edu/~wolberg/cs470/hw/hw2_pad.txt"
        )
        self.add_item(
            section=section,
            title="allow_growth",
            datatype=bool,
            default=False,
            group="network",
            fixed=False,
            info=
            "[Nvidia Only]. Enable the Tensorflow GPU 'allow_growth' configuration option. "
            "This option prevents Tensorflow from allocating all of the GPU VRAM at launch "
            "but can lead to higher VRAM fragmentation and slower performance. Should only "
            "be enabled if you are receiving errors regarding 'cuDNN fails to initialize' "
            "when commencing training.")
        self.add_item(
            section=section,
            title="mixed_precision",
            datatype=bool,
            default=False,
            group="network",
            info=
            "R|[Nvidia Only], NVIDIA GPUs can run operations in float16 faster than in "
            "float32. Mixed precision allows you to use a mix of float16 with float32, to "
            "get the performance benefits from float16 and the numeric stability benefits "
            "from float32.\nWhile mixed precision will run on most Nvidia models, it will "
            "only speed up training on more recent GPUs. Those with compute capability 7.0 "
            "or higher will see the greatest performance benefit from mixed precision "
            "because they have Tensor Cores. Older GPUs offer no math performance benefit "
            "for using mixed precision, however memory and bandwidth savings can enable some "
            "speedups. Generally RTX GPUs and later will offer the most benefit."
        )
        self.add_item(
            section=section,
            title="convert_batchsize",
            datatype=int,
            default=16,
            min_max=(1, 32),
            rounding=1,
            fixed=False,
            group="convert",
            info=
            "[GPU Only]. The number of faces to feed through the model at once when running "
            "the Convert process.\n\nNB: Increasing this figure is unlikely to improve "
            "convert speed, however, if you are getting Out of Memory errors, then you may "
            "want to reduce the batch size.")
Example #21
        self.args.final_process = _final.replace('-', '_')

        self.sort_process()

    def launch_aligner(self):
        """ Load the aligner plugin to retrieve landmarks """
        out_queue = queue_manager.get_queue("out")
        kwargs = {"in_queue": queue_manager.get_queue("in"),
                  "out_queue": out_queue}

<<<<<<< HEAD
        for plugin in ("fan", "dlib"):
=======
        for plugin in ("fan", "cv2_dnn"):
>>>>>>> upstream/master
            aligner = PluginLoader.get_aligner(plugin)(loglevel=self.args.loglevel)
            process = SpawnProcess(aligner.run, **kwargs)
            event = process.event
            process.start()
            # Wait for Aligner to initialize
            # The first ever load of the model for FAN has reportedly taken
            # up to 3-4 minutes, hence high timeout.
            event.wait(300)

            if not event.is_set():
                if plugin == "fan":
                    process.join()
                    logger.error("Error initializing FAN. Trying CV2-DNN")
                    continue
                else:
                    raise ValueError("Error inititalizing Aligner")
Example #22
training_image_size = 256
alignments_paths = None
preview_scale = 50
pingpong = False
memory_saving_gradients = False
optimizer_savings = False
predict = False

model = PluginLoader.get_model(trainer_name)(
    model_dir,
    gpus=gpus,
    configfile=configfile,
    snapshot_interval=snapshot_interval,
    no_logs=no_logs,
    warp_to_landmarks=warp_to_landmarks,
    augment_color=augment_color,
    no_flip=no_flip,
    training_image_size=training_image_size,
    alignments_paths=alignments_paths,
    preview_scale=preview_scale,
    pingpong=pingpong,
    memory_saving_gradients=memory_saving_gradients,
    optimizer_savings=optimizer_savings,
    predict=predict)

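# The zero-filled arrays below stand in for an aggregated batch of source
# images plus a single-channel map per image (batch_size * 64 samples each);
# interpreting the trailing dimension of 1 as a mask is an assumption.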
model_sources = [
    np.zeros(
        (batch_size * 64, input_shape[0], input_shape[1], input_shape[2])),
    np.zeros((batch_size * 64, input_shape[0], input_shape[1], 1))
]

model_targets = [
Example #23
 def get_argument_list():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = list()
     argument_list.append({
         "opts": ("-A", "--input-A"),
         "action": DirFullPaths,
         "dest": "input_a",
         "default": "input_a",
         "help": "Input directory. A directory containing training images "
                 "for face A. Defaults to 'input_a'"})
     argument_list.append({
         "opts": ("-B", "--input-B"),
         "action": DirFullPaths,
         "dest": "input_b",
         "default": "input_b",
         "help": "Input directory. A directory containing training images "
                 "for face B. Defaults to 'input_b'"})
     argument_list.append({
         "opts": ("-ala", "--alignments-A"),
         "action": FileFullPaths,
         "filetypes": "alignments",
         "type": str,
         "dest": "alignments_path_a",
         "default": None,
         "help": "Path to alignments file for training set A. Only required "
                 "if you are using a masked model or warp-to-landmarks is "
                 "enabled. Defaults to <input-A>/alignments.json if not "
                 "provided."})
     argument_list.append({
         "opts": ("-alb", "--alignments-B"),
         "action": FileFullPaths,
         "filetypes": "alignments",
         "type": str,
         "dest": "alignments_path_b",
         "default": None,
         "help": "Path to alignments file for training set B. Only required "
                 "if you are using a masked model or warp-to-landmarks is "
                 "enabled. Defaults to <input-B>/alignments.json if not "
                 "provided."})
     argument_list.append({
         "opts": ("-m", "--model-dir"),
         "action": DirFullPaths,
         "dest": "model_dir",
         "default": "models",
         "help": "Model directory. This is where the training data will be "
                 "stored. Defaults to 'models'"})
     argument_list.append({
         "opts": ("-t", "--trainer"),
         "action": Radio,
         "type": str.lower,
         "choices": PluginLoader.get_available_models(),
         "default": PluginLoader.get_default_model(),
         "help": "R|Select which trainer to use. Trainers can be"
                 "\nconfigured from the edit menu or the config folder."
                 "\n'original': The original model created by /u/deepfakes."
                 "\n'dfaker': 64px in/128px out model from dfaker."
                 "\n\tEnable 'warp-to-landmarks' for full dfaker method."
                 "\n'dfl-h128': 128px in/out model from deepfacelab."
                 "\n'iae': A model that uses intermediate layers to try to"
                 "\n\tget better details."
                 "\n'lightweight': A lightweight model for low-end cards."
                 "\n\tDon't expect great results. Can train as low as 1.6GB"
                 "\n\twith batch size 8."
                 "\n'unbalanced': 128px in/out model from andenixa. The"
                 "\n\tautoencoders are unbalanced so B>A swaps won't work so"
                 "\n\twell. Very configurable."
                 "\n'villain': 128px in/out model from villainguy. Very"
                 "\n\tresource hungry (11GB for batchsize 16). Good for"
                 "\n\tdetails, but more susceptible to color differences."})
     argument_list.append({
         "opts": ("-s", "--save-interval"),
         "type": int,
         "action": Slider,
         "min_max": (10, 1000),
         "rounding": 10,
         "dest": "save_interval",
         "default": 100,
         "help": "Sets the number of iterations before saving the model"})
     argument_list.append({
         "opts": ("-bs", "--batch-size"),
         "type": int,
         "action": Slider,
         "min_max": (2, 256),
         "rounding": 2,
         "dest": "batch_size",
         "default": 64,
         "help": "Batch size, as a power of 2 (64, 128, 256, etc)"})
     argument_list.append({
         "opts": ("-it", "--iterations"),
         "type": int,
         "action": Slider,
         "min_max": (0, 5000000),
         "rounding": 20000,
         "default": 1000000,
         "help": "Length of training in iterations."})
     argument_list.append({
         "opts": ("-g", "--gpus"),
         "type": int,
         "action": Slider,
         "min_max": (1, 10),
         "rounding": 1,
         "default": 1,
         "help": "Number of GPUs to use for training"})
     argument_list.append({
         "opts": ("-ps", "--preview-scale"),
         "type": int,
         "action": Slider,
         "dest": "preview_scale",
         "min_max": (25, 200),
         "rounding": 25,
         "default": 50,
         "help": "Percentage amount to scale the preview by."})
     argument_list.append({
         "opts": ("-p", "--preview"),
         "action": "store_true",
         "dest": "preview",
         "default": False,
         "help": "Show preview output. If not specified, write progress "
                 "to file"})
     argument_list.append({
         "opts": ("-w", "--write-image"),
         "action": "store_true",
         "dest": "write_image",
         "default": False,
         "help": "Writes the training result to a file even in preview "
                 "mode"})
     argument_list.append({
         "opts": ("-ag", "--allow-growth"),
         "action": "store_true",
         "dest": "allow_growth",
         "default": False,
         "help": "Sets the allow_growth option of Tensorflow to spare "
                 "memory on some configurations"})
     argument_list.append({
         "opts": ("-nl", "--no-logs"),
         "action": "store_true",
         "dest": "no_logs",
         "default": False,
         "help": "Disables TensorBoard logging. NB: Disabling logs means "
                 "that you will not be able to use the graph or analysis "
                 "for this session in the GUI."})
     argument_list.append({
         "opts": ("-pp", "--ping-pong"),
         "action": "store_true",
         "dest": "pingpong",
         "default": False,
         "help": "Enable ping pong training. Trains one side at a time, "
                 "switching sides at each save iteration. Training will "
                 "take 2 to 4 times longer, with about a 30%%-50%% "
                 "reduction in VRAM usage. NB: Preview won't show until "
                 "both sides have been trained once."})
     argument_list.append({
         "opts": ("-msg", "--memory-saving-gradients"),
         "action": "store_true",
         "dest": "memory_saving_gradients",
         "default": False,
         "help": "Trades off VRAM usage against computation time. Can fit "
                 "larger models into memory at a cost of slower training "
                 "speed. 50%%-150%% batch size increase for 20%%-50%% "
                 "longer training time. NB: Launch time will be "
                 "significantly delayed. Switching sides using ping-pong "
                 "training will take longer."})
     argument_list.append({
         "opts": ("-wl", "--warp-to-landmarks"),
         "action": "store_true",
         "dest": "warp_to_landmarks",
         "default": False,
         "help": "Warps training faces to closely matched landmarks from "
                 "the opposite face-set rather than randomly warping the "
                 "face. This is the 'dfaker' way of doing warping. "
                 "Alignments files for both sets of faces must be provided "
                 "if using this option."})
     argument_list.append({
         "opts": ("-nf", "--no-flip"),
         "action": "store_true",
         "dest": "no_flip",
         "default": False,
         "help": "To effectively learn, a random set of images are flipped "
                 "horizontally. Sometimes it is desirable for this not to "
                 "occur. Generally this should be left off except during "
                 "'fit training'."})
     argument_list.append({
         "opts": ("-tia", "--timelapse-input-A"),
         "action": DirFullPaths,
         "dest": "timelapse_input_a",
         "default": None,
         "help": "For if you want a timelapse: The input folder for the "
                 "timelapse. This folder should contain faces of A which "
                 "will be converted for the timelapse. You must supply a "
                 "--timelapse-output and a --timelapse-input-B parameter."})
     argument_list.append({
         "opts": ("-tib", "--timelapse-input-B"),
         "action": DirFullPaths,
         "dest": "timelapse_input_b",
         "default": None,
         "help": "For if you want a timelapse: The input folder for the "
                 "timelapse. This folder should contain faces of B which "
                 "will be converted for the timelapse. You must supply a "
                 "--timelapse-output and a --timelapse-input-A parameter."})
     argument_list.append({
         "opts": ("-to", "--timelapse-output"),
         "action": DirFullPaths,
         "dest": "timelapse_output",
         "default": None,
         "help": "The output folder for the timelapse. If the input "
                 "folders are supplied but no output folder, it will "
                 "default to your model folder /timelapse/"})
     return argument_list
Example #24
 def _load_trainer(self, model):
     trainer = PluginLoader.get_trainer(model.trainer)
     trainer = trainer(model, self._data_dir, self._args.batch_size)
     return trainer
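
A hypothetical loop around the returned trainer; the train_one_step name and arguments are assumptions based on typical trainer plugins, not shown in this snippet:

    trainer = self._load_trainer(model)
    for _ in range(iterations):
        # Assumed API: one optimization step per call.
        trainer.train_one_step(None, None)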
Example #25
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = []
     argument_list.append({"opts": ("-m", "--model-dir"),
                           "action": DirFullPaths,
                           "dest": "model_dir",
                           "default": "models",
                           "help": "Model directory. A directory "
                                   "containing the trained model you wish "
                                   "to process. Defaults to 'models'"})
     argument_list.append({"opts": ("-a", "--input-aligned-dir"),
                           "action": DirFullPaths,
                           "dest": "input_aligned_dir",
                           "default": None,
                           "help": "Input \"aligned directory\". A "
                                   "directory that should contain the "
                                   "aligned faces extracted from the input "
                                   "files. If you delete faces from this "
                                   "folder, they'll be skipped during "
                                   "conversion. If no aligned dir is "
                                   "specified, all faces will be "
                                   "converted"})
     argument_list.append({"opts": ("-t", "--trainer"),
                           "type": str.lower,
                           "choices": PluginLoader.get_available_models(),
                           "default": PluginLoader.get_default_model(),
                           "help": "Select the trainer that was used to "
                                   "create the model"})
     argument_list.append({"opts": ("-c", "--converter"),
                           "type": str.lower,
                           "choices": PluginLoader.get_available_converters(),
                           "default": "masked",
                           "help": "Converter to use"})
     argument_list.append({
         "opts": ("-M", "--mask-type"),
         "type": str.lower,
         "dest": "mask_type",
         "choices": ["ellipse",
                     "facehull",
                     "dfl",
                     #  "cnn",  Removed until implemented
                     "none"],
         "default": "facehull",
         "help": "R|Mask to use to replace faces."
                 "\nellipse: Oval around face."
                 "\nfacehull: Face cutout based on landmarks."
                 "\ndfl: A Face Hull mask from DeepFaceLabs."
                 #  "\ncnn: Not yet implemented"  Removed until implemented
                 "\nnone: No mask. Can still use blur and erode on the edges of the swap box."})
     argument_list.append({"opts": ("-b", "--blur-size"),
                           "type": float,
                           "action": Slider,
                           "min_max": (0.0, 100.0),
                           "rounding": 2,
                           "default": 5.0,
                           "help": "Blur kernel size as a percentage of the swap area. Smooths "
                                   "the transition between the swapped face and the background "
                                   "image."})
     argument_list.append({"opts": ("-e", "--erosion-size"),
                           "dest": "erosion_size",
                           "type": float,
                           "action": Slider,
                           "min_max": (-100.0, 100.0),
                           "rounding": 2,
                           "default": 0.0,
                           "help": "Erosion kernel size as a percentage of the mask radius "
                                   "area. Positive values apply erosion which reduces the size "
                                   "of the swapped area. Negative values apply dilation which "
                                   "increases the swapped area"})
     argument_list.append({"opts": ("-g", "--gpus"),
                           "type": int,
                           "action": Slider,
                           "min_max": (1, 10),
                           "rounding": 1,
                           "default": 1,
                           "help": "Number of GPUs to use for conversion"})
     argument_list.append({"opts": ("-sh", "--sharpen"),
                           "type": str.lower,
                           "dest": "sharpen_image",
                           "choices": ["box_filter", "gaussian_filter", "none"],
                           "default": "none",
                           "help": "Sharpen the masked facial region of "
                                   "the converted images. Choice of filter "
                                   "to use in sharpening process -- box"
                                   "filter or gaussian filter."})
     argument_list.append({"opts": ("-fr", "--frame-ranges"),
                           "nargs": "+",
                           "type": str,
                           "help": "frame ranges to apply transfer to e.g. "
                                   "For frames 10 to 50 and 90 to 100 use "
                                   "--frame-ranges 10-50 90-100. Files "
                                   "must have the frame-number as the last "
                                   "number in the name!"})
     argument_list.append({"opts": ("-d", "--discard-frames"),
                           "action": "store_true",
                           "dest": "discard_frames",
                           "default": False,
                           "help": "When used with --frame-ranges discards "
                                   "frames that are not processed instead "
                                   "of writing them out unchanged"})
     argument_list.append({"opts": ("-s", "--swap-model"),
                           "action": "store_true",
                           "dest": "swap_model",
                           "default": False,
                           "help": "Swap the model. Instead of A -> B, "
                                   "swap B -> A"})
     argument_list.append({"opts": ("-S", "--seamless"),
                           "action": "store_true",
                           "dest": "seamless_clone",
                           "default": False,
                           "help": "Use cv2's seamless clone function to "
                                   "remove extreme gradients at the mask "
                                   "seam by smoothing colors."})
     argument_list.append({"opts": ("-mh", "--match-histogram"),
                           "action": "store_true",
                           "dest": "match_histogram",
                           "default": False,
                           "help": "Adjust the histogram of each color "
                                   "channel in the swapped reconstruction "
                                   "to equal the histogram of the masked "
                                   "area in the orginal image"})
     argument_list.append({"opts": ("-aca", "--avg-color-adjust"),
                           "action": "store_true",
                           "dest": "avg_color_adjust",
                           "default": False,
                           "help": "Adjust the mean of each color channel "
                                   " in the swapped reconstruction to "
                                   "equal the mean of the masked area in "
                                   "the orginal image"})
     argument_list.append({"opts": ("-sb", "--smooth-box"),
                           "action": "store_true",
                           "dest": "smooth_box",
                           "default": False,
                           "help": "Perform a Gaussian blur on the edges of the face box "
                                   "received from the model. Helps reduce pronounced edges "
                                   "of the swap area"})
     argument_list.append({"opts": ("-dt", "--draw-transparent"),
                           "action": "store_true",
                           "dest": "draw_transparent",
                           "default": False,
                           "help": "Place the swapped face on a "
                                   "transparent layer rather than the "
                                   "original frame."})
     return argument_list
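
A minimal sketch of how option dictionaries like the ones above can be fed to argparse. The helper name is an assumption for illustration; the faceswap CLI wraps this with extra GUI handling, so treat this as a sketch rather than the project's actual code:

import argparse

def build_parser(argument_list):
    """ Turn a list of option dicts into an argparse parser """
    parser = argparse.ArgumentParser()
    for option in argument_list:
        flags = option["opts"]
        # Everything except "opts" maps straight onto add_argument kwargs
        kwargs = {key: val for key, val in option.items() if key != "opts"}
        parser.add_argument(*flags, **kwargs)
    return parser

arguments = build_parser(get_optional_arguments()).parse_args()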
Example #27
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = []
     argument_list.append({
         "opts": ("--serializer", ),
         "type":
         str.lower,
         "dest":
         "serializer",
         "default":
         "json",
         "choices": ("json", "pickle", "yaml"),
         "help":
         "Serializer for alignments file. If "
         "yaml is chosen and not available, then "
         "json will be used as the default "
         "fallback."
     })
     argument_list.append({
         "opts": ("-D", "--detector"),
         "type":
         str,
         "choices":
         PluginLoader.get_available_extractors("detect"),
         "default":
         "mtcnn",
         "help":
         "R|Detector to use."
         "\n'dlib-hog': uses least resources, but is the"
         "\n\tleast reliable."
         "\n'dlib-cnn': faster than mtcnn but detects"
         "\n\tfewer faces and fewer false positives."
         "\n'mtcnn': slower than dlib, but uses fewer"
         "\n\tresources whilst detecting more faces and"
         "\n\tmore false positives. Has superior"
         "\n\talignment to dlib"
     })
     argument_list.append({
         "opts": ("-A", "--aligner"),
         "type":
         str,
         "choices":
         PluginLoader.get_available_extractors("align"),
         "default":
         "fan",
         "help":
         "R|Aligner to use."
         "\n'dlib': Dlib Pose Predictor. Faster, less "
         "\n\tresource intensive, but less accurate."
         "\n'fan': Face Alignment Network. Best aligner."
         "\n\tGPU heavy."
     })
     argument_list.append({
         "opts": ("-mtms", "--mtcnn-minsize"),
         "type":
         int,
         "dest":
         "mtcnn_minsize",
         "default":
         20,
         "help":
         "The minimum size of a face to be "
         "accepted. Lower values use "
         "significantly more VRAM. Minimum "
         "value is 10. Default is 20 "
         "(MTCNN detector only)"
     })
     argument_list.append({
         "opts": ("-mtth", "--mtcnn-threshold"),
         "nargs":
         "+",
         "type":
         str,
         "dest":
         "mtcnn_threshold",
         "default": ["0.6", "0.7", "0.7"],
         "help":
         "R|Three step threshold for face "
         "detection. Should be\nthree decimal "
         "numbers each less than 1. Eg:\n"
         "'--mtcnn-threshold 0.6 0.7 0.7'.\n"
         "1st stage: obtains face candidates.\n"
         "2nd stage: refinement of face "
         "candidates.\n3rd stage: further "
         "refinement of face candidates.\n"
         "Default is 0.6 0.7 0.7 "
         "(MTCNN detector only)"
     })
     argument_list.append({
         "opts": ("-mtsc", "--mtcnn-scalefactor"),
         "type":
         float,
         "dest":
         "mtcnn_scalefactor",
         "default":
         0.709,
         "help":
         "The scale factor for the image "
         "pyramid. Should be a decimal number "
         "less than one. Default is 0.709 "
         "(MTCNN detector only)"
     })
     argument_list.append({
         "opts": ("-r", "--rotate-images"),
         "type":
         str,
         "dest":
         "rotate_images",
         "default":
         None,
         "help":
         "If a face isn't found, rotate the "
         "images to try to find a face. Can find "
         "more faces at the cost of extraction "
         "speed. Pass in a single number to use "
         "increments of that size up to 360, or "
         "pass in a list of numbers to enumerate "
         "exactly what angles to check"
     })
     argument_list.append({
         "opts": ("-bt", "--blur-threshold"),
         "type":
         int,
         "dest":
         "blur_thresh",
         "default":
         None,
         "help":
         "Automatically discard images blurrier "
         "than the specified threshold. "
         "Discarded images are moved into a "
         "\"blurry\" sub-folder. Lower values "
         "allow more blur"
     })
     argument_list.append({
         "opts": ("-mp", "--multiprocess"),
         "action":
         "store_true",
         "default":
         False,
         "help":
         "Run extraction in parallel. Offers "
         "speed up for some extractor/detector "
         "combinations, less so for others. "
         "Only has an effect if both the "
         "aligner and detector use the GPU, "
         "otherwise this is automatic."
     })
     argument_list.append({
         "opts": ("-sz", "--size"),
         "type":
         int,
         "default":
         256,
         "help":
         "The output size of extracted faces. "
         "Make sure that the model you intend "
         "to train supports your required "
         "size. This will only need to be "
         "changed for hi-res models."
     })
     argument_list.append({
         "opts": ("-s", "--skip-existing"),
         "action":
         "store_true",
         "dest":
         "skip_existing",
         "default":
         False,
         "help":
         "Skips frames that have already been "
         "extracted and exist in the alignments "
         "file"
     })
     argument_list.append({
         "opts": ("-sf", "--skip-existing-faces"),
         "action":
         "store_true",
         "dest":
         "skip_faces",
         "default":
         False,
         "help":
         "Skip frames that already have "
         "detected faces in the alignments "
         "file"
     })
     argument_list.append({
         "opts": ("-dl", "--debug-landmarks"),
         "action":
         "store_true",
         "dest":
         "debug_landmarks",
         "default":
         False,
         "help":
         "Draw landmarks on the ouput faces for "
         "debug"
     })
     argument_list.append({
         "opts": ("-ae", "--align-eyes"),
         "action":
         "store_true",
         "dest":
         "align_eyes",
         "default":
         False,
         "help":
         "Perform extra alignment to ensure "
         "left/right eyes are  at the same "
         "height"
     })
     argument_list.append({
         "opts": ("-si", "--save-interval"),
         "dest":
         "save_interval",
         "type":
         int,
         "default":
         None,
         "help":
         "Automatically save the alignments file "
         "after a set amount of frames. Will "
         "only save at the end of extracting by "
         "default. WARNING: Don't interrupt the "
         "script when writing the file because "
         "it might get corrupted."
     })
     return argument_list
Example #28
 def load_aligner(aligner, loglevel):
     """ Set global arguments and load aligner plugin """
     aligner_name = aligner.replace("-", "_").lower()
     logger.debug("Loading Aligner: '%s'", aligner_name)
     aligner = PluginLoader.get_aligner(aligner_name)(loglevel=loglevel)
     return aligner
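
A short usage sketch for the loader above; the plugin name and loglevel values are assumptions for illustration:

aligner = load_aligner("fan", "INFO")    # resolves to PluginLoader.get_aligner("fan")
fallback = load_aligner("cv2-dnn", "DEBUG")  # hyphen is normalised to "cv2_dnn"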
Example #30
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model not found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB
            # Iterate detected faces (assumes the old lib.faces_detect
            # detect_faces() generator, which yields DetectedFace objects;
            # the original snippet tried to iterate the class itself)
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_RGB2BGR)  # Convert back to BGR for OpenCV output
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
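
The per-frame plumbing above relies on moviepy's fl() contract: the callback receives a frame getter and a timestamp and must return the processed frame. A stripped-down sketch of that contract (the file name is a placeholder):

from moviepy.editor import VideoFileClip

def passthrough(get_frame, t):
    frame = get_frame(t)   # numpy array for the frame at time t
    return frame           # any per-frame processing would happen here

clip = VideoFileClip("input.mp4")
processed = clip.fl(passthrough)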
Example #31
 def _load_feature_name(self):
     inspector = PluginLoader.get_inspector(self._location)()
     feature_name = inspector.get_feature_name()
     return feature_name
Example #32
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = []
     argument_list.append({
         "opts": ("-m", "--model-dir"),
         "action":
         DirFullPaths,
         "dest":
         "model_dir",
         "default":
         "models",
         "help":
         "Model directory. A directory "
         "containing the trained model you wish "
         "to process. Defaults to 'models'"
     })
     argument_list.append({
         "opts": ("-a", "--input-aligned-dir"),
         "action":
         DirFullPaths,
         "dest":
         "input_aligned_dir",
         "default":
         None,
         "help":
         "Input \"aligned directory\". A "
         "directory that should contain the "
         "aligned faces extracted from the input "
         "files. If you delete faces from this "
         "folder, they'll be skipped during "
         "conversion. If no aligned dir is "
         "specified, all faces will be "
         "converted"
     })
     argument_list.append({
         "opts": ("-t", "--trainer"),
         "type":
         str,
         # case sensitive because this is used to
         # load a plug-in.
         "choices":
         PluginLoader.get_available_models(),
         "default":
         PluginLoader.get_default_model(),
         "help":
         "Select the trainer that was used to "
         "create the model"
     })
     argument_list.append({
         "opts": ("-c", "--converter"),
         "type": str,
         # case sensitive because this is used
         # to load a plugin.
         "choices": ("Masked", "Adjust"),
         "default": "Masked",
         "help": "Converter to use"
     })
     argument_list.append({
         "opts": ("-b", "--blur-size"),
         "type": int,
         "default": 2,
         "help": "Blur size. (Masked converter only)"
     })
     argument_list.append({
         "opts": ("-e", "--erosion-kernel-size"),
         "dest":
         "erosion_kernel_size",
         "type":
         int,
         "default":
         None,
         "help":
         "Erosion kernel size. Positive values "
         "apply erosion which reduces the edge "
         "of the swapped face. Negative values "
         "apply dilation which allows the "
         "swapped face to cover more space. "
         "(Masked converter only)"
     })
     argument_list.append({
         "opts": ("-M", "--mask-type"),
         # lowercase this, because it's just a
         # string later on.
         "type":
         str.lower,
         "dest":
         "mask_type",
         "choices": ["rect", "facehull", "facehullandrect"],
         "default":
         "facehullandrect",
         "help":
         "Mask to use to replace faces. "
         "(Masked converter only)"
     })
     argument_list.append({
         "opts": ("-sh", "--sharpen"),
         "type":
         str.lower,
         "dest":
         "sharpen_image",
         "choices": ["bsharpen", "gsharpen"],
         "default":
         None,
         "help":
         "Use Sharpen Image. bsharpen for Box "
         "Blur, gsharpen for Gaussian Blur "
         "(Masked converter only)"
     })
     argument_list.append({
         "opts": ("-g", "--gpus"),
         "type": int,
         "default": 1,
         "help": "Number of GPUs to use for conversion"
     })
     argument_list.append({
         "opts": ("-fr", "--frame-ranges"),
         "nargs":
         "+",
         "type":
         str,
         "help":
         "frame ranges to apply transfer to e.g. "
         "For frames 10 to 50 and 90 to 100 use "
         "--frame-ranges 10-50 90-100. Files "
         "must have the frame-number as the last "
         "number in the name!"
     })
     argument_list.append({
         "opts": ("-d", "--discard-frames"),
         "action":
         "store_true",
         "dest":
         "discard_frames",
         "default":
         False,
         "help":
         "When used with --frame-ranges discards "
         "frames that are not processed instead "
         "of writing them out unchanged"
     })
     argument_list.append({
         "opts": ("-s", "--swap-model"),
         "action":
         "store_true",
         "dest":
         "swap_model",
         "default":
         False,
         "help":
         "Swap the model. Instead of A -> B, "
         "swap B -> A"
     })
     argument_list.append({
         "opts": ("-S", "--seamless"),
         "action":
         "store_true",
         "dest":
         "seamless_clone",
         "default":
         False,
         "help":
         "Use cv2's seamless clone. "
         "(Masked converter only)"
     })
     argument_list.append({
         "opts": ("-mh", "--match-histogram"),
         "action":
         "store_true",
         "dest":
         "match_histogram",
         "default":
         False,
         "help":
         "Use histogram matching. "
         "(Masked converter only)"
     })
     argument_list.append({
         "opts": ("-sm", "--smooth-mask"),
         "action": "store_true",
         "dest": "smooth_mask",
         "default": False,
         "help": "Smooth mask (Adjust converter only)"
     })
     argument_list.append({
         "opts": ("-aca", "--avg-color-adjust"),
         "action":
         "store_true",
         "dest":
         "avg_color_adjust",
         "default":
         False,
         "help":
         "Average color adjust. "
         "(Adjust converter only)"
     })
     argument_list.append({
         "opts": ("-dt", "--draw-transparent"),
         "action":
         "store_true",
         "dest":
         "draw_transparent",
         "default":
         False,
         "help":
         "Place the swapped face on a "
         "transparent layer rather than the "
         "original frame."
     })
     return argument_list
Example #33
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = []
     argument_list.append({"opts": ("--serializer", ),
                           "type": str.lower,
                           "dest": "serializer",
                           "default": "json",
                           "choices": ("json", "pickle", "yaml"),
                           "help": "Serializer for alignments file. If "
                                   "yaml is chosen and not available, then "
                                   "json will be used as the default "
                                   "fallback."})
     argument_list.append({
         "opts": ("-D", "--detector"),
         "type": str.lower,
         "choices":  PluginLoader.get_available_extractors(
             "detect"),
         "default": "mtcnn",
         "help": "R|Detector to use."
                 "\n'dlib-hog': uses least resources, but is the"
                 "\n\tleast reliable."
                 "\n'dlib-cnn': faster than mtcnn but detects"
                 "\n\tfewer faces and fewer false positives."
                 "\n'mtcnn': slower than dlib, but uses fewer"
                 "\n\tresources whilst detecting more faces and"
                 "\n\tmore false positives. Has superior"
                 "\n\talignment to dlib"})
     argument_list.append({
         "opts": ("-A", "--aligner"),
         "type": str.lower,
         "choices": PluginLoader.get_available_extractors(
             "align"),
         "default": "fan",
         "help": "R|Aligner to use."
                 "\n'dlib': Dlib Pose Predictor. Faster, less "
                 "\n\tresource intensive, but less accurate."
                 "\n'fan': Face Alignment Network. Best aligner."
                 "\n\tGPU heavy, slow when not running on GPU"})
     argument_list.append({"opts": ("-r", "--rotate-images"),
                           "type": str,
                           "dest": "rotate_images",
                           "default": None,
                           "help": "If a face isn't found, rotate the "
                                   "images to try to find a face. Can find "
                                   "more faces at the cost of extraction "
                                   "speed. Pass in a single number to use "
                                   "increments of that size up to 360, or "
                                   "pass in a list of numbers to enumerate "
                                   "exactly what angles to check"})
     argument_list.append({"opts": ("-bt", "--blur-threshold"),
                           "type": float,
                           "action": Slider,
                           "min_max": (0.0, 100.0),
                           "rounding": 1,
                           "dest": "blur_thresh",
                           "default": 0.0,
                           "help": "Automatically discard images blurrier than the specified "
                                   "threshold. Discarded images are moved into a \"blurry\" "
                                   "sub-folder. Lower values allow more blur. Set to 0.0 to "
                                   "turn off."})
     argument_list.append({"opts": ("-mp", "--multiprocess"),
                           "action": "store_true",
                           "default": False,
                           "help": "Run extraction in parallel. Offers "
                                   "speed up for some extractor/detector "
                                   "combinations, less so for others. "
                                   "Only has an effect if both the "
                                   "aligner and detector use the GPU, "
                                   "otherwise this is automatic."})
     argument_list.append({"opts": ("-sz", "--size"),
                           "type": int,
                           "action": Slider,
                           "min_max": (128, 512),
                           "default": 256,
                           "rounding": 64,
                           "help": "The output size of extracted faces. Make sure that the "
                                   "model you intend to train supports your required size. "
                                   "This will only need to be changed for hi-res models."})
     argument_list.append({"opts": ("-min", "--min-size"),
                           "type": int,
                           "action": Slider,
                           "dest": "min_size",
                           "min_max": (0, 1080),
                           "default": 0,
                           "rounding": 20,
                           "help": "Filters out faces detected below this size. Length, in "
                                   "pixels across the diagonal of the bounding box. Set to 0 "
                                   "for off"})
     argument_list.append({"opts": ("-s", "--skip-existing"),
                           "action": "store_true",
                           "dest": "skip_existing",
                           "default": False,
                           "help": "Skips frames that have already been "
                                   "extracted and exist in the alignments "
                                   "file"})
     argument_list.append({"opts": ("-sf", "--skip-existing-faces"),
                           "action": "store_true",
                           "dest": "skip_faces",
                           "default": False,
                           "help": "Skip frames that already have "
                                   "detected faces in the alignments "
                                   "file"})
     argument_list.append({"opts": ("-dl", "--debug-landmarks"),
                           "action": "store_true",
                           "dest": "debug_landmarks",
                           "default": False,
                           "help": "Draw landmarks on the ouput faces for "
                                   "debug"})
     argument_list.append({"opts": ("-ae", "--align-eyes"),
                           "action": "store_true",
                           "dest": "align_eyes",
                           "default": False,
                           "help": "Perform extra alignment to ensure "
                                   "left/right eyes are  at the same "
                                   "height"})
     argument_list.append({"opts": ("-si", "--save-interval"),
                           "dest": "save_interval",
                           "type": int,
                           "action": Slider,
                           "min_max": (0, 1000),
                           "rounding": 10,
                           "default": 0,
                           "help": "Automatically save the alignments file after a set amount "
                                   "of frames. Will only save at the end of extracting by "
                                   "default. WARNING: Don't interrupt the script when writing "
                                   "the file because it might get corrupted. Set to 0 to turn "
                                   "off"})
     return argument_list
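
Note that these dictionaries mix argparse-compatible keys with GUI-only hints such as "min_max", "rounding" and "filetypes" (presumably consumed by the Slider and *FullPaths actions). A hedged sketch of filtering the hints out for a plain argparse run; the key list and helper are illustrations, not faceswap's internals:

GUI_ONLY_KEYS = ("min_max", "rounding", "filetypes")

def to_argparse_kwargs(option):
    """ Drop GUI-only hints so the dict can go straight to add_argument """
    return {key: val for key, val in option.items()
            if key != "opts" and key not in GUI_ONLY_KEYS}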
Example #34
 def get_argument_list():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = list()
     argument_list.append({
         "opts": ("-A", "--input-A"),
         "action":
         DirFullPaths,
         "dest":
         "input_A",
         "default":
         "input_A",
         "help":
         "Input directory. A directory "
         "containing training images for face A. "
         "Defaults to 'input'"
     })
     argument_list.append({
         "opts": ("-B", "--input-B"),
         "action":
         DirFullPaths,
         "dest":
         "input_B",
         "default":
         "input_B",
         "help":
         "Input directory. A directory "
         "containing training images for face B. "
         "Defaults to 'input'"
     })
     argument_list.append({
         "opts": ("-m", "--model-dir"),
         "action":
         DirFullPaths,
         "dest":
         "model_dir",
         "default":
         "models",
         "help":
         "Model directory. This is where the "
         "training data will be stored. "
         "Defaults to 'model'"
     })
     argument_list.append({
         "opts": ("-s", "--save-interval"),
         "type":
         int,
         "dest":
         "save_interval",
         "default":
         100,
         "help":
         "Sets the number of iterations before "
         "saving the model"
     })
     argument_list.append({
         "opts": ("-t", "--trainer"),
         "type":
         str,
         "choices":
         PluginLoader.get_available_models(),
         "default":
         PluginLoader.get_default_model(),
         "help":
         "Select which trainer to use, Use "
         "LowMem for cards with less than 2GB of "
         "VRAM"
     })
     argument_list.append({
         "opts": ("-bs", "--batch-size"),
         "type":
         int,
         "default":
         64,
         "help":
         "Batch size, as a power of 2 "
         "(64, 128, 256, etc)"
     })
     argument_list.append({
         "opts": ("-it", "--iterations"),
         "type": int,
         "default": 1000000,
         "help": "Length of training in iterations"
     })
     argument_list.append({
         "opts": ("-g", "--gpus"),
         "type": int,
         "default": 1,
         "help": "Number of GPUs to use for training"
     })
     argument_list.append({
         "opts": ("-p", "--preview"),
         "action":
         "store_true",
         "dest":
         "preview",
         "default":
         False,
         "help":
         "Show preview output. If not specified, "
         "write progress to file"
     })
     argument_list.append({
         "opts": ("-w", "--write-image"),
         "action":
         "store_true",
         "dest":
         "write_image",
         "default":
         False,
         "help":
         "Writes the training result to a file "
         "even on preview mode"
     })
     argument_list.append({
         "opts": ("-pl", "--use-perceptual-loss"),
         "action": "store_true",
         "dest": "perceptual_loss",
         "default": False,
         "help": "Use perceptual loss while training"
     })
     argument_list.append({
         "opts": ("-ag", "--allow-growth"),
         "action":
         "store_true",
         "dest":
         "allow_growth",
         "default":
         False,
         "help":
         "Sets allow_growth option of Tensorflow "
         "to spare memory on some configs"
     })
     argument_list.append({
         "opts": ("-v", "--verbose"),
         "action": "store_true",
         "dest": "verbose",
         "default": False,
         "help": "Show verbose output"
     })
     argument_list.append({
         "opts": ("-tia", "--timelapse-input-A"),
         "action":
         DirFullPaths,
         "dest":
         "timelapse_input_A",
         "default":
         None,
         "help":
         "For if you want a timelapse: "
         "The input folder for the timelapse. "
         "This folder should contain faces of A "
         "which will be converted for the "
         "timelapse. You must supply a "
         "--timelapse-output and a "
         "--timelapse-input-B parameter."
     })
     argument_list.append({
         "opts": ("-tib", "--timelapse-input-B"),
         "action":
         DirFullPaths,
         "dest":
         "timelapse_input_B",
         "default":
         None,
         "help":
         "For if you want a timelapse: "
         "The input folder for the timelapse. "
         "This folder should contain faces of B "
         "which will be converted for the "
         "timelapse. You must supply a "
         "--timelapse-output and a "
         "--timelapse-input-A parameter."
     })
     argument_list.append({
         "opts": ("-to", "--timelapse-output"),
         "action":
         DirFullPaths,
         "dest":
         "timelapse_output",
         "default":
         None,
         "help":
         "The output folder for the timelapse. "
         "If the input folders are supplied but "
         "no output folder, it will default to "
         "your model folder /timelapse/"
     })
     # This is a hidden argument to indicate that the GUI is being used,
     # so the preview window can be redirected accordingly
     argument_list.append({
         "opts": ("-gui", "--gui"),
         "action": "store_true",
         "dest": "redirect_gui",
         "default": False,
         "help": argparse.SUPPRESS
     })
     return argument_list
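
The hidden "-gui" flag above uses argparse.SUPPRESS, which keeps an argument functional while omitting it from --help. A self-contained sketch of the same trick:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-gui", "--gui", action="store_true",
                    dest="redirect_gui", default=False,
                    help=argparse.SUPPRESS)   # parses normally, hidden from --help
print(parser.parse_args(["--gui"]).redirect_gui)   # True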
Example #35
 def get_argument_list():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = list()
     argument_list.append({"opts": ("-A", "--input-A"),
                           "action": DirFullPaths,
                           "dest": "input_a",
                           "default": "input_a",
                           "help": "Input directory. A directory "
                                   "containing training images for face A. "
                                   "Defaults to 'input'"})
     argument_list.append({"opts": ("-B", "--input-B"),
                           "action": DirFullPaths,
                           "dest": "input_b",
                           "default": "input_b",
                           "help": "Input directory. A directory "
                                   "containing training images for face B. "
                                   "Defaults to 'input'"})
     argument_list.append({"opts": ("-ala", "--alignments-A"),
                           "action": FileFullPaths,
                           "filetypes": 'alignments',
                           "type": str,
                           "dest": "alignments_path_a",
                           "default": None,
                           "help": "Path to alignments file for training set A. Only required "
                                   "if you are using a masked model or warp-to-landmarks is "
                                   "enabled. Defaults to <input-A>/alignments.json if not "
                                   "provided."})
     argument_list.append({"opts": ("-alb", "--alignments-B"),
                           "action": FileFullPaths,
                           "filetypes": 'alignments',
                           "type": str,
                           "dest": "alignments_path_b",
                           "default": None,
                           "help": "Path to alignments file for training set B. Only required "
                                   "if you are using a masked model or warp-to-landmarks is "
                                   "enabled. Defaults to <input-B>/alignments.json if not "
                                   "provided."})
     argument_list.append({"opts": ("-m", "--model-dir"),
                           "action": DirFullPaths,
                           "dest": "model_dir",
                           "default": "models",
                           "help": "Model directory. This is where the "
                                   "training data will be stored. "
                                   "Defaults to 'model'"})
     argument_list.append({"opts": ("-t", "--trainer"),
                           "type": str.lower,
                           "choices": PluginLoader.get_available_models(),
                           "default": PluginLoader.get_default_model(),
                           "help": "Select which trainer to use, Use "
                                   "LowMem for cards with less than 2GB of "
                                   "VRAM"})
     argument_list.append({"opts": ("-s", "--save-interval"),
                           "type": int,
                           "action": Slider,
                           "min_max": (10, 1000),
                           "rounding": 10,
                           "dest": "save_interval",
                           "default": 100,
                           "help": "Sets the number of iterations before saving the model"})
     argument_list.append({"opts": ("-bs", "--batch-size"),
                           "type": int,
                           "action": Slider,
                           "min_max": (2, 256),
                           "rounding": 2,
                           "dest": "batch_size",
                           "default": 64,
                           "help": "Batch size, as a power of 2 (64, 128, 256, etc)"})
     argument_list.append({"opts": ("-it", "--iterations"),
                           "type": int,
                           "action": Slider,
                           "min_max": (0, 5000000),
                           "rounding": 20000,
                           "default": 1000000,
                           "help": "Length of training in iterations."})
     argument_list.append({"opts": ("-g", "--gpus"),
                           "type": int,
                           "action": Slider,
                           "min_max": (1, 10),
                           "rounding": 1,
                           "default": 1,
                           "help": "Number of GPUs to use for training"})
     argument_list.append({"opts": ("-ps", "--preview-scale"),
                           "type": int,
                           "action": Slider,
                           "dest": "preview_scale",
                           "min_max": (25, 200),
                           "rounding": 25,
                           "default": 100,
                           "help": "Percentage amount to scale the preview by."})
     argument_list.append({"opts": ("-p", "--preview"),
                           "action": "store_true",
                           "dest": "preview",
                           "default": False,
                           "help": "Show preview output. If not specified, "
                                   "write progress to file"})
     argument_list.append({"opts": ("-w", "--write-image"),
                           "action": "store_true",
                           "dest": "write_image",
                           "default": False,
                           "help": "Writes the training result to a file "
                                   "even on preview mode"})
     argument_list.append({"opts": ("-ag", "--allow-growth"),
                           "action": "store_true",
                           "dest": "allow_growth",
                           "default": False,
                           "help": "Sets allow_growth option of Tensorflow "
                                   "to spare memory on some configs"})
     argument_list.append({"opts": ("-nl", "--no-logs"),
                           "action": "store_true",
                           "dest": "no_logs",
                           "default": False,
                           "help": "Disables TensorBoard logging. NB: Disabling logs means "
                                   "that you will not be able to use the graph or analysis "
                                   "for this session in the GUI."})
     argument_list.append({"opts": ("-wl", "--warp-to-landmarks"),
                           "action": "store_true",
                           "dest": "warp_to_landmarks",
                           "default": False,
                           "help": "Warps training faces to closely matched Landmarks from the "
                                   "opposite face-set rather than randomly warping the face. "
                                   "This is the 'dfaker' way of doing warping. Alignments "
                                   "files for both sets of faces must be provided if using "
                                   "this option."})
     argument_list.append({"opts": ("-nf", "--no-flip"),
                           "action": "store_true",
                           "dest": "no_flip",
                           "default": False,
                           "help": "To effectively learn, a random set of images are flipped "
                                   "horizontally. Sometimes it is desirable for this not to "
                                   "occur. Generally this should be left off except for "
                                   "during 'fit training'."})
     argument_list.append({"opts": ("-tia", "--timelapse-input-A"),
                           "action": DirFullPaths,
                           "dest": "timelapse_input_a",
                           "default": None,
                           "help": "For if you want a timelapse: "
                                   "The input folder for the timelapse. "
                                   "This folder should contain faces of A "
                                   "which will be converted for the "
                                   "timelapse. You must supply a "
                                   "--timelapse-output and a "
                                   "--timelapse-input-B parameter."})
     argument_list.append({"opts": ("-tib", "--timelapse-input-B"),
                           "action": DirFullPaths,
                           "dest": "timelapse_input_b",
                           "default": None,
                           "help": "For if you want a timelapse: "
                                   "The input folder for the timelapse. "
                                   "This folder should contain faces of B "
                                   "which will be converted for the "
                                   "timelapse. You must supply a "
                                   "--timelapse-output and a "
                                   "--timelapse-input-A parameter."})
     argument_list.append({"opts": ("-to", "--timelapse-output"),
                           "action": DirFullPaths,
                           "dest": "timelapse_output",
                           "default": None,
                           "help": "The output folder for the timelapse. "
                                   "If the input folders are supplied but "
                                   "no output folder, it will default to "
                                   "your model folder /timelapse/"})
     return argument_list
Example #36
    def _set_loss(self) -> None:
        # pylint:disable=line-too-long
        """ Set the default loss options.

        Loss Documentation
        MAE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        MSE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        LogCosh https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        Smooth L1 https://arxiv.org/pdf/1701.03077.pdf
        L_inf_norm https://medium.com/@montjoile/l0-norm-l1-norm-l2-norm-l-infinity-norm-7a7d18a4f40c
        SSIM http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf
        MSSIM https://www.cns.nyu.edu/pub/eero/wang03b.pdf
        GMSD https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
        """  # noqa
        # pylint:enable=line-too-long
        logger.debug("Setting Loss config")
        section = "global.loss"
        self.add_section(
            title=section,
            info="Loss configuration options\n"
            "Loss is the mechanism by which a Neural Network judges how well it "
            "thinks that it is recreating a face." + ADDITIONAL_INFO)
        self.add_item(section=section,
                      title="loss_function",
                      datatype=str,
                      group="loss",
                      default="ssim",
                      fixed=False,
                      choices=[
                          x for x in sorted(_LOSS_HELP)
                          if x not in _NON_PRIMARY_LOSS
                      ],
                      info="The loss function to use.\n\n\t" +
                      "\n\t".join(f"{k}: {v}"
                                  for k, v in sorted(_LOSS_HELP.items())
                                  if k not in _NON_PRIMARY_LOSS))
        self.add_item(
            section=section,
            title="loss_function_2",
            datatype=str,
            group="loss",
            default="mse",
            fixed=False,
            choices=list(sorted(_LOSS_HELP)),
            info=
            "The second loss function to use. If using a structural based loss (such as "
            "SSIM, MS-SSIM or GMSD) it is common to add an L1 regularization(MAE) or L2 "
            "regularization (MSE) function. You can adjust the weighting of this loss "
            "function with the loss_weight_2 option.\n\n\t" +
            "\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items())))
        self.add_item(
            section=section,
            title="loss_weight_2",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=100,
            fixed=False,
            info="The amount of weight to apply to the second loss function.\n\n"
            "\n\nThe value given here is as a percentage denoting how much the selected "
            "function should contribute to the overall loss cost of the model. For example:"
            "\n\t 100 - The loss calculated for the second loss function will be applied at "
            "its full amount towards the overall loss score. "
            "\n\t 25 - The loss calculated for the second loss function will be reduced by a "
            "quarter prior to adding to the overall loss score. "
            "\n\t 400 - The loss calculated for the second loss function will be mulitplied "
            "4 times prior to adding to the overall loss score. "
            "\n\t 0 - Disables the second loss function altogether.")
        self.add_item(
            section=section,
            title="loss_function_3",
            datatype=str,
            group="loss",
            default="none",
            fixed=False,
            choices=list(sorted(_LOSS_HELP)),
            info=
            "The third loss function to use. You can adjust the weighting of this loss "
            "function with the loss_weight_3 option.\n\n\t" +
            "\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items())))
        self.add_item(
            section=section,
            title="loss_weight_3",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=0,
            fixed=False,
            info="The amount of weight to apply to the third loss function.\n\n"
            "\n\nThe value given here is as a percentage denoting how much the selected "
            "function should contribute to the overall loss cost of the model. For example:"
            "\n\t 100 - The loss calculated for the third loss function will be applied at "
            "its full amount towards the overall loss score. "
            "\n\t 25 - The loss calculated for the third loss function will be reduced by a "
            "quarter prior to adding to the overall loss score. "
            "\n\t 400 - The loss calculated for the third loss function will be mulitplied 4 "
            "times prior to adding to the overall loss score. "
            "\n\t 0 - Disables the third loss function altogether.")
        self.add_item(
            section=section,
            title="loss_function_4",
            datatype=str,
            group="loss",
            default="none",
            fixed=False,
            choices=list(sorted(_LOSS_HELP)),
            info=
            "The fourth loss function to use. You can adjust the weighting of this loss "
            "function with the loss_weight_3 option.\n\n\t" +
            "\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items())))
        self.add_item(
            section=section,
            title="loss_weight_4",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=0,
            fixed=False,
            info="The amount of weight to apply to the fourth loss function.\n\n"
            "\n\nThe value given here is as a percentage denoting how much the selected "
            "function should contribute to the overall loss cost of the model. For example:"
            "\n\t 100 - The loss calculated for the fourth loss function will be applied at "
            "its full amount towards the overall loss score. "
            "\n\t 25 - The loss calculated for the fourth loss function will be reduced by a "
            "quarter prior to adding to the overall loss score. "
            "\n\t 400 - The loss calculated for the fourth loss function will be mulitplied "
            "4 times prior to adding to the overall loss score. "
            "\n\t 0 - Disables the fourth loss function altogether.")
        self.add_item(
            section=section,
            title="mask_loss_function",
            datatype=str,
            group="loss",
            default="mse",
            fixed=False,
            choices=["mae", "mse"],
            info="The loss function to use when learning a mask."
            "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
            "towards its median value in the training dataset. Robust to outliers but as "
            "a median, it can potentially ignore some infrequent image types in the dataset."
            "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
            "towards its average value in the training dataset. As an average, it will be "
            "susceptible to outliers and typically produces slightly blurrier results."
        )
        self.add_item(
            section=section,
            title="eye_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=3,
            fixed=False,
            info=
            "The amount of priority to give to the eyes.\n\nThe value given here is as a "
            "multiplier of the main loss score. For example:"
            "\n\t 1 - The eyes will receive the same priority as the rest of the face. "
            "\n\t 10 - The eyes will be given a score 10 times higher than the rest of the "
            "face."
            "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="mouth_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=2,
            fixed=False,
            info=
            "The amount of priority to give to the mouth.\n\nThe value given here is as a "
            "multiplier of the main loss score. For Example:"
            "\n\t 1 - The mouth will receive the same priority as the rest of the face. "
            "\n\t 10 - The mouth will be given a score 10 times higher than the rest of the "
            "face."
            "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="penalized_mask_loss",
            datatype=bool,
            default=True,
            group="loss",
            info=
            "Image loss function is weighted by mask presence. For areas of "
            "the image without the facial mask, reconstruction errors will be "
            "ignored while the masked face area is prioritized. May increase "
            "overall quality by focusing attention on the core face area.")
        self.add_item(
            section=section,
            title="mask_type",
            datatype=str,
            default="extended",
            choices=PluginLoader.get_available_extractors("mask",
                                                          add_none=True,
                                                          extend_plugin=True),
            group="mask",
            gui_radio=True,
            info=
            "The mask to be used for training. If you have selected 'Learn Mask' or "
            "'Penalized Mask Loss' you must select a value other than 'none'. The required "
            "mask should have been selected as part of the Extract process. If it does not "
            "exist in the alignments file then it will be generated prior to training "
            "commencing."
            "\n\tnone: Don't use a mask."
            "\n\tbisenet-fp-face: Relatively lightweight NN based mask that provides more "
            "refined control over the area to be masked (configurable in mask settings). "
            "Use this version of bisenet-fp if your model is trained with 'face' or "
            "'legacy' centering."
            "\n\tbisenet-fp-head: Relatively lightweight NN based mask that provides more "
            "refined control over the area to be masked (configurable in mask settings). "
            "Use this version of bisenet-fp if your model is trained with 'head' centering."
            "\n\tcomponents: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks to create a mask."
            "\n\textended: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks and the mask is extended upwards onto the forehead."
            "\n\tvgg-clear: Mask designed to provide smart segmentation of mostly frontal "
            "faces clear of obstructions. Profile faces and obstructions may result in "
            "sub-par performance."
            "\n\tvgg-obstructed: Mask designed to provide smart segmentation of mostly "
            "frontal faces. The mask model has been specifically trained to recognize "
            "some facial obstructions (hands and eyeglasses). Profile faces may result in "
            "sub-par performance."
            "\n\tunet-dfl: Mask designed to provide smart segmentation of mostly frontal "
            "faces. The mask model has been trained by community members and will need "
            "testing for further description. Profile faces may result in sub-par "
            "performance.")
        self.add_item(
            section=section,
            title="mask_blur_kernel",
            datatype=int,
            min_max=(0, 9),
            rounding=1,
            default=3,
            group="mask",
            info=
            "Apply gaussian blur to the mask input. This has the effect of smoothing the "
            "edges of the mask, which can help with poorly calculated masks and give less "
            "of a hard edge to the predicted mask. The size is in pixels (calculated from "
            "a 128px mask). Set to 0 to not apply gaussian blur. This value should be odd, "
            "if an even number is passed in then it will be rounded to the next odd number."
        )
        self.add_item(
            section=section,
            title="mask_threshold",
            datatype=int,
            default=4,
            min_max=(0, 50),
            rounding=1,
            group="mask",
            info=
            "Sets pixels that are near white to white and near black to black. Set to 0 for "
            "off.")
        self.add_item(
            section=section,
            title="learn_mask",
            datatype=bool,
            default=False,
            group="mask",
            info=
            "Dedicate a portion of the model to learning how to duplicate the input "
            "mask. Increases VRAM usage in exchange for learning a quick ability to try "
            "to replicate more complex mask models.")
Example #37
    def _set_loss(self):
        """ Set the default loss options.

        Loss Documentation
        MAE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
            -learners-should-know-4fb140e9d4b0
        MSE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
            -learners-should-know-4fb140e9d4b0
        LogCosh https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
                -learners-should-know-4fb140e9d4b0
        Smooth L1 https://arxiv.org/pdf/1701.03077.pdf
        L_inf_norm https://medium.com/@montjoile/l0-norm-l1-norm-l2-norm-l-infinity
                   -norm-7a7d18a4f40c
        SSIM http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf
        GMSD https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
        """
        logger.debug("Setting Loss config")
        section = "global.loss"
        self.add_section(title=section,
                         info="Loss configuration options\n"
                              "Loss is the mechanism by which a Neural Network judges how well it "
                              "thinks that it is recreating a face." + ADDITIONAL_INFO)
        self.add_item(
            section=section,
            title="loss_function",
            datatype=str,
            group="loss",
            default="ssim",
            choices=["mae", "mse", "logcosh", "smooth_loss", "l_inf_norm", "ssim", "gmsd",
                     "pixel_gradient_diff"],
            info="The loss function to use."
                 "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
                 "towards its median value in the training dataset. Robust to outliers but as "
                 "a median, it can potentially ignore some infrequent image types in the dataset."
                 "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
                 "towards its average value in the training dataset. As an avg, it will be "
                 "suspectible to outliers and typically produces slightly blurrier results."
                 "\n\t LogCosh - log(cosh(x)) acts similiar to MSE for small errors and to "
                 "MAE for large errors. Like MSE, it is very stable and prevents overshoots "
                 "when errors are near zero. Like MAE, it is robust to outliers. NB: Due to a bug "
                 "in PlaidML, this loss does not work on AMD cards."
                 "\n\t Smooth_L1 --- Modification of the MAE loss to correct two of its "
                 "disadvantages. This loss has improved stability and guidance for small errors."
                 "\n\t L_inf_norm --- The L_inf norm will reduce the largest individual pixel "
                 "error in an image. As each largest error is minimized sequentially, the "
                 "overall error is improved. This loss will be extremely focused on outliers."
                 "\n\t SSIM - Structural Similarity Index Metric is a perception-based "
                 "loss that considers changes in texture, luminance, contrast, and local spatial "
                 "statistics of an image. Potentially delivers more realistic looking images."
                 "\n\t GMSD - Gradient Magnitude Similarity Deviation seeks to match "
                 "the global standard deviation of the pixel to pixel differences between two "
                 "images. Similiar in approach to SSIM. NB: This loss does not currently work on "
                 "AMD cards."
                 "\n\t Pixel_Gradient_Difference - Instead of minimizing the difference between "
                 "the absolute value of each pixel in two reference images, compute the pixel to "
                 "pixel spatial difference in each image and then minimize that difference "
                 "between two images. Allows for large color shifts,but maintains the structure "
                 "of the image.")
        self.add_item(
            section=section,
            title="mask_loss_function",
            datatype=str,
            group="loss",
            default="mse",
            choices=["mae", "mse"],
            info="The loss function to use when learning a mask."
                 "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
                 "towards its median value in the training dataset. Robust to outliers but as "
                 "a median, it can potentially ignore some infrequent image types in the dataset."
                 "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
                 "towards its average value in the training dataset. As an avg, it will be "
                 "suspectible to outliers and typically produces slightly blurrier results.")
        self.add_item(
            section=section,
            title="l2_reg_term",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=100,
            info="The amount of L2 Regularization to apply as a penalty to Structural Similarity "
                 "loss functions.\n\nNB: You should only adjust this if you know what you are "
                 "doing!\n\n"
                 "L2 regularization applies a penalty term to the given Loss function. This "
                 "penalty will only be applied if SSIM or GMSD is selected for the main loss "
                 "function, otherwise it is ignored.\n\nThe value given here is as a percentage "
                 "weight of the main loss function. For example:"
                 "\n\t 100 - Will give equal weighting to the main loss and the penalty function. "
                 "\n\t 25 - Will give the penalty function 1/4 of the weight of the main loss "
                 "function. "
                 "\n\t 400 - Will give the penalty function 4x as much importance as the main "
                 "loss function."
                 "\n\t 0 - Disables L2 Regularization altogether.")
        self.add_item(
            section=section,
            title="eye_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=3,
            fixed=False,
            info="The amount of priority to give to the eyes.\n\nThe value given here is as a "
                 "multiplier of the main loss score. For example:"
                 "\n\t 1 - The eyes will receive the same priority as the rest of the face. "
                 "\n\t 10 - The eyes will be given a score 10 times higher than the rest of the "
                 "face."
                 "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="mouth_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=2,
            fixed=False,
            info="The amount of priority to give to the mouth.\n\nThe value given here is as a "
                 "multiplier of the main loss score. For Example:"
                 "\n\t 1 - The mouth will receive the same priority as the rest of the face. "
                 "\n\t 10 - The mouth will be given a score 10 times higher than the rest of the "
                 "face."
                 "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="penalized_mask_loss",
            datatype=bool,
            default=True,
            group="loss",
            info="Image loss function is weighted by mask presence. For areas of "
                 "the image without the facial mask, reconstuction errors will be "
                 "ignored while the masked face area is prioritized. May increase "
                 "overall quality by focusing attention on the core face area.")
        self.add_item(
            section=section,
            title="mask_type",
            datatype=str,
            default="extended",
            choices=PluginLoader.get_available_extractors("mask", add_none=True),
            group="mask",
            gui_radio=True,
            info="The mask to be used for training. If you have selected 'Learn Mask' or "
                 "'Penalized Mask Loss' you must select a value other than 'none'. The required "
                 "mask should have been selected as part of the Extract process. If it does not "
                 "exist in the alignments file then it will be generated prior to training "
                 "commencing."
                 "\n\tnone: Don't use a mask."
                 "\n\tcomponents: Mask designed to provide facial segmentation based on the "
                 "positioning of landmark locations. A convex hull is constructed around the "
                 "exterior of the landmarks to create a mask."
                 "\n\textended: Mask designed to provide facial segmentation based on the "
                 "positioning of landmark locations. A convex hull is constructed around the "
                 "exterior of the landmarks and the mask is extended upwards onto the forehead."
                 "\n\tvgg-clear: Mask designed to provide smart segmentation of mostly frontal "
                 "faces clear of obstructions. Profile faces and obstructions may result in "
                 "sub-par performance."
                 "\n\tvgg-obstructed: Mask designed to provide smart segmentation of mostly "
                 "frontal faces. The mask model has been specifically trained to recognize "
                 "some facial obstructions (hands and eyeglasses). Profile faces may result in "
                 "sub-par performance."
                 "\n\tunet-dfl: Mask designed to provide smart segmentation of mostly frontal "
                 "faces. The mask model has been trained by community members and will need "
                 "testing for further description. Profile faces may result in sub-par "
                 "performance.")
        self.add_item(
            section=section,
            title="mask_blur_kernel",
            datatype=int,
            min_max=(0, 9),
            rounding=1,
            default=3,
            group="mask",
            info="Apply gaussian blur to the mask input. This has the effect of smoothing the "
                 "edges of the mask, which can help with poorly calculated masks and give less "
                 "of a hard edge to the predicted mask. The size is in pixels (calculated from "
                 "a 128px mask). Set to 0 to not apply gaussian blur. This value should be odd, "
                 "if an even number is passed in then it will be rounded to the next odd number.")
        self.add_item(
            section=section,
            title="mask_threshold",
            datatype=int,
            default=4,
            min_max=(0, 50),
            rounding=1,
            group="mask",
            info="Sets pixels that are near white to white and near black to black. Set to 0 for "
                 "off.")
        self.add_item(
            section=section,
            title="learn_mask",
            datatype=bool,
            default=False,
            group="mask",
            info="Dedicate a portion of the model to learning how to duplicate the input "
                 "mask. Increases VRAM usage in exchange for learning a quick ability to try "
                 "to replicate more complex mask models.")
Example #38
    def get_argument_list(self):
        argument_list = list()
        argument_list.append(dict(
            opts=("-a", "--alignments"),
            action=FileFullPaths,
            type=str,
            group=_("data"),
            required=True,
            filetypes="alignments",
            help=_("Full path to the alignments file to add the mask to. NB: if the mask already "
                   "exists in the alignments file it will be overwritten.")))
        argument_list.append(dict(
            opts=("-i", "--input"),
            action=DirOrFileFullPaths,
            type=str,
            group=_("data"),
            filetypes="video",
            required=True,
            help=_("Directory containing extracted faces, source frames, or a video file.")))
        argument_list.append(dict(
            opts=("-it", "--input-type"),
            action=Radio,
            type=str.lower,
            choices=("faces", "frames"),
            dest="input_type",
            group=_("data"),
            default="frames",
            help=_("R|Whether the `input` is a folder of faces or a folder frames/video"
                   "\nL|faces: The input is a folder containing extracted faces."
                   "\nL|frames: The input is a folder containing frames or is a video")))
        argument_list.append(dict(
            opts=("-M", "--masker"),
            action=Radio,
            type=str.lower,
            choices=PluginLoader.get_available_extractors("mask"),
            default="extended",
            group=_("process"),
            help=_("R|Masker to use."
                   "\nL|bisenet-fp: Relatively lightweight NN based mask that provides more "
                   "refined control over the area to be masked including full head masking "
                   "(configurable in mask settings)."
                   "\nL|components: Mask designed to provide facial segmentation based on the "
                   "positioning of landmark locations. A convex hull is constructed around the "
                   "exterior of the landmarks to create a mask."
                   "\nL|extended: Mask designed to provide facial segmentation based on the "
                   "positioning of landmark locations. A convex hull is constructed around the "
                   "exterior of the landmarks and the mask is extended upwards onto the forehead."
                   "\nL|vgg-clear: Mask designed to provide smart segmentation of mostly frontal "
                   "faces clear of obstructions. Profile faces and obstructions may result in "
                   "sub-par performance."
                   "\nL|vgg-obstructed: Mask designed to provide smart segmentation of mostly "
                   "frontal faces. The mask model has been specifically trained to recognize "
                   "some facial obstructions (hands and eyeglasses). Profile faces may result in "
                   "sub-par performance."
                   "\nL|unet-dfl: Mask designed to provide smart segmentation of mostly frontal "
                   "faces. The mask model has been trained by community members and will need "
                   "testing for further description. Profile faces may result in sub-par "
                   "performance.")))
        argument_list.append(dict(
            opts=("-p", "--processing"),
            action=Radio,
            type=str.lower,
            choices=("all", "missing", "output"),
            default="missing",
            group=_("process"),
            help=_("R|Whether to update all masks in the alignments files, only those faces "
                   "that do not already have a mask of the given `mask type` or just to output "
                   "the masks to the `output` location."
                   "\nL|all: Update the mask for all faces in the alignments file."
                   "\nL|missing: Create a mask for all faces in the alignments file where a mask "
                   "does not previously exist."
                   "\nL|output: Don't update the masks, just output them for review in the given "
                   "output folder.")))
        argument_list.append(dict(
            opts=("-o", "--output-folder"),
            action=DirFullPaths,
            dest="output",
            type=str,
            group=_("output"),
            help=_("Optional output location. If provided, a preview of the masks created will "
                   "be output in the given folder.")))
        argument_list.append(dict(
            opts=("-b", "--blur_kernel"),
            action=Slider,
            type=int,
            group=_("output"),
            min_max=(0, 9),
            default=3,
            rounding=1,
            help=_("Apply gaussian blur to the mask output. Has the effect of smoothing the "
                   "edges of the mask giving less of a hard edge. the size is in pixels. This "
                   "value should be odd, if an even number is passed in then it will be rounded "
                   "to the next odd number. NB: Only effects the output preview. Set to 0 for "
                   "off")))
        argument_list.append(dict(
            opts=("-t", "--threshold"),
            action=Slider,
            type=int,
            group=_("output"),
            min_max=(0, 50),
            default=4,
            rounding=1,
            help=_("Helps reduce 'blotchiness' on some masks by making light shades white "
                   "and dark shades black. Higher values will impact more of the mask. NB: "
                   "Only effects the output preview. Set to 0 for off")))
        argument_list.append(dict(
            opts=("-ot", "--output-type"),
            action=Radio,
            type=str.lower,
            choices=("combined", "masked", "mask"),
            default="combined",
            group=_("output"),
            help=_("R|How to format the output when processing is set to 'output'."
                   "\nL|combined: The image contains the face/frame, face mask and masked face."
                   "\nL|masked: Output the face/frame as rgba image with the face masked."
                   "\nL|mask: Only output the mask as a single channel image.")))
        argument_list.append(dict(
            opts=("-f", "--full-frame"),
            action="store_true",
            default=False,
            group=_("output"),
            help=_("R|Whether to output the whole frame or only the face box when using "
                   "output processing. Only has an effect when using frames as input.")))

        return argument_list
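The dictionaries built above mirror argparse keyword arguments. As a rough sketch of how such a list could be consumed, the snippet below wires it into a plain argparse parser; the GUI-only keys and faceswap's custom action classes are stripped, and this plumbing is an illustrative assumption rather than the project's actual CLI code.

import argparse

def build_parser(argument_list):
    """Hypothetical sketch: feed an argument list like the one above into a
    plain argparse.ArgumentParser. Keys that only drive the faceswap GUI
    (group, filetypes, min_max, rounding) are dropped, as are the custom
    action classes (Radio, Slider, the FullPaths family)."""
    parser = argparse.ArgumentParser()
    gui_only = {"opts", "group", "filetypes", "min_max", "rounding"}
    for arg in argument_list:
        kwargs = {key: val for key, val in arg.items() if key not in gui_only}
        # Keep standard string actions such as "store_true"; drop the classes.
        if not isinstance(kwargs.get("action"), str):
            kwargs.pop("action", None)
        parser.add_argument(*arg["opts"], **kwargs)
    return parser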