Example #1
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = []
     argument_list.append({"opts": ("--serializer", ),
                           "type": str.lower,
                           "dest": "serializer",
                           "default": "json",
                           "choices": ("json", "pickle", "yaml"),
                           "help": "Serializer for alignments file. If "
                                   "yaml is chosen and not available, then "
                                   "json will be used as the default "
                                   "fallback."})
     argument_list.append({
         "opts": ("-D", "--detector"),
         "type": str.lower,
         "choices":  PluginLoader.get_available_extractors(
             "detect"),
         "default": "mtcnn",
         "help": "R|Detector to use."
                 "\n'dlib-hog': uses least resources, but is the"
                 "\n\tleast reliable."
                 "\n'dlib-cnn': faster than mtcnn but detects"
                 "\n\tfewer faces and fewer false positives."
                 "\n'mtcnn': slower than dlib, but uses fewer"
                 "\n\tresources whilst detecting more faces and"
                 "\n\tmore false positives. Has superior"
                 "\n\talignment to dlib"})
     argument_list.append({
         "opts": ("-A", "--aligner"),
         "type": str.lower,
         "choices": PluginLoader.get_available_extractors(
             "align"),
         "default": "fan",
         "help": "R|Aligner to use."
                 "\n'dlib': Dlib Pose Predictor. Faster, less "
                 "\n\tresource intensive, but less accurate."
                 "\n'fan': Face Alignment Network. Best aligner."
                 "\n\tGPU heavy, slow when not running on GPU"})
     argument_list.append({"opts": ("-r", "--rotate-images"),
                           "type": str,
                           "dest": "rotate_images",
                           "default": None,
                           "help": "If a face isn't found, rotate the "
                                   "images to try to find a face. Can find "
                                   "more faces at the cost of extraction "
                                   "speed. Pass in a single number to use "
                                   "increments of that size up to 360, or "
                                   "pass in a list of numbers to enumerate "
                                   "exactly what angles to check"})
     argument_list.append({"opts": ("-bt", "--blur-threshold"),
                           "type": float,
                           "action": Slider,
                           "min_max": (0.0, 100.0),
                           "rounding": 1,
                           "dest": "blur_thresh",
                           "default": 0.0,
                           "help": "Automatically discard images blurrier than the specified "
                                   "threshold. Discarded images are moved into a \"blurry\" "
                                   "sub-folder. Lower values allow more blur. Set to 0.0 to "
                                   "turn off."})
     argument_list.append({"opts": ("-mp", "--multiprocess"),
                           "action": "store_true",
                           "default": False,
                           "help": "Run extraction in parallel. Offers "
                                   "speed up for some extractor/detector "
                                   "combinations, less so for others. "
                                   "Only has an effect if both the "
                                   "aligner and detector use the GPU, "
                                   "otherwise this is automatic."})
     argument_list.append({"opts": ("-sz", "--size"),
                           "type": int,
                           "action": Slider,
                           "min_max": (128, 512),
                           "default": 256,
                           "rounding": 64,
                           "help": "The output size of extracted faces. Make sure that the "
                                   "model you intend to train supports your required size. "
                                   "This will only need to be changed for hi-res models."})
     argument_list.append({"opts": ("-min", "--min-size"),
                           "type": int,
                           "action": Slider,
                           "dest": "min_size",
                           "min_max": (0, 1080),
                           "default": 0,
                           "rounding": 20,
                           "help": "Filters out faces detected below this size. Length, in "
                                   "pixels across the diagonal of the bounding box. Set to 0 "
                                   "for off"})
     argument_list.append({"opts": ("-s", "--skip-existing"),
                           "action": "store_true",
                           "dest": "skip_existing",
                           "default": False,
                           "help": "Skips frames that have already been "
                                   "extracted and exist in the alignments "
                                   "file"})
     argument_list.append({"opts": ("-sf", "--skip-existing-faces"),
                           "action": "store_true",
                           "dest": "skip_faces",
                           "default": False,
                           "help": "Skip frames that already have "
                                   "detected faces in the alignments "
                                   "file"})
     argument_list.append({"opts": ("-dl", "--debug-landmarks"),
                           "action": "store_true",
                           "dest": "debug_landmarks",
                           "default": False,
                           "help": "Draw landmarks on the ouput faces for "
                                   "debug"})
     argument_list.append({"opts": ("-ae", "--align-eyes"),
                           "action": "store_true",
                           "dest": "align_eyes",
                           "default": False,
                           "help": "Perform extra alignment to ensure "
                                   "left/right eyes are  at the same "
                                   "height"})
     argument_list.append({"opts": ("-si", "--save-interval"),
                           "dest": "save_interval",
                           "type": int,
                           "action": Slider,
                           "min_max": (0, 1000),
                           "rounding": 10,
                           "default": 0,
                           "help": "Automatically save the alignments file after a set amount "
                                   "of frames. Will only save at the end of extracting by "
                                   "default. WARNING: Don't interrupt the script when writing "
                                   "the file because it might get corrupted. Set to 0 to turn "
                                   "off"})
     return argument_list
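
A minimal sketch of how a list of option dictionaries like the one above might be wired into argparse. This is an illustration only, not the project's actual consumer: the build_parser helper, the assumption that GUI-only keys (min_max, rounding) must be stripped, and the handling of non-string actions such as Slider are all hypothetical.

import argparse

def build_parser(argument_list):
    """ Hypothetical helper: feed the option dictionaries into an ArgumentParser. """
    parser = argparse.ArgumentParser(description="extract")
    gui_only = ("min_max", "rounding")  # assumed to be consumed by the GUI, not argparse
    for option in argument_list:
        kwargs = {key: val for key, val in option.items()
                  if key not in ("opts",) + gui_only}
        # Custom GUI actions (e.g. Slider) are assumed to be classes rather than
        # argparse action strings; drop them for this pure-CLI sketch.
        if not isinstance(kwargs.get("action", ""), str):
            del kwargs["action"]
        parser.add_argument(*option["opts"], **kwargs)
    return parser

# Usage (assuming PluginLoader and Slider are importable so the list can be built):
# parser = build_parser(get_optional_arguments())
# args = parser.parse_args()
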
Example #2
    def _set_loss(self):
        """ Set the default loss options.

        Loss Documentation
        MAE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
            -learners-should-know-4fb140e9d4b0
        MSE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
            -learners-should-know-4fb140e9d4b0
        LogCosh https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
                -learners-should-know-4fb140e9d4b0
        Smooth L1 https://arxiv.org/pdf/1701.03077.pdf
        L_inf_norm https://medium.com/@montjoile/l0-norm-l1-norm-l2-norm-l-infinity
                   -norm-7a7d18a4f40c
        SSIM http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf
        GMSD https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
        """
        logger.debug("Setting Loss config")
        section = "global.loss"
        self.add_section(title=section,
                         info="Loss configuration options\n"
                              "Loss is the mechanism by which a Neural Network judges how well it "
                              "thinks that it is recreating a face." + ADDITIONAL_INFO)
        self.add_item(
            section=section,
            title="loss_function",
            datatype=str,
            group="loss",
            default="ssim",
            choices=["mae", "mse", "logcosh", "smooth_loss", "l_inf_norm", "ssim", "gmsd",
                     "pixel_gradient_diff"],
            info="The loss function to use."
                 "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
                 "towards its median value in the training dataset. Robust to outliers but as "
                 "a median, it can potentially ignore some infrequent image types in the dataset."
                 "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
                 "towards its average value in the training dataset. As an avg, it will be "
                 "suspectible to outliers and typically produces slightly blurrier results."
                 "\n\t LogCosh - log(cosh(x)) acts similiar to MSE for small errors and to "
                 "MAE for large errors. Like MSE, it is very stable and prevents overshoots "
                 "when errors are near zero. Like MAE, it is robust to outliers. NB: Due to a bug "
                 "in PlaidML, this loss does not work on AMD cards."
                 "\n\t Smooth_L1 --- Modification of the MAE loss to correct two of its "
                 "disadvantages. This loss has improved stability and guidance for small errors."
                 "\n\t L_inf_norm --- The L_inf norm will reduce the largest individual pixel "
                 "error in an image. As each largest error is minimized sequentially, the "
                 "overall error is improved. This loss will be extremely focused on outliers."
                 "\n\t SSIM - Structural Similarity Index Metric is a perception-based "
                 "loss that considers changes in texture, luminance, contrast, and local spatial "
                 "statistics of an image. Potentially delivers more realistic looking images."
                 "\n\t GMSD - Gradient Magnitude Similarity Deviation seeks to match "
                 "the global standard deviation of the pixel to pixel differences between two "
                 "images. Similiar in approach to SSIM. NB: This loss does not currently work on "
                 "AMD cards."
                 "\n\t Pixel_Gradient_Difference - Instead of minimizing the difference between "
                 "the absolute value of each pixel in two reference images, compute the pixel to "
                 "pixel spatial difference in each image and then minimize that difference "
                 "between two images. Allows for large color shifts,but maintains the structure "
                 "of the image.")
        self.add_item(
            section=section,
            title="mask_loss_function",
            datatype=str,
            group="loss",
            default="mse",
            choices=["mae", "mse"],
            info="The loss function to use when learning a mask."
                 "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
                 "towards its median value in the training dataset. Robust to outliers but as "
                 "a median, it can potentially ignore some infrequent image types in the dataset."
                 "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
                 "towards its average value in the training dataset. As an avg, it will be "
                 "suspectible to outliers and typically produces slightly blurrier results.")
        self.add_item(
            section=section,
            title="l2_reg_term",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=100,
            info="The amount of L2 Regularization to apply as a penalty to Structural Similarity "
                 "loss functions.\n\nNB: You should only adjust this if you know what you are "
                 "doing!\n\n"
                 "L2 regularization applies a penalty term to the given Loss function. This "
                 "penalty will only be applied if SSIM or GMSD is selected for the main loss "
                 "function, otherwise it is ignored.\n\nThe value given here is as a percentage "
                 "weight of the main loss function. For example:"
                 "\n\t 100 - Will give equal weighting to the main loss and the penalty function. "
                 "\n\t 25 - Will give the penalty function 1/4 of the weight of the main loss "
                 "function. "
                 "\n\t 400 - Will give the penalty function 4x as much importance as the main "
                 "loss function."
                 "\n\t 0 - Disables L2 Regularization altogether.")
        self.add_item(
            section=section,
            title="eye_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=3,
            fixed=False,
            info="The amount of priority to give to the eyes.\n\nThe value given here is as a "
                 "multiplier of the main loss score. For example:"
                 "\n\t 1 - The eyes will receive the same priority as the rest of the face. "
                 "\n\t 10 - The eyes will be given a score 10 times higher than the rest of the "
                 "face."
                 "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="mouth_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=2,
            fixed=False,
            info="The amount of priority to give to the mouth.\n\nThe value given here is as a "
                 "multiplier of the main loss score. For Example:"
                 "\n\t 1 - The mouth will receive the same priority as the rest of the face. "
                 "\n\t 10 - The mouth will be given a score 10 times higher than the rest of the "
                 "face."
                 "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="penalized_mask_loss",
            datatype=bool,
            default=True,
            group="loss",
            info="Image loss function is weighted by mask presence. For areas of "
                 "the image without the facial mask, reconstuction errors will be "
                 "ignored while the masked face area is prioritized. May increase "
                 "overall quality by focusing attention on the core face area.")
        self.add_item(
            section=section,
            title="mask_type",
            datatype=str,
            default="extended",
            choices=PluginLoader.get_available_extractors("mask", add_none=True),
            group="mask",
            gui_radio=True,
            info="The mask to be used for training. If you have selected 'Learn Mask' or "
                 "'Penalized Mask Loss' you must select a value other than 'none'. The required "
                 "mask should have been selected as part of the Extract process. If it does not "
                 "exist in the alignments file then it will be generated prior to training "
                 "commencing."
                 "\n\tnone: Don't use a mask."
                 "\n\tcomponents: Mask designed to provide facial segmentation based on the "
                 "positioning of landmark locations. A convex hull is constructed around the "
                 "exterior of the landmarks to create a mask."
                 "\n\textended: Mask designed to provide facial segmentation based on the "
                 "positioning of landmark locations. A convex hull is constructed around the "
                 "exterior of the landmarks and the mask is extended upwards onto the forehead."
                 "\n\tvgg-clear: Mask designed to provide smart segmentation of mostly frontal "
                 "faces clear of obstructions. Profile faces and obstructions may result in "
                 "sub-par performance."
                 "\n\tvgg-obstructed: Mask designed to provide smart segmentation of mostly "
                 "frontal faces. The mask model has been specifically trained to recognize "
                 "some facial obstructions (hands and eyeglasses). Profile faces may result in "
                 "sub-par performance."
                 "\n\tunet-dfl: Mask designed to provide smart segmentation of mostly frontal "
                 "faces. The mask model has been trained by community members and will need "
                 "testing for further description. Profile faces may result in sub-par "
                 "performance.")
        self.add_item(
            section=section,
            title="mask_blur_kernel",
            datatype=int,
            min_max=(0, 9),
            rounding=1,
            default=3,
            group="mask",
            info="Apply gaussian blur to the mask input. This has the effect of smoothing the "
                 "edges of the mask, which can help with poorly calculated masks and give less "
                 "of a hard edge to the predicted mask. The size is in pixels (calculated from "
                 "a 128px mask). Set to 0 to not apply gaussian blur. This value should be odd, "
                 "if an even number is passed in then it will be rounded to the next odd number.")
        self.add_item(
            section=section,
            title="mask_threshold",
            datatype=int,
            default=4,
            min_max=(0, 50),
            rounding=1,
            group="mask",
            info="Sets pixels that are near white to white and near black to black. Set to 0 for "
                 "off.")
        self.add_item(
            section=section,
            title="learn_mask",
            datatype=bool,
            default=False,
            group="mask",
            info="Dedicate a portion of the model to learning how to duplicate the input "
                 "mask. Increases VRAM usage in exchange for learning a quick ability to try "
                 "to replicate more complex mask models.")
Example #3
    def get_argument_list(self):
        argument_list = list()
        argument_list.append({
            "opts": ("-a", "--alignments"),
            "action": FileFullPaths,
            "type": str,
            "group": "data",
            "required": True,
            "filetypes": "alignments",
            "help": "Full path to the alignments file to add the mask to. NB: if the mask already "
                    "exists in the alignments file it will be overwritten."})
        argument_list.append({
            "opts": ("-i", "--input"),
            "action": DirOrFileFullPaths,
            "type": str,
            "group": "data",
            "required": True,
            "help": "Directory containing extracted faces, source frames, or a video file."})
        argument_list.append({
            "opts": ("-it", "--input-type"),
            "action": Radio,
            "type": str.lower,
            "choices": ("faces", "frames"),
            "dest": "input_type",
            "group": "data",
            "default": "frames",
            "help": "R|Whether the `input` is a folder of faces or a folder frames/video"
                    "\nL|faces: The input is a folder containing extracted faces."
                    "\nL|frames: The input is a folder containing frames or is a video"})
        argument_list.append({
            "opts": ("-M", "--masker"),
            "action": Radio,
            "type": str.lower,
            "choices": PluginLoader.get_available_extractors("mask"),
            "default": "extended",
            "group": "process",
            "help": "R|Masker to use."
                    "\nL|components: Mask designed to provide facial segmentation based on the "
                    "positioning of landmark locations. A convex hull is constructed around the "
                    "exterior of the landmarks to create a mask."
                    "\nL|extended: Mask designed to provide facial segmentation based on the "
                    "positioning of landmark locations. A convex hull is constructed around the "
                    "exterior of the landmarks and the mask is extended upwards onto the forehead."
                    "\nL|vgg-clear: Mask designed to provide smart segmentation of mostly frontal "
                    "faces clear of obstructions. Profile faces and obstructions may result in "
                    "sub-par performance."
                    "\nL|vgg-obstructed: Mask designed to provide smart segmentation of mostly "
                    "frontal faces. The mask model has been specifically trained to recognize "
                    "some facial obstructions (hands and eyeglasses). Profile faces may result in "
                    "sub-par performance."
                    "\nL|unet-dfl: Mask designed to provide smart segmentation of mostly frontal "
                    "faces. The mask model has been trained by community members and will need "
                    "testing for further description. Profile faces may result in sub-par "
                    "performance."})
        argument_list.append({
            "opts": ("-p", "--processing"),
            "action": Radio,
            "type": str.lower,
            "choices": ("all", "missing", "output"),
            "default": "missing",
            "group": "process",
            "help": "R|Whether to update all masks in the alignments files, only those faces "
                    "that do not already have a mask of the given `mask type` or just to output "
                    "the masks to the `output` location."
                    "\nL|all: Update the mask for all faces in the alignments file."
                    "\nL|missing: Create a mask for all faces in the alignments file where a mask "
                    "does not previously exist."
                    "\nL|output: Don't update the masks, just output them for review in the given "
                    "output folder."})
        argument_list.append({
            "opts": ("-o", "--output-folder"),
            "action": DirFullPaths,
            "dest": "output",
            "type": str,
            "group": "output",
            "help": "Optional output location. If provided, a preview of the masks created will "
                    "be output in the given folder."})
        argument_list.append({
            "opts": ("-b", "--blur_kernel"),
            "action": Slider,
            "type": int,
            "group": "output",
            "min_max": (0, 9),
            "default": 3,
            "rounding": 1,
            "help": "Apply gaussian blur to the mask output. Has the effect of smoothing the "
                    "edges of the mask giving less of a hard edge. the size is in pixels. This "
                    "value should be odd, if an even number is passed in then it will be rounded "
                    "to the next odd number. NB: Only effects the output preview. Set to 0 for "
                    "off"})
        argument_list.append({
            "opts": ("-t", "--threshold"),
            "action": Slider,
            "type": int,
            "group": "output",
            "min_max": (0, 50),
            "default": 4,
            "rounding": 1,
            "help": "Helps reduce 'blotchiness' on some masks by making light shades white "
                    "and dark shades black. Higher values will impact more of the mask. NB: "
                    "Only effects the output preview. Set to 0 for off"})

        return argument_list
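
Both the mask_blur_kernel option in Example #2 and the --blur_kernel flag above state that even kernel sizes are rounded up to the next odd number and that 0 disables the blur. A minimal sketch of that rule (a hypothetical helper, not the project's code):

def normalise_blur_kernel(kernel_size):
    """ Illustrative only: 0 disables the blur, even values round up to the next odd. """
    if kernel_size <= 0:
        return 0
    return kernel_size if kernel_size % 2 == 1 else kernel_size + 1

# normalise_blur_kernel(0) -> 0, normalise_blur_kernel(3) -> 3, normalise_blur_kernel(4) -> 5
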
Example #4
    def set_globals(self):
        """
        Set the global options for training

        Loss Documentation
        MAE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
            -learners-should-know-4fb140e9d4b0
        MSE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
            -learners-should-know-4fb140e9d4b0
        LogCosh https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine
                -learners-should-know-4fb140e9d4b0
        Smooth L1 https://arxiv.org/pdf/1701.03077.pdf
        L_inf_norm https://medium.com/@montjoile/l0-norm-l1-norm-l2-norm-l-infinity
                   -norm-7a7d18a4f40c
        SSIM http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf
        GMSD https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
        """
        logger.debug("Setting global config")
        section = "global"
        self.add_section(title=section,
                         info="Options that apply to all models" +
                         ADDITIONAL_INFO)
        self.add_item(
            section=section,
            title="coverage",
            datatype=float,
            default=68.75,
            min_max=(62.5, 100.0),
            rounding=2,
            fixed=True,
            group="face",
            info=
            "How much of the extracted image to train on. A lower coverage will limit the "
            "model's scope to a zoomed-in central area while higher amounts can include the "
            "entire face. A trade-off exists between lower amounts given more detail "
            "versus higher amounts avoiding noticeable swap transitions. Sensible values to "
            "use are:"
            "\n\t62.5%% spans from eyebrow to eyebrow."
            "\n\t75.0%% spans from temple to temple."
            "\n\t87.5%% spans from ear to ear."
            "\n\t100.0%% is a mugshot.")
        self.add_item(
            section=section,
            title="mask_type",
            datatype=str,
            default="extended",
            choices=PluginLoader.get_available_extractors("mask",
                                                          add_none=True),
            group="mask",
            gui_radio=True,
            info=
            "The mask to be used for training. If you have selected 'Learn Mask' or "
            "'Penalized Mask Loss' you must select a value other than 'none'. The required "
            "mask should have been selected as part of the Extract process. If it does not "
            "exist in the alignments file then it will be generated prior to training "
            "commencing."
            "\n\tnone: Don't use a mask."
            "\n\tcomponents: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks to create a mask."
            "\n\textended: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks and the mask is extended upwards onto the forehead."
            "\n\tvgg-clear: Mask designed to provide smart segmentation of mostly frontal "
            "faces clear of obstructions. Profile faces and obstructions may result in "
            "sub-par performance."
            "\n\tvgg-obstructed: Mask designed to provide smart segmentation of mostly "
            "frontal faces. The mask model has been specifically trained to recognize "
            "some facial obstructions (hands and eyeglasses). Profile faces may result in "
            "sub-par performance."
            "\n\tunet-dfl: Mask designed to provide smart segmentation of mostly frontal "
            "faces. The mask model has been trained by community members and will need "
            "testing for further description. Profile faces may result in sub-par "
            "performance.")
        self.add_item(
            section=section,
            title="mask_blur_kernel",
            datatype=int,
            min_max=(0, 9),
            rounding=1,
            default=3,
            group="mask",
            info=
            "Apply gaussian blur to the mask input. This has the effect of smoothing the "
            "edges of the mask, which can help with poorly calculated masks and give less "
            "of a hard edge to the predicted mask. The size is in pixels (calculated from "
            "a 128px mask). Set to 0 to not apply gaussian blur. This value should be odd, "
            "if an even number is passed in then it will be rounded to the next odd number."
        )
        self.add_item(
            section=section,
            title="mask_threshold",
            datatype=int,
            default=4,
            min_max=(0, 50),
            rounding=1,
            group="mask",
            info=
            "Sets pixels that are near white to white and near black to black. Set to 0 for "
            "off.")
        self.add_item(
            section=section,
            title="learn_mask",
            datatype=bool,
            default=False,
            group="mask",
            info=
            "Dedicate a portion of the model to learning how to duplicate the input "
            "mask. Increases VRAM usage in exchange for learning a quick ability to try "
            "to replicate more complex mask models.")
        self.add_item(
            section=section,
            title="penalized_mask_loss",
            datatype=bool,
            default=True,
            group="loss",
            info=
            "Image loss function is weighted by mask presence. For areas of "
            "the image without the facial mask, reconstuction errors will be "
            "ignored while the masked face area is prioritized. May increase "
            "overall quality by focusing attention on the core face area.")
        self.add_item(
            section=section,
            title="loss_function",
            datatype=str,
            group="loss",
            default="mae",
            choices=[
                "mae", "mse", "logcosh", "smooth_loss", "l_inf_norm", "ssim",
                "gmsd", "pixel_gradient_diff"
            ],
            info="The loss function to use."
            "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
            "towards its median value in the training dataset. Robust to outliers but as "
            "a median, it can potentially ignore some infrequent image types in the dataset."
            "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
            "towards its average value in the training dataset. As an avg, it will be "
            "suspectible to outliers and typically produces slightly blurrier results."
            "\n\t LogCosh - log(cosh(x)) acts similiar to MSE for small errors and to "
            "MAE for large errors. Like MSE, it is very stable and prevents overshoots "
            "when errors are near zero. Like MAE, it is robust to outliers. NB: Due to a bug "
            "in PlaidML, this loss does not work on AMD cards."
            "\n\t Smooth_L1 --- Modification of the MAE loss to correct two of its "
            "disadvantages. This loss has improved stability and guidance for small errors."
            "\n\t L_inf_norm --- The L_inf norm will reduce the largest individual pixel "
            "error in an image. As each largest error is minimized sequentially, the "
            "overall error is improved. This loss will be extremely focused on outliers."
            "\n\t SSIM - Structural Similarity Index Metric is a perception-based "
            "loss that considers changes in texture, luminance, contrast, and local spatial "
            "statistics of an image. Potentially delivers more realistic looking images."
            "\n\t GMSD - Gradient Magnitude Similarity Deviation seeks to match "
            "the global standard deviation of the pixel to pixel differences between two "
            "images. Similiar in approach to SSIM. NB: This loss does not currently work on "
            "AMD cards."
            "\n\t Pixel_Gradient_Difference - Instead of minimizing the difference between "
            "the absolute value of each pixel in two reference images, compute the pixel to "
            "pixel spatial difference in each image and then minimize that difference "
            "between two images. Allows for large color shifts,but maintains the structure "
            "of the image.\n")
        self.add_item(
            section=section,
            title="icnr_init",
            datatype=bool,
            default=False,
            group="initialization",
            info=
            "Use ICNR to tile the default initializer in a repeating pattern. "
            "This strategy is designed for pairing with sub-pixel / pixel shuffler "
            "to reduce the 'checkerboard effect' in image reconstruction. "
            "\n\t https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf")
        self.add_item(
            section=section,
            title="conv_aware_init",
            datatype=bool,
            default=False,
            group="initialization",
            info=
            "Use Convolution Aware Initialization for convolutional layers. "
            "This can help eradicate the vanishing and exploding gradient problem "
            "as well as lead to higher accuracy, lower loss and faster convergence.\nNB:"
            "\n\t This can use more VRAM when creating a new model so you may want to "
            "lower the batch size for the first run. The batch size can be raised "
            "again when reloading the model. "
            "\n\t Multi-GPU is not supported for this option, so you should start the model "
            "on a single GPU. Once training has started, you can stop training, enable "
            "multi-GPU and resume."
            "\n\t Building the model will likely take several minutes as the calculations "
            "for this initialization technique are expensive. This will only impact starting "
            "a new model.")
        self.add_item(
            section=section,
            title="optimizer",
            datatype=str,
            gui_radio=True,
            group="optimizer",
            default="adam",
            choices=["adam", "nadam", "rms-prop"],
            info="The optimizer to use."
            "\n\t adam - Adaptive Moment Optimization. A stochastic gradient descent method "
            "that is based on adaptive estimation of first-order and second-order moments."
            "\n\t nadam - Adaptive Moment Optimization with Nesterov Momentum. Much like "
            "Adam but uses a different formula for calculating momentum."
            "\n\t rms-prop - Root Mean Square Propogation. Maintains a moving (discounted) "
            "average of the square of the gradients. Divides the gradient by the root of "
            "this average.")
        self.add_item(
            section=section,
            title="learning_rate",
            datatype=float,
            default=5e-5,
            min_max=(1e-6, 1e-4),
            rounding=6,
            fixed=False,
            group="optimizer",
            info=
            "Learning rate - how fast your network will learn (how large are the "
            "modifications to the model weights after one batch of training). Values that "
            "are too large might result in model crashes and the inability of the model to "
            "find the best solution. Values that are too small might be unable to escape "
            "from dead-ends and find the best global minimum.")
        self.add_item(
            section=section,
            title="reflect_padding",
            datatype=bool,
            default=False,
            group="network",
            info=
            "Use reflection padding rather than zero padding with convolutions. "
            "Each convolution must pad the image boundaries to maintain the proper "
            "sizing. More complex padding schemes can reduce artifacts at the "
            "border of the image."
            "\n\t http://www-cs.engr.ccny.cuny.edu/~wolberg/cs470/hw/hw2_pad.txt"
        )
        self.add_item(
            section=section,
            title="allow_growth",
            datatype=bool,
            default=False,
            group="network",
            fixed=False,
            info=
            "[Nvidia Only]. Enable the Tensorflow GPU 'allow_growth' configuration option. "
            "This option prevents Tensorflow from allocating all of the GPU VRAM at launch "
            "but can lead to higher VRAM fragmentation and slower performance. Should only "
            "be enabled if you are receiving errors regarding 'cuDNN fails to initialize' "
            "when commencing training.")
        self.add_item(
            section=section,
            title="mixed_precision",
            datatype=bool,
            default=False,
            group="network",
            info=
            "R|[Nvidia Only], NVIDIA GPUs can run operations in float16 faster than in "
            "float32. Mixed precision allows you to use a mix of float16 with float32, to "
            "get the performance benefits from float16 and the numeric stability benefits "
            "from float32.\nWhile mixed precision will run on most Nvidia models, it will "
            "only speed up training on more recent GPUs. Those with compute capability 7.0 "
            "or higher will see the greatest performance benefit from mixed precision "
            "because they have Tensor Cores. Older GPUs offer no math performance benefit "
            "for using mixed precision, however memory and bandwidth savings can enable some "
            "speedups. Generally RTX GPUs and later will offer the most benefit."
        )
        self.add_item(
            section=section,
            title="convert_batchsize",
            datatype=int,
            default=16,
            min_max=(1, 32),
            rounding=1,
            fixed=False,
            group="convert",
            info=
            "[GPU Only]. The number of faces to feed through the model at once when running "
            "the Convert process.\n\nNB: Increasing this figure is unlikely to improve "
            "convert speed, however, if you are getting Out of Memory errors, then you may "
            "want to reduce the batch size.")
Example #5
    def _set_loss(self) -> None:
        # pylint:disable=line-too-long
        """ Set the default loss options.

        Loss Documentation
        MAE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        MSE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        LogCosh https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
        Smooth L1 https://arxiv.org/pdf/1701.03077.pdf
        L_inf_norm https://medium.com/@montjoile/l0-norm-l1-norm-l2-norm-l-infinity-norm-7a7d18a4f40c
        SSIM http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf
        MSSIM https://www.cns.nyu.edu/pub/eero/wang03b.pdf
        GMSD https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
        """  # noqa
        # pylint:enable=line-too-long
        logger.debug("Setting Loss config")
        section = "global.loss"
        self.add_section(
            title=section,
            info="Loss configuration options\n"
            "Loss is the mechanism by which a Neural Network judges how well it "
            "thinks that it is recreating a face." + ADDITIONAL_INFO)
        self.add_item(section=section,
                      title="loss_function",
                      datatype=str,
                      group="loss",
                      default="ssim",
                      fixed=False,
                      choices=[
                          x for x in sorted(_LOSS_HELP)
                          if x not in _NON_PRIMARY_LOSS
                      ],
                      info="The loss function to use.\n\n\t" +
                      "\n\t".join(f"{k}: {v}"
                                  for k, v in sorted(_LOSS_HELP.items())
                                  if k not in _NON_PRIMARY_LOSS))
        self.add_item(
            section=section,
            title="loss_function_2",
            datatype=str,
            group="loss",
            default="mse",
            fixed=False,
            choices=list(sorted(_LOSS_HELP)),
            info=
            "The second loss function to use. If using a structural based loss (such as "
            "SSIM, MS-SSIM or GMSD) it is common to add an L1 regularization(MAE) or L2 "
            "regularization (MSE) function. You can adjust the weighting of this loss "
            "function with the loss_weight_2 option.\n\n\t" +
            "\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items())))
        self.add_item(
            section=section,
            title="loss_weight_2",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=100,
            fixed=False,
            info="The amount of weight to apply to the second loss function.\n\n"
            "\n\nThe value given here is as a percentage denoting how much the selected "
            "function should contribute to the overall loss cost of the model. For example:"
            "\n\t 100 - The loss calculated for the second loss function will be applied at "
            "its full amount towards the overall loss score. "
            "\n\t 25 - The loss calculated for the second loss function will be reduced by a "
            "quarter prior to adding to the overall loss score. "
            "\n\t 400 - The loss calculated for the second loss function will be mulitplied "
            "4 times prior to adding to the overall loss score. "
            "\n\t 0 - Disables the second loss function altogether.")
        self.add_item(
            section=section,
            title="loss_function_3",
            datatype=str,
            group="loss",
            default="none",
            fixed=False,
            choices=list(sorted(_LOSS_HELP)),
            info=
            "The third loss function to use. You can adjust the weighting of this loss "
            "function with the loss_weight_3 option.\n\n\t" +
            "\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items())))
        self.add_item(
            section=section,
            title="loss_weight_3",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=0,
            fixed=False,
            info="The amount of weight to apply to the third loss function.\n\n"
            "\n\nThe value given here is as a percentage denoting how much the selected "
            "function should contribute to the overall loss cost of the model. For example:"
            "\n\t 100 - The loss calculated for the third loss function will be applied at "
            "its full amount towards the overall loss score. "
            "\n\t 25 - The loss calculated for the third loss function will be reduced by a "
            "quarter prior to adding to the overall loss score. "
            "\n\t 400 - The loss calculated for the third loss function will be mulitplied 4 "
            "times prior to adding to the overall loss score. "
            "\n\t 0 - Disables the third loss function altogether.")
        self.add_item(
            section=section,
            title="loss_function_4",
            datatype=str,
            group="loss",
            default="none",
            fixed=False,
            choices=list(sorted(_LOSS_HELP)),
            info=
            "The fourth loss function to use. You can adjust the weighting of this loss "
            "function with the loss_weight_3 option.\n\n\t" +
            "\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items())))
        self.add_item(
            section=section,
            title="loss_weight_4",
            datatype=int,
            group="loss",
            min_max=(0, 400),
            rounding=1,
            default=0,
            fixed=False,
            info="The amount of weight to apply to the fourth loss function.\n\n"
            "\n\nThe value given here is as a percentage denoting how much the selected "
            "function should contribute to the overall loss cost of the model. For example:"
            "\n\t 100 - The loss calculated for the fourth loss function will be applied at "
            "its full amount towards the overall loss score. "
            "\n\t 25 - The loss calculated for the fourth loss function will be reduced by a "
            "quarter prior to adding to the overall loss score. "
            "\n\t 400 - The loss calculated for the fourth loss function will be mulitplied "
            "4 times prior to adding to the overall loss score. "
            "\n\t 0 - Disables the fourth loss function altogether.")
        self.add_item(
            section=section,
            title="mask_loss_function",
            datatype=str,
            group="loss",
            default="mse",
            fixed=False,
            choices=["mae", "mse"],
            info="The loss function to use when learning a mask."
            "\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
            "towards its median value in the training dataset. Robust to outliers but as "
            "a median, it can potentially ignore some infrequent image types in the dataset."
            "\n\t MSE - Mean squared error will guide reconstructions of each pixel "
            "towards its average value in the training dataset. As an average, it will be "
            "susceptible to outliers and typically produces slightly blurrier results."
        )
        self.add_item(
            section=section,
            title="eye_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=3,
            fixed=False,
            info=
            "The amount of priority to give to the eyes.\n\nThe value given here is as a "
            "multiplier of the main loss score. For example:"
            "\n\t 1 - The eyes will receive the same priority as the rest of the face. "
            "\n\t 10 - The eyes will be given a score 10 times higher than the rest of the "
            "face."
            "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="mouth_multiplier",
            datatype=int,
            group="loss",
            min_max=(1, 40),
            rounding=1,
            default=2,
            fixed=False,
            info=
            "The amount of priority to give to the mouth.\n\nThe value given here is as a "
            "multiplier of the main loss score. For Example:"
            "\n\t 1 - The mouth will receive the same priority as the rest of the face. "
            "\n\t 10 - The mouth will be given a score 10 times higher than the rest of the "
            "face."
            "\n\nNB: Penalized Mask Loss must be enable to use this option.")
        self.add_item(
            section=section,
            title="penalized_mask_loss",
            datatype=bool,
            default=True,
            group="loss",
            info=
            "Image loss function is weighted by mask presence. For areas of "
            "the image without the facial mask, reconstruction errors will be "
            "ignored while the masked face area is prioritized. May increase "
            "overall quality by focusing attention on the core face area.")
        self.add_item(
            section=section,
            title="mask_type",
            datatype=str,
            default="extended",
            choices=PluginLoader.get_available_extractors("mask",
                                                          add_none=True,
                                                          extend_plugin=True),
            group="mask",
            gui_radio=True,
            info=
            "The mask to be used for training. If you have selected 'Learn Mask' or "
            "'Penalized Mask Loss' you must select a value other than 'none'. The required "
            "mask should have been selected as part of the Extract process. If it does not "
            "exist in the alignments file then it will be generated prior to training "
            "commencing."
            "\n\tnone: Don't use a mask."
            "\n\tbisenet-fp-face: Relatively lightweight NN based mask that provides more "
            "refined control over the area to be masked (configurable in mask settings). "
            "Use this version of bisenet-fp if your model is trained with 'face' or "
            "'legacy' centering."
            "\n\tbisenet-fp-head: Relatively lightweight NN based mask that provides more "
            "refined control over the area to be masked (configurable in mask settings). "
            "Use this version of bisenet-fp if your model is trained with 'head' centering."
            "\n\tcomponents: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks to create a mask."
            "\n\textended: Mask designed to provide facial segmentation based on the "
            "positioning of landmark locations. A convex hull is constructed around the "
            "exterior of the landmarks and the mask is extended upwards onto the forehead."
            "\n\tvgg-clear: Mask designed to provide smart segmentation of mostly frontal "
            "faces clear of obstructions. Profile faces and obstructions may result in "
            "sub-par performance."
            "\n\tvgg-obstructed: Mask designed to provide smart segmentation of mostly "
            "frontal faces. The mask model has been specifically trained to recognize "
            "some facial obstructions (hands and eyeglasses). Profile faces may result in "
            "sub-par performance."
            "\n\tunet-dfl: Mask designed to provide smart segmentation of mostly frontal "
            "faces. The mask model has been trained by community members and will need "
            "testing for further description. Profile faces may result in sub-par "
            "performance.")
        self.add_item(
            section=section,
            title="mask_blur_kernel",
            datatype=int,
            min_max=(0, 9),
            rounding=1,
            default=3,
            group="mask",
            info=
            "Apply gaussian blur to the mask input. This has the effect of smoothing the "
            "edges of the mask, which can help with poorly calculated masks and give less "
            "of a hard edge to the predicted mask. The size is in pixels (calculated from "
            "a 128px mask). Set to 0 to not apply gaussian blur. This value should be odd, "
            "if an even number is passed in then it will be rounded to the next odd number."
        )
        self.add_item(
            section=section,
            title="mask_threshold",
            datatype=int,
            default=4,
            min_max=(0, 50),
            rounding=1,
            group="mask",
            info=
            "Sets pixels that are near white to white and near black to black. Set to 0 for "
            "off.")
        self.add_item(
            section=section,
            title="learn_mask",
            datatype=bool,
            default=False,
            group="mask",
            info=
            "Dedicate a portion of the model to learning how to duplicate the input "
            "mask. Increases VRAM usage in exchange for learning a quick ability to try "
            "to replicate more complex mask models.")
Example #6
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = []
     argument_list.append({
         "opts": ("--serializer", ),
         "type":
         str.lower,
         "dest":
         "serializer",
         "default":
         "json",
         "choices": ("json", "pickle", "yaml"),
         "help":
         "Serializer for alignments file. If "
         "yaml is chosen and not available, then "
         "json will be used as the default "
         "fallback."
     })
     argument_list.append({
         "opts": ("-D", "--detector"),
         "type":
         str,
         "choices":
         PluginLoader.get_available_extractors("detect"),
         "default":
         "mtcnn",
         "help":
         "R|Detector to use."
         "\n'dlib-hog': uses least resources, but is the"
         "\n\tleast reliable."
         "\n'dlib-cnn': faster than mtcnn but detects"
         "\n\tfewer faces and fewer false positives."
         "\n'mtcnn': slower than dlib, but uses fewer"
         "\n\tresources whilst detecting more faces and"
         "\n\tmore false positives. Has superior"
         "\n\talignment to dlib"
     })
     argument_list.append({
         "opts": ("-A", "--aligner"),
         "type": str,
         "choices": PluginLoader.get_available_extractors("align"),
         "default": "fan",
         "help": "R|Aligner to use."
                 "\n'dlib': Dlib Pose Predictor. Faster, less "
                 "\n\tresource intensive, but less accurate."
                 "\n'fan': Face Alignment Network. Best aligner."
                 "\n\tGPU heavy."
     })
     argument_list.append({
         "opts": ("-mtms", "--mtcnn-minsize"),
         "type": int,
         "dest": "mtcnn_minsize",
         "default": 20,
         "help": "The minimum size of a face to be accepted. "
                 "Lower values use significantly more VRAM. "
                 "Minimum value is 10. Default is 20 "
                 "(MTCNN detector only)"
     })
     argument_list.append({
         "opts": ("-mtth", "--mtcnn-threshold"),
         "nargs": "+",
         "type": str,
         "dest": "mtcnn_threshold",
         "default": ["0.6", "0.7", "0.7"],
         "help": "R|Three step threshold for face detection. "
                 "Should be\nthree decimal numbers each less than 1. Eg:\n"
                 "'--mtcnn-threshold 0.6 0.7 0.7'.\n"
                 "1st stage: obtains face candidates.\n"
                 "2nd stage: refinement of face candidates.\n"
                 "3rd stage: further refinement of face candidates.\n"
                 "Default is 0.6 0.7 0.7 (MTCNN detector only)"
     })
     argument_list.append({
         "opts": ("-mtsc", "--mtcnn-scalefactor"),
         "type": float,
         "dest": "mtcnn_scalefactor",
         "default": 0.709,
         "help": "The scale factor for the image pyramid. "
                 "Should be a decimal number less than one. "
                 "Default is 0.709 (MTCNN detector only)"
     })
     argument_list.append({
         "opts": ("-r", "--rotate-images"),
         "type": str,
         "dest": "rotate_images",
         "default": None,
         "help": "If a face isn't found, rotate the images to try to "
                 "find a face. Can find more faces at the cost of "
                 "extraction speed. Pass in a single number to use "
                 "increments of that size up to 360, or pass in a list "
                 "of numbers to enumerate exactly what angles to check"
     })
     argument_list.append({
         "opts": ("-bt", "--blur-threshold"),
         "type": int,
         "dest": "blur_thresh",
         "default": None,
         "help": "Automatically discard images blurrier than the "
                 "specified threshold. Discarded images are moved into "
                 "a \"blurry\" sub-folder. Lower values allow more blur"
     })
     argument_list.append({
         "opts": ("-mp", "--multiprocess"),
         "action": "store_true",
         "default": False,
         "help": "Run extraction in parallel. Offers speed up for some "
                 "extractor/detector combinations, less so for others. "
                 "Only has an effect if both the aligner and detector "
                 "use the GPU, otherwise this is automatic."
     })
     argument_list.append({
         "opts": ("-sz", "--size"),
         "type": int,
         "default": 256,
         "help": "The output size of extracted faces. Make sure that "
                 "the model you intend to train supports your required "
                 "size. This will only need to be changed for hi-res "
                 "models."
     })
     argument_list.append({
         "opts": ("-s", "--skip-existing"),
         "action": "store_true",
         "dest": "skip_existing",
         "default": False,
         "help": "Skips frames that have already been extracted and "
                 "exist in the alignments file"
     })
     argument_list.append({
         "opts": ("-sf", "--skip-existing-faces"),
         "action": "store_true",
         "dest": "skip_faces",
         "default": False,
         "help": "Skip frames that already have detected faces in the "
                 "alignments file"
     })
     argument_list.append({
         "opts": ("-dl", "--debug-landmarks"),
         "action": "store_true",
         "dest": "debug_landmarks",
         "default": False,
         "help": "Draw landmarks on the output faces for debug"
     })
     argument_list.append({
         "opts": ("-ae", "--align-eyes"),
         "action": "store_true",
         "dest": "align_eyes",
         "default": False,
         "help": "Perform extra alignment to ensure left/right eyes "
                 "are at the same height"
     })
     argument_list.append({
         "opts": ("-si", "--save-interval"),
         "dest": "save_interval",
         "type": int,
         "default": None,
         "help": "Automatically save the alignments file after a set "
                 "amount of frames. Will only save at the end of "
                 "extracting by default. WARNING: Don't interrupt the "
                 "script when writing the file because it might get "
                 "corrupted."
     })
     return argument_list
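A minimal sketch (not part of the original source) of how a dict-based argument list like the one returned above might be fed into a plain argparse parser. The helper name build_parser and the set of GUI-only keys it strips are assumptions for illustration; custom GUI action classes are simply dropped, while standard string actions such as "store_true" are kept.

# Assumed helper, not from the original source.
import argparse


def build_parser(argument_list, description="faceswap extract (sketch)"):
    """Build an argparse.ArgumentParser from a list of option dictionaries."""
    parser = argparse.ArgumentParser(description=description)
    # Keys assumed to be consumed only by the GUI layer in this sketch.
    gui_only = {"min_max", "rounding", "filetypes", "group"}
    for option in argument_list:
        kwargs = {}
        for key, value in option.items():
            if key == "opts" or key in gui_only:
                continue
            if key == "action" and not isinstance(value, str):
                continue  # drop custom GUI action classes (e.g. Slider)
            kwargs[key] = value
        parser.add_argument(*option["opts"], **kwargs)
    return parser


# Illustrative usage against the list returned above:
# args = build_parser(argument_list).parse_args(["-D", "mtcnn", "-sz", "256"])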
Example #7
0
    def get_argument_list(self):
        argument_list = list()
        argument_list.append(dict(
            opts=("-a", "--alignments"),
            action=FileFullPaths,
            type=str,
            group=_("data"),
            required=True,
            filetypes="alignments",
            help=_("Full path to the alignments file to add the mask to. NB: if the mask already "
                   "exists in the alignments file it will be overwritten.")))
        argument_list.append(dict(
            opts=("-i", "--input"),
            action=DirOrFileFullPaths,
            type=str,
            group=_("data"),
            filetypes="video",
            required=True,
            help=_("Directory containing extracted faces, source frames, or a video file.")))
        argument_list.append(dict(
            opts=("-it", "--input-type"),
            action=Radio,
            type=str.lower,
            choices=("faces", "frames"),
            dest="input_type",
            group=_("data"),
            default="frames",
            help=_("R|Whether the `input` is a folder of faces or a folder frames/video"
                   "\nL|faces: The input is a folder containing extracted faces."
                   "\nL|frames: The input is a folder containing frames or is a video")))
        argument_list.append(dict(
            opts=("-M", "--masker"),
            action=Radio,
            type=str.lower,
            choices=PluginLoader.get_available_extractors("mask"),
            default="extended",
            group=_("process"),
            help=_("R|Masker to use."
                   "\nL|bisenet-fp: Relatively lightweight NN based mask that provides more "
                   "refined control over the area to be masked including full head masking "
                   "(configurable in mask settings)."
                   "\nL|components: Mask designed to provide facial segmentation based on the "
                   "positioning of landmark locations. A convex hull is constructed around the "
                   "exterior of the landmarks to create a mask."
                   "\nL|extended: Mask designed to provide facial segmentation based on the "
                   "positioning of landmark locations. A convex hull is constructed around the "
                   "exterior of the landmarks and the mask is extended upwards onto the forehead."
                   "\nL|vgg-clear: Mask designed to provide smart segmentation of mostly frontal "
                   "faces clear of obstructions. Profile faces and obstructions may result in "
                   "sub-par performance."
                   "\nL|vgg-obstructed: Mask designed to provide smart segmentation of mostly "
                   "frontal faces. The mask model has been specifically trained to recognize "
                   "some facial obstructions (hands and eyeglasses). Profile faces may result in "
                   "sub-par performance."
                   "\nL|unet-dfl: Mask designed to provide smart segmentation of mostly frontal "
                   "faces. The mask model has been trained by community members and will need "
                   "testing for further description. Profile faces may result in sub-par "
                   "performance.")))
        argument_list.append(dict(
            opts=("-p", "--processing"),
            action=Radio,
            type=str.lower,
            choices=("all", "missing", "output"),
            default="missing",
            group=_("process"),
            help=_("R|Whether to update all masks in the alignments files, only those faces "
                   "that do not already have a mask of the given `mask type` or just to output "
                   "the masks to the `output` location."
                   "\nL|all: Update the mask for all faces in the alignments file."
                   "\nL|missing: Create a mask for all faces in the alignments file where a mask "
                   "does not previously exist."
                   "\nL|output: Don't update the masks, just output them for review in the given "
                   "output folder.")))
        argument_list.append(dict(
            opts=("-o", "--output-folder"),
            action=DirFullPaths,
            dest="output",
            type=str,
            group=_("output"),
            help=_("Optional output location. If provided, a preview of the masks created will "
                   "be output in the given folder.")))
        argument_list.append(dict(
            opts=("-b", "--blur_kernel"),
            action=Slider,
            type=int,
            group=_("output"),
            min_max=(0, 9),
            default=3,
            rounding=1,
            help=_("Apply gaussian blur to the mask output. Has the effect of smoothing the "
                   "edges of the mask giving less of a hard edge. the size is in pixels. This "
                   "value should be odd, if an even number is passed in then it will be rounded "
                   "to the next odd number. NB: Only effects the output preview. Set to 0 for "
                   "off")))
        argument_list.append(dict(
            opts=("-t", "--threshold"),
            action=Slider,
            type=int,
            group=_("output"),
            min_max=(0, 50),
            default=4,
            rounding=1,
            help=_("Helps reduce 'blotchiness' on some masks by making light shades white "
                   "and dark shades black. Higher values will impact more of the mask. NB: "
                   "Only effects the output preview. Set to 0 for off")))
        argument_list.append(dict(
            opts=("-ot", "--output-type"),
            action=Radio,
            type=str.lower,
            choices=("combined", "masked", "mask"),
            default="combined",
            group=_("output"),
            help=_("R|How to format the output when processing is set to 'output'."
                   "\nL|combined: The image contains the face/frame, face mask and masked face."
                   "\nL|masked: Output the face/frame as rgba image with the face masked."
                   "\nL|mask: Only output the mask as a single channel image.")))
        argument_list.append(dict(
            opts=("-f", "--full-frame"),
            action="store_true",
            default=False,
            group=_("output"),
            help=_("R|Whether to output the whole frame or only the face box when using "
                   "output processing. Only has an effect when using frames as input.")))

        return argument_list
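As a rough, standalone sketch (not from the original source), here is how a few of the mask-tool options above map onto a plain argparse parser. The masker choices are taken from the help text, a stub stands in for the gettext hook, and the custom actions plus the group/filetypes keys are treated as GUI-side hints and omitted; the file path passed in is purely illustrative.

# Standalone sketch with the assumptions noted above.
import argparse


def _(text):
    """Stand-in for the gettext translation hook used in the example above."""
    return text


parser = argparse.ArgumentParser(description="mask tool options (sketch)")
parser.add_argument("-a", "--alignments", type=str, required=True,
                    help=_("Full path to the alignments file to add the mask to."))
parser.add_argument("-M", "--masker", type=str.lower, default="extended",
                    choices=("bisenet-fp", "components", "extended",
                             "vgg-clear", "vgg-obstructed", "unet-dfl"),
                    help=_("Masker to use."))
parser.add_argument("-p", "--processing", type=str.lower, default="missing",
                    choices=("all", "missing", "output"))

# "alignments.fsa" is an illustrative path, not a real file.
args = parser.parse_args(["-a", "alignments.fsa", "-M", "components"])
print(args.masker)  # -> "components"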