Example #1
0
    def process(self):
        """ Call the training process object.

        Emits deprecation warnings for CLI options that are moving into the
        training config, sets system log verbosity, then starts the training
        thread and monitors it until completion or error.
        """
        logger.debug("Starting Training Process")
        logger.info("Training data directory: %s", self.args.model_dir)

        # TODO Move these args to config and remove these deprecation warnings
        # getattr with a False default collapses the hasattr + attribute test
        if getattr(self.args, "warp_to_landmarks", False):
            deprecation_warning(
                "`-wl`, ``--warp-to-landmarks``",
                additional_info="This option will be available within training "
                "config settings (/config/train.ini).")
        # Bug fix: this previously tested `self.args.no_flip`, so the
        # no-augment-color warning fired on the wrong CLI option
        if getattr(self.args, "no_augment_color", False):
            deprecation_warning(
                "`-nac`, ``--no-augment-color``",
                additional_info="This option will be available within training "
                "config settings (/config/train.ini).")
        set_system_verbosity(self.args.loglevel)
        thread = self.start_thread()
        # queue_manager.debug_monitor(1)

        err = self.monitor(thread)

        self.end_thread(thread, err)
        logger.debug("Completed Training Process")
Example #2
0
    def process(self):
        """ The entry point for triggering the Training Process.

        Should only be called from  :class:`lib.cli.ScriptExecutor`
        """
        logger.debug("Starting Training Process")
        logger.info("Training data directory: %s", self._args.model_dir)

        # TODO Move these args to config and remove these deprecation warnings
        # getattr with a False default collapses the hasattr + attribute test
        if getattr(self._args, "warp_to_landmarks", False):
            deprecation_warning(
                "`-wl`, ``--warp-to-landmarks``",
                additional_info="This option will be available within training "
                "config settings (/config/train.ini).")
        # Bug fix: this previously tested `self._args.no_flip`, so the
        # no-augment-color warning fired on the wrong CLI option
        if getattr(self._args, "no_augment_color", False):
            deprecation_warning(
                "`-nac`, ``--no-augment-color``",
                additional_info="This option will be available within training "
                "config settings (/config/train.ini).")
        thread = self._start_thread()
        # from lib.queue_manager import queue_manager; queue_manager.debug_monitor(1)

        err = self._monitor(thread)

        self._end_thread(thread, err)
        logger.debug("Completed Training Process")
Example #3
0
    def __init__(self,
                 loglevel,
                 configfile=None,
                 git_model_id=None,
                 model_filename=None,
                 rotation=None,
                 min_size=0):
        """ Set up the base detector plugin: config, queues and model path. """
        logger.debug(
            "Initializing %s: (loglevel: %s, configfile: %s, git_model_id: %s, "
            "model_filename: %s, rotation: %s, min_size: %s)",
            self.__class__.__name__, loglevel, configfile, git_model_id,
            model_filename, rotation, min_size)

        # Config section is keyed on the last two parts of the module path
        config_section = ".".join(self.__module__.split(".")[-2:])
        self.config = get_config(config_section, configfile=configfile)

        self.loglevel = loglevel
        self.rotation = self.get_rotation_angles(rotation)
        self.min_size = min_size
        self.parent_is_pool = False
        self.init = None
        self.error = None

        # Input/output queues for the plugin.
        # See lib.queue_manager.QueueManager for getting queues
        self.queues = {"in": None, "out": None}

        # Path to the model file, if this detector requires one
        self.model_path = self.get_model(git_model_id, model_filename)

        # Target image size for passing images through the detector.
        # Either a tuple of dimensions (x, y) or an int pixel count
        self.target = None

        # Conservative estimate of VRAM used at the set target; drives how
        # many parallel processes / batches can be run without OOM
        self.vram = None

        # Whether the plugin can run under PlaidML
        self.supports_plaidml = False

        # Batch size supported by available VRAM for batching detectors;
        # also holds the thread/process count for parallel-processing plugins
        self.batch_size = 1

        if rotation is not None:
            deprecation_warning(
                "Rotation ('-r', '--rotation')",
                additional_info="It is not necessary for most detectors and "
                "will be moved to plugin config for those "
                "detectors that require it.")
        logger.debug("Initialized _base %s", self.__class__.__name__)
Example #4
0
    def run_extraction(self):
        """ Run Face Detection.

        Drives the extractor pipeline for each of its passes, streaming
        detected faces through output processing and saving alignments at
        the configured interval. Aborts all remaining passes if any face
        dict reports an exception.
        """
        to_process = self.process_item_count()
        # `size` / `align_eyes` may be absent from the args namespace
        # depending on how this was invoked, hence the hasattr guards
        size = self.args.size if hasattr(self.args, "size") else 256
        align_eyes = self.args.align_eyes if hasattr(self.args,
                                                     "align_eyes") else False
        if align_eyes:
            deprecation_warning(
                "Align eyes (-ae --align-eyes)",
                additional_info="This functionality will still be available "
                "within the alignments tool.")
        exception = False

        for phase in range(self.extractor.passes):
            # An exception on a previous pass aborts the remaining passes
            if exception:
                break
            # NOTE(review): `final_pass` is sampled before launch() here but
            # re-read live inside the loop below — presumably it is stable
            # for the duration of a pass; confirm against the extractor
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            self.check_thread_error()
            for idx, faces in enumerate(
                    tqdm(self.extractor.detected_faces(),
                         total=to_process,
                         file=sys.stdout,
                         desc="Running pass {} of {}: {}".format(
                             phase + 1, self.extractor.passes,
                             self.extractor.phase.title()))):
                # Surface any error raised in the extractor threads
                self.check_thread_error()
                exception = faces.get("exception", False)
                if exception:
                    break
                filename = faces["filename"]

                if self.extractor.final_pass:
                    # Last pass: write faces out and periodically persist
                    # the alignments file
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    if self.save_interval and (idx +
                                               1) % self.save_interval == 0:
                        self.alignments.save()
                else:
                    # Intermediate pass: drop the image payload and stash the
                    # faces for reloading into the next pass
                    del faces["image"]
                    detected_faces[filename] = faces

            if is_final:
                logger.debug("Putting EOF to save")
                self.save_queue.put("EOF")
            else:
                logger.debug("Reloading images")
                self.threaded_io("reload", detected_faces)
Example #5
0
    def __init__(self,
                 model_dir,
                 gpus=1,
                 configfile=None,
                 snapshot_interval=0,
                 no_logs=False,
                 warp_to_landmarks=False,
                 augment_color=True,
                 no_flip=False,
                 training_image_size=256,
                 alignments_paths=None,
                 preview_scale=100,
                 input_shape=None,
                 encoder_dim=None,
                 trainer="original",
                 pingpong=False,
                 memory_saving_gradients=False,
                 optimizer_savings=False,
                 predict=False):
        """ Set up the model base: config, state, VRAM savings, network
        blocks and the training options dict consumed by the trainer. """
        logger.debug(
            "Initializing ModelBase (%s): (model_dir: '%s', gpus: %s, configfile: %s, "
            "snapshot_interval: %s, no_logs: %s, warp_to_landmarks: %s, augment_color: "
            "%s, no_flip: %s, training_image_size, %s, alignments_paths: %s, "
            "preview_scale: %s, input_shape: %s, encoder_dim: %s, trainer: %s, "
            "pingpong: %s, memory_saving_gradients: %s, optimizer_savings: %s, "
            "predict: %s)", self.__class__.__name__, model_dir, gpus,
            configfile, snapshot_interval, no_logs, warp_to_landmarks,
            augment_color, no_flip, training_image_size, alignments_paths,
            preview_scale, input_shape, encoder_dim, trainer, pingpong,
            memory_saving_gradients, optimizer_savings, predict)

        self.predict = predict
        self.model_dir = model_dir
        # Bundle the VRAM saving switches together
        self.vram_savings = VRAMSavings(
            pingpong, optimizer_savings, memory_saving_gradients)

        self.backup = Backup(self.model_dir, self.name)
        self.gpus = gpus
        self.configfile = configfile
        self.input_shape = input_shape
        self.encoder_dim = encoder_dim
        self.trainer = trainer

        # Load config if plugin has not already referenced it
        self.load_config()

        self.state = State(self.model_dir,
                           self.name,
                           self.config_changeable_items,
                           no_logs,
                           self.vram_savings.pingpong,
                           training_image_size)

        self.blocks = NNBlocks(
            use_subpixel=self.config["subpixel_upscaling"],
            use_icnr_init=self.config["icnr_init"],
            use_convaware_init=self.config["conv_aware_init"],
            use_reflect_padding=self.config["reflect_padding"],
            first_run=self.state.first_run)

        self.is_legacy = False
        self.rename_legacy()
        self.load_state_info()

        self.networks = {}    # Networks for the model
        self.predictors = {}  # Predictors for model
        self.history = {}     # Loss history per save iteration

        # Training information specific to the model should be placed in
        # this dict for reference by the trainer.
        self.training_opts = {
            "alignments": alignments_paths,
            "preview_scaling": preview_scale / 100,
            "warp_to_landmarks": warp_to_landmarks,
            "augment_color": augment_color,
            "no_flip": no_flip,
            "pingpong": self.vram_savings.pingpong,
            "snapshot_interval": snapshot_interval,
            "training_size": self.state.training_size,
            "no_logs": self.state.current_session["no_logs"],
            "coverage_ratio": self.calculate_coverage_ratio(),
            "mask_type": self.config["mask_type"],
            "mask_blur_kernel": self.config["mask_blur_kernel"],
            "mask_threshold": self.config["mask_threshold"],
            # Mask options only apply when a mask type is actually selected
            "learn_mask": (self.config["learn_mask"]
                           and self.config["mask_type"] is not None),
            "penalized_mask_loss": (self.config["penalized_mask_loss"]
                                    and self.config["mask_type"] is not None)}
        logger.debug("training_opts: %s", self.training_opts)

        if self.multiple_models_in_folder:
            deprecation_warning(
                "Support for multiple model types within the same folder",
                additional_info="Please split each model into separate "
                "folders to avoid issues in future.")

        self.build()
        logger.debug("Initialized ModelBase (%s)", self.__class__.__name__)
Example #6
0
File: _base.py  Project: zzjoube/faceswap
    def __init__(self,
                 model_dir,
                 gpus,
                 configfile=None,
                 snapshot_interval=0,
                 no_logs=False,
                 warp_to_landmarks=False,
                 augment_color=True,
                 no_flip=False,
                 training_image_size=256,
                 alignments_paths=None,
                 preview_scale=100,
                 input_shape=None,
                 encoder_dim=None,
                 trainer="original",
                 pingpong=False,
                 memory_saving_gradients=False,
                 predict=False):
        """ Set up the model base: state, network blocks, legacy handling
        and the training options dict consumed by the trainer. """
        logger.debug(
            "Initializing ModelBase (%s): (model_dir: '%s', gpus: %s, configfile: %s, "
            "snapshot_interval: %s, no_logs: %s, warp_to_landmarks: %s, augment_color: "
            "%s, no_flip: %s, training_image_size, %s, alignments_paths: %s, "
            "preview_scale: %s, input_shape: %s, encoder_dim: %s, trainer: %s, "
            "pingpong: %s, memory_saving_gradients: %s, predict: %s)",
            self.__class__.__name__, model_dir, gpus, configfile,
            snapshot_interval, no_logs, warp_to_landmarks, augment_color,
            no_flip, training_image_size, alignments_paths, preview_scale,
            input_shape, encoder_dim, trainer, pingpong,
            memory_saving_gradients, predict)

        self.predict = predict
        self.model_dir = model_dir

        self.backup = Backup(self.model_dir, self.name)
        self.gpus = gpus
        self.configfile = configfile
        self.blocks = NNBlocks(
            use_subpixel=self.config["subpixel_upscaling"],
            use_icnr_init=self.config["icnr_init"],
            use_reflect_padding=self.config["reflect_padding"])
        self.input_shape = input_shape
        self.output_shape = None  # set after model is compiled
        self.encoder_dim = encoder_dim
        self.trainer = trainer

        self.state = State(self.model_dir,
                           self.name,
                           self.config_changeable_items,
                           no_logs,
                           pingpong,
                           training_image_size)
        self.is_legacy = False
        self.rename_legacy()
        self.load_state_info()

        self.networks = {}    # Networks for the model
        self.predictors = {}  # Predictors for model
        self.history = {}     # Loss history per save iteration

        # Training information specific to the model should be placed in
        # this dict for reference by the trainer.
        self.training_opts = {"alignments": alignments_paths,
                              "preview_scaling": preview_scale / 100,
                              "warp_to_landmarks": warp_to_landmarks,
                              "augment_color": augment_color,
                              "no_flip": no_flip,
                              "pingpong": pingpong,
                              "snapshot_interval": snapshot_interval}

        self.set_gradient_type(memory_saving_gradients)
        if self.multiple_models_in_folder:
            deprecation_warning(
                "Support for multiple model types within the same folder",
                additional_info="Please split each model into separate "
                "folders to avoid issues in future.")

        self.build()
        self.set_training_data()
        logger.debug("Initialized ModelBase (%s)", self.__class__.__name__)