Example #1
def get_model(self, git_model_id, model_filename, backend):
    """ Check if model is available, if not, download and unzip it """
    root_path = os.path.abspath(os.path.dirname(sys.argv[0]))
    cache_path = os.path.join(root_path, "plugins", "extract", ".cache")
    model = GetModel(model_filename, cache_path, git_model_id).model_path
    model = cv2.dnn.readNetFromCaffe(model[1], model[0])  # pylint: disable=no-member
    model.setPreferableTarget(self.get_backend(backend))
    return model
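The self.get_backend(backend) helper is not shown in this example; below is a minimal sketch of the string-to-target mapping it presumably performs, using OpenCV's DNN target constants. Which GPU target the real method selects is an assumption here:

    import cv2

    def get_backend(backend):
        """ Map a backend string to an OpenCV DNN target (illustrative only) """
        targets = {"CPU": cv2.dnn.DNN_TARGET_CPU,
                   "GPU": cv2.dnn.DNN_TARGET_OPENCL}  # one plausible GPU target
        return targets.get(backend, cv2.dnn.DNN_TARGET_CPU)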
Example #2
def get_model(git_model_id, model_filename):
    """ Check if model is available, if not, download and unzip it """
    root_path = os.path.abspath(os.path.dirname(sys.argv[0]))
    cache_path = os.path.join(root_path, "plugins", "extract", ".cache")
    model = GetModel(model_filename, cache_path, git_model_id).model_path
    model = cv2.dnn.readNetFromCaffe(model[1], model[0])  # pylint: disable=no-member
    model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # pylint: disable=no-member
    return model
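For context, a hedged sketch of running inference with the network this function returns; the arguments to get_model, the input size and the scaling below are placeholders, since they depend on the specific Caffe model:

    import cv2
    import numpy as np

    net = get_model(git_model_id=1,
                    model_filename=["model.caffemodel", "model.prototxt"])  # hypothetical values
    frame = np.zeros((300, 300, 3), dtype=np.uint8)        # placeholder image
    blob = cv2.dnn.blobFromImage(frame, size=(300, 300))   # HWC image -> NCHW blob
    net.setInput(blob)
    detections = net.forward()

Note the indexing in the example above: cv2.dnn.readNetFromCaffe takes the .prototxt first and the .caffemodel second, so passing model[1] and model[0] implies model_filename is a list whose first entry is the weights file.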
Example #3
    def _get_model(git_model_id, model_filename, backend):
        """ Check if model is available, if not, download and unzip it

        Parameters
        ----------
        git_model_id: int
            The second digit in the github tag that identifies this model. See
            https://github.com/deepfakes-models/faceswap-models for more information
        model_filename: str
            The name of the model to be loaded (see :class:`lib.utils.GetModel` for more
            information)
        backend: ['GPU', 'CPU']
            Whether to run inference on a GPU or on the CPU

        See Also
        --------
        lib.utils.GetModel: The model downloading and allocation class.
        """
        root_path = os.path.abspath(os.path.dirname(sys.argv[0]))
        cache_path = os.path.join(root_path, "plugins", "extract",
                                  "recognition", ".cache")
        model = GetModel(model_filename, cache_path, git_model_id).model_path
        if backend == "CPU":
            if os.environ.get("KERAS_BACKEND", "") == "plaidml.keras.backend":
                logger.info("Switching to tensorflow backend.")
                os.environ["KERAS_BACKEND"] = "tensorflow"
        import keras
        from lib.model.layers import L2_normalize
        if backend == "CPU":
            with keras.backend.tf.device("/cpu:0"):
                return keras.models.load_model(model,
                                               {"L2_normalize": L2_normalize})
        else:
            return keras.models.load_model(model,
                                           {"L2_normalize": L2_normalize})
Example #4
def get_model(git_model_id, model_filename):
    """ Check if model is available, if not, download and unzip it """
    root_path = os.path.abspath(os.path.dirname(sys.argv[0]))
    cache_path = os.path.join(root_path, "plugins", "extract", ".cache")
    model = GetModel(model_filename, cache_path, git_model_id).model_path
    vgg_face = cv2.dnn.readNetFromCaffe(model[1], model[0])  # pylint: disable=no-member
    vgg_face.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # pylint: disable=no-member
    return vgg_face
Example #5
def _get_model(cls, git_model_id, model_filename):
    """ Check if model is available, if not, download and unzip it """
    if model_filename is None:
        logger.debug("No model_filename specified. Returning None")
        return None
    if git_model_id is None:
        logger.debug("No git_model_id specified. Returning None")
        return None
    model = GetModel(model_filename, git_model_id)
    return model.model_path
Example #6
def get_model(git_model_id, model_filename):
    """ Check if model is available, if not, download and unzip it """
    if model_filename is None:
        logger.debug("No model_filename specified. Returning None")
        return None
    if git_model_id is None:
        logger.debug("No git_model_id specified. Returning None")
        return None
    cache_path = os.path.join(os.path.dirname(__file__), ".cache")
    model = GetModel(model_filename, cache_path, git_model_id)
    return model.model_path
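Every example here leans on faceswap's lib.utils.GetModel, whose internals are not shown. The stub below only mirrors the surface these snippets use; the real class downloads and unzips the model from the faceswap-models GitHub releases on first use, which the comments summarise rather than implement:

    import os

    class GetModel:
        """ Stub of the interface used in these examples (not the real class) """
        def __init__(self, model_filename, cache_path, git_model_id):
            self._filename = model_filename
            self._cache_path = cache_path
            self._git_model_id = git_model_id

        @property
        def model_path(self):
            # The real implementation downloads any missing files first. When
            # model_filename is a list, a list of paths comes back, which is
            # what the cv2.dnn examples above index into.
            names = self._filename if isinstance(self._filename, list) else [self._filename]
            paths = [os.path.join(self._cache_path, name) for name in names]
            return paths if len(paths) > 1 else paths[0]

Examples #5 and #12 call GetModel without a cache_path, apparently a later revision of the class where the cache location is fixed internally.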
Example #7
    def _get_model(git_model_id, model_filename, backend, allow_growth):
        """ Check if model is available, if not, download and unzip it

        Parameters
        ----------
        git_model_id: int
            The second digit in the github tag that identifies this model. See
            https://github.com/deepfakes-models/faceswap-models for more information
        model_filename: str
            The name of the model to be loaded (see :class:`lib.utils.GetModel` for more
            information)
        backend: ['GPU', 'CPU']
            Whether to run inference on a GPU or on the CPU
        allow_growth: bool
            ``True`` if Tensorflow's allow_growth option should be set, otherwise ``False``

        See Also
        --------
        lib.utils.GetModel: The model downloading and allocation class.
        """
        root_path = os.path.abspath(os.path.dirname(sys.argv[0]))
        cache_path = os.path.join(root_path, "plugins", "extract",
                                  "recognition", ".cache")
        model = GetModel(model_filename, cache_path, git_model_id).model_path
        if backend == "CPU":
            if os.environ.get("KERAS_BACKEND", "") == "plaidml.keras.backend":
                logger.info("Switching to tensorflow backend.")
                os.environ["KERAS_BACKEND"] = "tensorflow"

        if allow_growth:
            # TODO This needs to be centralized. Just a hacky fix to read the allow growth config
            # option from the Extraction config file
            logger.info("Enabling Tensorflow 'allow_growth' option")
            import tensorflow as tf
            from keras.backend.tensorflow_backend import set_session
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            config.gpu_options.visible_device_list = "0"
            set_session(tf.Session(config=config))
            logger.debug("Set Tensorflow 'allow_growth' option")

        import keras
        from lib.model.layers import L2_normalize
        if backend == "CPU":
            with keras.backend.tf.device("/cpu:0"):
                return keras.models.load_model(model,
                                               {"L2_normalize": L2_normalize})
        else:
            return keras.models.load_model(model,
                                           {"L2_normalize": L2_normalize})
Example #8
def _get_model(self, git_model_id, model_filename):
    """ Check if model is available, if not, download and unzip it """
    if model_filename is None:
        logger.debug("No model_filename specified. Returning None")
        return None
    if git_model_id is None:
        logger.debug("No git_model_id specified. Returning None")
        return None
    # Detect and align plugins keep their cache under the application
    # folder; any other plugin caches alongside this module
    plugin_path = os.path.join(*self.__module__.split(".")[:-1])
    if os.path.basename(plugin_path) in ("detect", "align"):
        base_path = os.path.dirname(os.path.realpath(sys.argv[0]))
        cache_path = os.path.join(base_path, plugin_path, ".cache")
    else:
        cache_path = os.path.join(os.path.dirname(__file__), ".cache")
    model = GetModel(model_filename, cache_path, git_model_id)
    return model.model_path
Example #9
def get_model(self, git_model_id, model_filename, backend):
    """ Check if model is available, if not, download and unzip it """
    root_path = os.path.abspath(os.path.dirname(sys.argv[0]))
    cache_path = os.path.join(root_path, "plugins", "extract", ".cache")
    model = GetModel(model_filename, cache_path, git_model_id).model_path
    if backend == "CPU":
        if os.environ.get("KERAS_BACKEND", "") == "plaidml.keras.backend":
            logger.info("Switching to tensorflow backend.")
            os.environ["KERAS_BACKEND"] = "tensorflow"
    import keras
    from lib.model.layers import L2_normalize
    if backend == "CPU":
        with keras.backend.tf.device("/cpu:0"):
            return keras.models.load_model(model,
                                           {"L2_normalize": L2_normalize})
    else:
        return keras.models.load_model(model,
                                       {"L2_normalize": L2_normalize})
Example #10
    def _process_weights(self, model: Model) -> Model:
        """ Save and lock weights if requested.

        Parameters
        ----------
        model: :class:`keras.models.Model`
            The loaded trunk or linear network

        Returns
        -------
        :class:`keras.models.Model`
            The network with weights loaded/not loaded and layers locked/unlocked
        """
        if self._load_weights:
            weights = GetModel(self._net.model_name,
                               self._net.model_id).model_path
            model.load_weights(weights)

        if self._eval_mode:
            model.trainable = False
            for layer in model.layers:
                layer.trainable = False
        return model
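Setting both model.trainable and every layer's trainable flag is deliberate belt-and-braces: how the model-level flag propagates to nested layers has varied across Keras versions. A hypothetical sanity check after the call (instance and model are stand-in names):

    frozen = instance._process_weights(model)   # hypothetical call site
    assert not frozen.trainable
    assert all(not layer.trainable for layer in frozen.layers)
    assert not frozen.trainable_weights         # no weights left to optimise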
Example #11
    def _process_weights(self, model: Model) -> Model:
        """ Save and lock weights if requested.

        Parameters
        ----------
        model: :class:`keras.models.Model`
            The loaded trunk or linear network

        Returns
        -------
        :class:`keras.models.Model`
            The network with weights loaded and all layers locked
        """
        weights = GetModel(self._net.model_name, self._net.model_id).model_path
        model.load_weights(weights)
        model.trainable = False
        for layer in model.layers:
            layer.trainable = False
        return model
Example #12
def get_model(self, git_model_id, model_filename, backend):
    """ Check if model is available, if not, download and unzip it """
    model = GetModel(model_filename, git_model_id).model_path
    model = cv2.dnn.readNetFromCaffe(model[1], model[0])
    model.setPreferableTarget(self.get_backend(backend))
    return model