Example #1
    def _upload_modified_files(
        self,
        working_dir: Union[str, os.PathLike],
        repo_id: str,
        files_timestamps: Dict[str, float],
        commit_message: Optional[str] = None,
        token: Optional[str] = None,
        create_pr: bool = False,
    ):
        """
        Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`.
        """
        # Pick a default commit message that reflects the kind of object being pushed.
        if commit_message is None:
            if "Model" in self.__class__.__name__:
                commit_message = "Upload model"
            elif "Config" in self.__class__.__name__:
                commit_message = "Upload config"
            elif "Tokenizer" in self.__class__.__name__:
                commit_message = "Upload tokenizer"
            elif "FeatureExtractor" in self.__class__.__name__:
                commit_message = "Upload feature extractor"
            elif "Processor" in self.__class__.__name__:
                commit_message = "Upload processor"
            else:
                commit_message = f"Upload {self.__class__.__name__}"

        # A file counts as modified if it is new or if its modification time is more recent
        # than the timestamp recorded in `files_timestamps`.
        modified_files = [
            f
            for f in os.listdir(working_dir)
            if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f]
        ]

        # One upload operation per modified file, all sent in a single commit.
        operations = [
            CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file)
            for file in modified_files
        ]

        logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}")
        return create_commit(
            repo_id=repo_id,
            operations=operations,
            commit_message=commit_message,
            token=token,
            create_pr=create_pr,
        )
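Example #1 boils down to a simple pattern: wrap each local file in a `CommitOperationAdd` operation and push the whole batch through `create_commit`, with the `files_timestamps` filter ensuring that only files modified since the last save are uploaded. Below is a minimal standalone sketch of that pattern, not taken from `transformers`: the repo id and folder are placeholders, and it assumes a `huggingface_hub` release recent enough to expose both names at the top level (Example #2 below falls back to `huggingface_hub._commit_api` for `CommitOperationAdd`, as older releases required).

import os

from huggingface_hub import CommitOperationAdd, create_commit

# Placeholder values -- replace with a repository you can write to and a real local folder.
repo_id = "my-user/my-model"
working_dir = "./saved_model"

# One upload operation per file in the folder, mirroring the loop in Example #1.
operations = [
    CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, f), path_in_repo=f)
    for f in os.listdir(working_dir)
]

# A single commit uploads everything; create_pr=True opens a pull request instead of
# committing directly to the main branch.
create_commit(
    repo_id=repo_id,
    operations=operations,
    commit_message="Upload model",
    create_pr=True,
)
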
Example #2
    def run(self):
        if version.parse(huggingface_hub.__version__) < version.parse("0.8.1"):
            raise ImportError(
                "The huggingface_hub version must be >= 0.8.1 to use this command. Please update your huggingface_hub"
                " installation.")
        else:
            from huggingface_hub import Repository, create_commit
            from huggingface_hub._commit_api import CommitOperationAdd

        # Fetch remote data
        repo = Repository(local_dir=self._local_dir,
                          clone_from=self._model_name)

        # Load config and get the appropriate architecture -- the latter is needed to convert the head's weights
        config = AutoConfig.from_pretrained(self._local_dir)
        architectures = config.architectures
        if architectures is None:  # No architecture defined -- use auto classes
            pt_class = getattr(import_module("transformers"), "AutoModel")
            tf_class = getattr(import_module("transformers"), "TFAutoModel")
            self._logger.warn(
                "No detected architecture, using AutoModel/TFAutoModel")
        else:  # Architecture defined -- use it
            if len(architectures) > 1:
                raise ValueError(
                    f"More than one architecture was found, aborting. (architectures = {architectures})"
                )
            self._logger.warn(f"Detected architecture: {architectures[0]}")
            pt_class = getattr(import_module("transformers"), architectures[0])
            try:
                tf_class = getattr(import_module("transformers"),
                                   "TF" + architectures[0])
            except AttributeError:
                raise AttributeError(
                    f"The TensorFlow equivalent of {architectures[0]} doesn't exist in transformers."
                )

        # Load the PyTorch model and acquire a basic input compatible with it. The TensorFlow
        # model is only loaded afterwards, so that both never sit in memory at the same time.
        pt_model = pt_class.from_pretrained(self._local_dir)
        pt_input, tf_input = self.get_inputs(pt_model, config)
        pt_outputs = pt_model(**pt_input, output_hidden_states=True)
        del pt_model  # will no longer be used, and may have a large memory footprint

        tf_from_pt_model = tf_class.from_pretrained(self._local_dir,
                                                    from_pt=True)
        tf_from_pt_outputs = tf_from_pt_model(**tf_input,
                                              output_hidden_states=True)

        # Confirms that cross loading PT weights into TF worked.
        crossload_differences = self.find_pt_tf_differences(
            pt_outputs, tf_from_pt_outputs)
        output_differences = {
            k: v
            for k, v in crossload_differences.items() if "hidden" not in k
        }
        hidden_differences = {
            k: v
            for k, v in crossload_differences.items() if "hidden" in k
        }
        max_crossload_output_diff = max(output_differences.values())
        max_crossload_hidden_diff = max(hidden_differences.values())
        if max_crossload_output_diff > MAX_ERROR or max_crossload_hidden_diff > self._max_hidden_error:
            raise ValueError(
                "The cross-loaded TensorFlow model has different outputs, something went wrong!\n"
                +
                f"\nList of maximum output differences above the threshold ({MAX_ERROR}):\n"
                + "\n".join([
                    f"{k}: {v:.3e}"
                    for k, v in output_differences.items() if v > MAX_ERROR
                ]) +
                f"\n\nList of maximum hidden layer differences above the threshold ({self._max_hidden_error}):\n"
                + "\n".join([
                    f"{k}: {v:.3e}" for k, v in hidden_differences.items()
                    if v > self._max_hidden_error
                ]))

        # Save the weights in a TF format (if needed) and confirms that the results are still good
        tf_weights_path = os.path.join(self._local_dir, TF2_WEIGHTS_NAME)
        tf_weights_index_path = os.path.join(self._local_dir,
                                             TF2_WEIGHTS_INDEX_NAME)
        if (not os.path.exists(tf_weights_path)
                and not os.path.exists(tf_weights_index_path)
            ) or self._new_weights:
            tf_from_pt_model.save_pretrained(self._local_dir)
        del tf_from_pt_model  # will no longer be used, and may have a large memory footprint

        tf_model = tf_class.from_pretrained(self._local_dir)
        tf_outputs = tf_model(**tf_input, output_hidden_states=True)

        conversion_differences = self.find_pt_tf_differences(
            pt_outputs, tf_outputs)
        output_differences = {
            k: v
            for k, v in conversion_differences.items() if "hidden" not in k
        }
        hidden_differences = {
            k: v
            for k, v in conversion_differences.items() if "hidden" in k
        }
        max_conversion_output_diff = max(output_differences.values())
        max_conversion_hidden_diff = max(hidden_differences.values())
        if max_conversion_output_diff > MAX_ERROR or max_conversion_hidden_diff > self._max_hidden_error:
            raise ValueError(
                "The converted TensorFlow model has different outputs, something went wrong!\n"
                +
                f"\nList of maximum output differences above the threshold ({MAX_ERROR}):\n"
                + "\n".join([
                    f"{k}: {v:.3e}"
                    for k, v in output_differences.items() if v > MAX_ERROR
                ]) +
                f"\n\nList of maximum hidden layer differences above the threshold ({self._max_hidden_error}):\n"
                + "\n".join([
                    f"{k}: {v:.3e}" for k, v in hidden_differences.items()
                    if v > self._max_hidden_error
                ]))

        commit_message = "Update TF weights" if self._new_weights else "Add TF weights"
        if self._push:
            repo.git_add(auto_lfs_track=True)
            repo.git_commit(commit_message)
            repo.git_push(
                blocking=True)  # this prints a progress bar with the upload
            self._logger.warn(f"TF weights pushed into {self._model_name}")
        elif not self._no_pr:
            self._logger.warn("Uploading the weights into a new PR...")
            commit_description = (
                "Model converted by the [`transformers`' `pt_to_tf`"
                " CLI](https://github.com/huggingface/transformers/blob/main/src/transformers/commands/pt_to_tf.py). "
                "All converted model outputs and hidden layers were validated against their PyTorch counterparts.\n\n"
                f"Maximum crossload output difference={max_crossload_output_diff:.3e}; "
                f"Maximum crossload hidden layer difference={max_crossload_hidden_diff:.3e};\n"
                f"Maximum conversion output difference={max_conversion_output_diff:.3e}; "
                f"Maximum conversion hidden layer difference={max_conversion_hidden_diff:.3e};\n"
            )
            if self._extra_commit_description:
                commit_description += "\n\n" + self._extra_commit_description

            # sharded model -> adds all related files (index and .h5 shards)
            if os.path.exists(tf_weights_index_path):
                operations = [
                    CommitOperationAdd(path_in_repo=TF2_WEIGHTS_INDEX_NAME,
                                       path_or_fileobj=tf_weights_index_path)
                ]
                for shard_path in tf.io.gfile.glob(self._local_dir +
                                                   "/tf_model-*.h5"):
                    operations += [
                        CommitOperationAdd(
                            path_in_repo=os.path.basename(shard_path),
                            path_or_fileobj=shard_path)
                    ]
            else:
                operations = [
                    CommitOperationAdd(path_in_repo=TF2_WEIGHTS_NAME,
                                       path_or_fileobj=tf_weights_path)
                ]

            hub_pr_url = create_commit(
                repo_id=self._model_name,
                operations=operations,
                commit_message=commit_message,
                commit_description=commit_description,
                repo_type="model",
                create_pr=True,
            )
            self._logger.warn(f"PR open in {hub_pr_url}")
Example #3
    def run(self):
        if version.parse(huggingface_hub.__version__) < version.parse("0.8.1"):
            raise ImportError(
                "The huggingface_hub version must be >= 0.8.1 to use this command. Please update your huggingface_hub"
                " installation.")
        else:
            from huggingface_hub import Repository, create_commit
            from huggingface_hub._commit_api import CommitOperationAdd

        # Fetch remote data
        repo = Repository(local_dir=self._local_dir,
                          clone_from=self._model_name)

        # Load config and get the appropriate architecture -- the latter is needed to convert the head's weights
        config = AutoConfig.from_pretrained(self._local_dir)
        architectures = config.architectures
        if architectures is None:  # No architecture defined -- use auto classes
            pt_class = getattr(import_module("transformers"), "AutoModel")
            tf_class = getattr(import_module("transformers"), "TFAutoModel")
            self._logger.warn(
                "No detected architecture, using AutoModel/TFAutoModel")
        else:  # Architecture defined -- use it
            if len(architectures) > 1:
                raise ValueError(
                    f"More than one architecture was found, aborting. (architectures = {architectures})"
                )
            self._logger.warn(f"Detected architecture: {architectures[0]}")
            pt_class = getattr(import_module("transformers"), architectures[0])
            try:
                tf_class = getattr(import_module("transformers"),
                                   "TF" + architectures[0])
            except AttributeError:
                raise AttributeError(
                    f"The TensorFlow equivalent of {architectures[0]} doesn't exist in transformers."
                )

        # Load models and acquire a basic input for its modality.
        pt_model = pt_class.from_pretrained(self._local_dir)
        main_input_name = pt_model.main_input_name
        if main_input_name == "input_ids":
            pt_input, tf_input = self.get_text_inputs()
        elif main_input_name == "pixel_values":
            pt_input, tf_input = self.get_image_inputs()
        elif main_input_name == "input_features":
            pt_input, tf_input = self.get_audio_inputs()
        else:
            raise ValueError(
                f"Can't detect the model modality (`main_input_name` = {main_input_name})"
            )
        tf_from_pt_model = tf_class.from_pretrained(self._local_dir,
                                                    from_pt=True)

        # Extra input requirements, in addition to the input modality
        if config.is_encoder_decoder or (hasattr(pt_model, "encoder")
                                         and hasattr(pt_model, "decoder")):
            decoder_input_ids = np.asarray(
                [[1], [1]], dtype=int) * pt_model.config.decoder_start_token_id
            pt_input.update(
                {"decoder_input_ids": torch.tensor(decoder_input_ids)})
            tf_input.update(
                {"decoder_input_ids": tf.convert_to_tensor(decoder_input_ids)})

        # Confirms that cross loading PT weights into TF worked.
        crossload_differences = self.find_pt_tf_differences(
            pt_model, pt_input, tf_from_pt_model, tf_input)
        max_crossload_diff = max(crossload_differences.values())
        if max_crossload_diff > MAX_ERROR:
            raise ValueError(
                "The cross-loaded TensorFlow model has different outputs, something went wrong! Exaustive list of"
                f" maximum tensor differences above the error threshold ({MAX_ERROR}):\n"
                + "\n".join([
                    f"{key}: {value:.3e}"
                    for key, value in crossload_differences.items()
                    if value > MAX_ERROR
                ]))

        # Save the weights in a TF format (if needed) and confirms that the results are still good
        tf_weights_path = os.path.join(self._local_dir, TF_WEIGHTS_NAME)
        if not os.path.exists(tf_weights_path) or self._new_weights:
            tf_from_pt_model.save_weights(tf_weights_path)
        del tf_from_pt_model  # will no longer be used, and may have a large memory footprint
        tf_model = tf_class.from_pretrained(self._local_dir)
        conversion_differences = self.find_pt_tf_differences(
            pt_model, pt_input, tf_model, tf_input)
        max_conversion_diff = max(conversion_differences.values())
        if max_conversion_diff > MAX_ERROR:
            raise ValueError(
                "The converted TensorFlow model has different outputs, something went wrong! Exaustive list of maximum"
                f" tensor differences above the error threshold ({MAX_ERROR}):\n"
                + "\n".join([
                    f"{key}: {value:.3e}"
                    for key, value in conversion_differences.items()
                    if value > MAX_ERROR
                ]))

        commit_message = "Update TF weights" if self._new_weights else "Add TF weights"
        if self._push:
            repo.git_add(auto_lfs_track=True)
            repo.git_commit(commit_message)
            repo.git_push(
                blocking=True)  # this prints a progress bar with the upload
            self._logger.warn(f"TF weights pushed into {self._model_name}")
        elif not self._no_pr:
            self._logger.warn("Uploading the weights into a new PR...")
            commit_description = (
                "Model converted by the [`transformers`' `pt_to_tf`"
                " CLI](https://github.com/huggingface/transformers/blob/main/src/transformers/commands/pt_to_tf.py)."
                "\n\nAll converted model outputs and hidden layers were validated against their PyTorch counterparts."
                f" Maximum crossload output difference={max_crossload_diff:.3e}; Maximum converted output"
                f" difference={max_conversion_diff:.3e}.")
            if self._extra_commit_description:
                commit_description += "\n\n" + self._extra_commit_description
            hub_pr_url = create_commit(
                repo_id=self._model_name,
                operations=[
                    CommitOperationAdd(path_in_repo=TF_WEIGHTS_NAME,
                                       path_or_fileobj=tf_weights_path)
                ],
                commit_message=commit_message,
                commit_description=commit_description,
                repo_type="model",
                create_pr=True,
            )
            self._logger.warn(f"PR open in {hub_pr_url}")