Example #1
0
 def to_torchserve(self,
                   filepath,
                   archive_format: tx.Literal["default",
                                              "no-archive"] = "default"):
     """Build a TorchServe-compatible MAR archive for this model.

     Args:
         filepath: Destination path of the ``.mar`` file. Its basename is
             used as the served model name and its dirname as the export
             directory.
             NOTE(review): ``os.makedirs('')`` raises if ``filepath`` has
             no directory component — confirm callers always pass a path
             with a parent directory.
         archive_format: Forwarded to the archiver; ``"default"`` emits a
             ``.mar`` file, ``"no-archive"`` an exploded directory.
     """
     # marmpu is presumably an optional module-level import of
     # torch-model-archiver's model_packaging_utils; fail fast with an
     # actionable message if the extra dependency is missing.
     assert (
         marmpu is not None
     ), "You must `pip install torch-model-archiver` to use this function."
     os.makedirs(os.path.dirname(filepath), exist_ok=True)
     # Stage the three archive inputs (weights, label map, model module)
     # in a temp dir that is removed once packaging completes.
     with tempfile.TemporaryDirectory() as tdir:
         serialized_file = os.path.join(tdir, "weights.pth")
         index_to_name_file = os.path.join(tdir, "index_to_name.json")
         model_file = os.path.join(tdir, "model.py")
         # prefix="model." namespaces every weight key — presumably to
         # match the attribute layout of the serving module below; TODO
         # confirm against serve_module_string.
         torch.save(self.model.state_dict(prefix="model."), serialized_file)
         with open(index_to_name_file, "w") as f:
             f.write(json.dumps(self.serve_module_index))
         with open(model_file, "w") as f:
             f.write(self.serve_module_string)
         # Mimics the torch-model-archiver CLI argument namespace;
         # "object_detector" is one of TorchServe's built-in handlers.
         args = types.SimpleNamespace(
             model_name=os.path.basename(filepath),
             serialized_file=serialized_file,
             handler="object_detector",
             model_file=model_file,
             version="1.0",
             requirements_file=None,
             runtime="python",
             extra_files=index_to_name_file,
             export_path=os.path.dirname(filepath),
             force=True,
             archive_format=archive_format,
         )
         # NOTE(review): only marmpu is asserted above but marmp is called
         # here — both presumably come from the same optional install.
         marmp.package_model(
             args=args,
             manifest=marmpu.ModelExportUtils.generate_manifest_json(args))
    def test_export_model_method(self, patches):
        """package_model should validate inputs, archive, and clean up.

        ``patches`` is assumed to be a fixture exposing mocks over the
        archiver's ``export_utils`` module — TODO confirm against the
        fixture definition.
        """
        # Stub the filesystem-touching helpers so no real I/O happens.
        patches.export_utils.check_mar_already_exists.return_value = '/Users/dummyUser/'
        patches.export_utils.check_custom_model_types.return_value = '/Users/dummyUser', ['a.txt', 'b.txt']
        patches.export_utils.zip.return_value = None

        package_model(self.args, ModelExportUtils.generate_manifest_json(self.args))
        # Each pipeline stage must have been invoked at least once
        # (assert_called checks invocation, not ordering).
        patches.export_utils.validate_inputs.assert_called()
        patches.export_utils.archive.assert_called()
        patches.export_utils.clean_temp_files.assert_called()
Example #3
0
def mmcls2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Package an MMClassification model as a TorchServe ``.mar`` archive.

    Args:
        config_file: Path to a config in MMClassification format.
        checkpoint_file: Path to a checkpoint in MMClassification format.
        output_folder: Directory where ``{model_name}.mar`` is written.
        model_name: Name for the generated archive; when falsy,
            ``Path(checkpoint_file).stem`` is used instead.
        model_version: Version string recorded in the manifest.
        force: When True, overwrite an existing ``{model_name}.mar``
            under ``output_folder``.
    """
    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    # Dump the (possibly inherited/merged) config into a scratch dir so
    # the archiver bundles a single self-contained model file.
    with TemporaryDirectory() as tmpdir:
        dumped_config = f'{tmpdir}/config.py'
        config.dump(dumped_config)

        # Mirror the torch-model-archiver CLI namespace.
        archiver_args = Namespace(
            model_file=dumped_config,
            serialized_file=checkpoint_file,
            handler=f'{Path(__file__).parent}/mmcls_handler.py',
            model_name=model_name or Path(checkpoint_file).stem,
            version=model_version,
            export_path=output_folder,
            force=force,
            requirements_file=None,
            extra_files=None,
            runtime='python',
            archive_format='default',
        )
        package_model(archiver_args,
                      ModelExportUtils.generate_manifest_json(archiver_args))
Example #4
0
def mmgen2torchserver(config_file: str,
                      checkpoint_file: str,
                      output_folder: str,
                      model_name: str,
                      model_version: str = '1.0',
                      model_type: str = 'unconditional',
                      force: bool = False):
    """Package an MMGeneration model as a TorchServe ``.mar`` archive.

    Args:
        config_file (str): Config path, in MMGeneration format.
        checkpoint_file (str): Checkpoint path, in MMGeneration format.
        output_folder (str): Directory where ``{model_name}.mar`` is
            written, in TorchServe archive format.
        model_name (str): Name of the generated archive; when falsy,
            ``Path(checkpoint_file).stem`` is used instead.
        model_version (str, optional): Version string recorded in the
            manifest. Defaults to '1.0'.
        model_type (str, optional): Selects the handler file
            ``mmgen_{model_type}_handler.py`` shipped next to this module.
            Defaults to 'unconditional'.
        force (bool, optional): When True, overwrite an existing
            ``{model_name}.mar``. Defaults to False.
    """
    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    # Dump the resolved config into a scratch dir so a single file can be
    # bundled into the archive.
    with TemporaryDirectory() as tmpdir:
        dumped_config = f'{tmpdir}/config.py'
        config.dump(dumped_config)

        # Handler is chosen from the requested model type.
        handler_file = f'{Path(__file__).parent}/mmgen_{model_type}_handler.py'

        # Mirror the torch-model-archiver CLI namespace.
        archiver_args = Namespace(
            model_file=dumped_config,
            serialized_file=checkpoint_file,
            handler=handler_file,
            model_name=model_name or Path(checkpoint_file).stem,
            version=model_version,
            export_path=output_folder,
            force=force,
            requirements_file=None,
            extra_files=None,
            runtime='python',
            archive_format='default',
        )
        package_model(archiver_args,
                      ModelExportUtils.generate_manifest_json(archiver_args))
Example #5
0
    def pack(self,
             model,
             dst_path,
             model_name,
             model_version,
             additional_files=None):
        """
        Package a model into a MAR archive for serving with TorchServe.

        :param model: an object representing a model
        :type model: torch.nn.Module
        :param dst_path: the destination base path (without filename) of the MAR
        :type dst_path: str
        :param model_name: the model name (also defines the prediction endpoint)
        :type model_name: str
        :param model_version: a string encoding the version of the model
        :type model_version: str
        :param additional_files: optional list of extra files to include in the MAR
        :type additional_files: iterable

        :return: None
        """
        if additional_files is None:
            additional_files = []

        # Serialize the whole module object into the working directory.
        model_path = os.path.join(self.tmp_dir, 'model.pt')
        torch.save(model, model_path)

        # The archiver takes extra files as a single comma-separated string;
        # the pre/post-processing transforms and metadata always ship along.
        bundled = [
            os.path.join(self.tmp_dir, 'pre_process_tform.pkl'),
            os.path.join(self.tmp_dir, 'post_process_tform.pkl'),
            os.path.join(self.tmp_dir, 'metadata.json'),
        ] + additional_files

        args = ArgClass(model_path, self.handler, ','.join(bundled),
                        dst_path, model_name, model_version)

        manifest = ModelExportUtils.generate_manifest_json(args)

        package_model(args, manifest=manifest)
Example #6
0
def mmpose2torchserve(config_file: str,
                      checkpoint_file: str,
                      output_folder: str,
                      model_name: str,
                      model_version: str = '1.0',
                      force: bool = False):
    """Package an MMPose model as a TorchServe ``.mar`` archive.

    Args:
        config_file: Path to a config in MMPose format.
        checkpoint_file: Path (or loadable location) of a checkpoint in
            MMPose format.
        output_folder: Directory where ``{model_name}.mar`` is written,
            in TorchServe archive format.
        model_name: Name for the generated archive; when falsy, the
            checkpoint filename stem is used instead.
        model_version: Version string recorded in the manifest.
        force: When True, overwrite an existing ``{model_name}.mar``
            under ``output_folder``.
    """

    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    with TemporaryDirectory() as tmpdir:
        model_file = osp.join(tmpdir, 'config.py')
        config.dump(model_file)

        handler_path = osp.join(osp.dirname(__file__), 'mmpose_handler.py')
        if not model_name:
            model_name = osp.splitext(osp.basename(checkpoint_file))[0]

        # Non-local checkpoints (e.g. URLs) are fetched through mmcv's
        # CheckpointLoader and re-saved into the scratch dir, since the
        # archiver needs an on-disk serialized file.
        if not osp.isfile(checkpoint_file):
            state = CheckpointLoader.load_checkpoint(checkpoint_file)
            checkpoint_file = osp.join(tmpdir, 'checkpoint.pth')
            with open(checkpoint_file, 'wb') as ckpt_out:
                torch.save(state, ckpt_out)

        # Mirror the torch-model-archiver CLI namespace.
        archiver_args = Namespace(
            model_file=model_file,
            serialized_file=checkpoint_file,
            handler=handler_path,
            model_name=model_name,
            version=model_version,
            export_path=output_folder,
            force=force,
            requirements_file=None,
            extra_files=None,
            runtime='python',
            archive_format='default',
        )
        package_model(archiver_args,
                      ModelExportUtils.generate_manifest_json(archiver_args))