Example #1
def torchscript_export(context, export_json, model, output_path, quantize):
    """Convert a pytext model snapshot to a torchscript model.

    Args:
        context: CLI context; ``context.obj.load_config()`` supplies defaults
            when ``model`` or ``output_path`` is not given.
        export_json: path to a JSON config containing either a single
            "export" section or an "export_list" of sections.
        model: path to the saved model snapshot; falls back to the config's
            ``save_snapshot_path``.
        output_path: destination for the torchscript file; falls back to
            ``<save_snapshot_path>.torchscript``.
        quantize: truthy enables torchscript quantization and makes
            command-line options take precedence over the JSON config.
    """
    export_config = ExportConfig()
    # only populate from export_json if no export option is configured from the command line.
    if export_json:
        export_json_config = _load_and_validate_export_json_config(export_json)

        # read_chunk_size is not supported by this command; drop it with a
        # warning. (The original code re-checked the key with get() right
        # after this pop() — that branch was dead code, since pop() had
        # already removed the key, and has been deleted.)
        read_chunk_size = export_json_config.pop("read_chunk_size", None)
        if read_chunk_size is not None:
            print("Warning: Ignoring read_chunk_size.")

        # The config carries either one "export" section or an "export_list".
        if "export_list" not in export_json_config.keys():
            export_section_config_list = [export_json_config["export"]]
        else:
            export_section_config_list = export_json_config["export_list"]

        for export_section_config in export_section_config_list:
            if not quantize and not output_path:
                # No CLI overrides: honor the export options from the JSON.
                export_config.export_caffe2_path = export_section_config.get(
                    "export_caffe2_path", None)
                export_config.export_onnx_path = export_section_config.get(
                    "export_onnx_path", "/tmp/model.onnx")
                export_config.torchscript_quantize = export_section_config.get(
                    "torchscript_quantize", False)

            else:
                # CLI options win over the JSON export section.
                print(
                    "the export-json config is ignored because export options are found the command line"
                )
                export_config.torchscript_quantize = quantize

            export_config.export_torchscript_path = export_section_config.get(
                "export_torchscript_path", None)
            # if config has export_torchscript_path, use export_torchscript_path from config, otherwise keep the default from CLI
            if export_config.export_torchscript_path is not None:
                output_path = export_config.export_torchscript_path

            export_config.export_lite_path = export_section_config.get(
                "export_lite_path", None)
            export_config.inference_interface = export_section_config.get(
                "inference_interface", None)
            export_config.accelerate = export_section_config.get(
                "accelerate", [])
            export_config.seq_padding_control = export_section_config.get(
                "seq_padding_control", None)
            export_config.batch_padding_control = export_section_config.get(
                "batch_padding_control", None)
            # Fill any still-missing model/output paths from the saved config.
            if not model or not output_path:
                config = context.obj.load_config()
                model = model or config.save_snapshot_path
                output_path = output_path or f"{config.save_snapshot_path}.torchscript"

            print(f"Exporting {model} to torchscript file: {output_path}")
            export_saved_model_to_torchscript(model, output_path,
                                              export_config)
Example #2
def torchscript_export(context, export_json, model, output_path, quantize,
                       target):
    """Convert a pytext model snapshot to a torchscript model.

    Args:
        context: CLI context; ``context.obj.load_config()`` supplies defaults
            when ``model`` or ``output_path`` is not given.
        export_json: path to a JSON config containing either a single
            "export" section or an "export_list" of sections.
        model: path to the saved model snapshot; falls back to the config's
            ``save_snapshot_path``.
        output_path: destination for the torchscript file; falls back to
            ``<save_snapshot_path>.torchscript``.
        quantize: truthy enables torchscript quantization and makes
            command-line options take precedence over the JSON config.
        target: if given, only export sections whose "target" matches.
    """
    export_cfg = ExportConfig()
    # only populate from export_json if no export option is configured from the command line.
    if export_json:
        export_json_config = _load_and_validate_export_json_config(export_json)

        # read_chunk_size is not supported by this command; drop it with a
        # warning. (The original code re-checked the key with get() right
        # after this pop() — that branch was dead code, since pop() had
        # already removed the key, and has been deleted.)
        read_chunk_size = export_json_config.pop("read_chunk_size", None)
        if read_chunk_size is not None:
            print("Warning: Ignoring read_chunk_size.")

        # The config carries either one "export" section or an "export_list".
        if "export" in export_json_config.keys():
            export_cfgs = [export_json_config["export"]]
        else:
            export_cfgs = export_json_config["export_list"]

        if target:
            print(
                "A single export was specified in the command line. Filtering out all other export options"
            )
            # Use .get() so a section without a "target" key is skipped
            # instead of raising KeyError.
            export_cfgs = [
                cfg for cfg in export_cfgs if cfg.get("target") == target
            ]
            if not export_cfgs:
                print(
                    "No ExportConfig matches the target name specified in the command line."
                )

        for partial_export_cfg in export_cfgs:
            if not quantize and not output_path:
                # No CLI overrides: build the ExportConfig from the JSON.
                export_cfg = config_from_json(ExportConfig, partial_export_cfg)
            else:
                # CLI options win; ignore caffe2/onnx paths from the JSON.
                print(
                    "the export-json config is ignored because export options are found the command line"
                )
                export_cfg = config_from_json(
                    ExportConfig,
                    partial_export_cfg,
                    ("export_caffe2_path", "export_onnx_path"),
                )
                export_cfg.torchscript_quantize = quantize
            # if config has export_torchscript_path, use export_torchscript_path from config, otherwise keep the default from CLI
            if export_cfg.export_torchscript_path is not None:
                output_path = export_cfg.export_torchscript_path
            # Fill any still-missing model/output paths from the saved config.
            if not model or not output_path:
                config = context.obj.load_config()
                model = model or config.save_snapshot_path
                output_path = output_path or f"{config.save_snapshot_path}.torchscript"

            print(f"Exporting {model} to torchscript file: {output_path}")
            print(export_cfg)
            export_saved_model_to_torchscript(model, output_path, export_cfg)