Example #1
    def _pytorch_export(self, test_name, name, model_name, feature,
                        onnx_config_class_constructor):
        from transformers.onnx import export, validate_model_outputs

        tokenizer = AutoTokenizer.from_pretrained(model_name)
        config = AutoConfig.from_pretrained(model_name)

        # Useful for causal lm models that do not use pad tokens.
        if not getattr(config, "pad_token_id", None):
            config.pad_token_id = tokenizer.eos_token_id

        model_class = FeaturesManager.get_model_class_for_feature(feature)
        model = model_class.from_config(config)
        onnx_config = onnx_config_class_constructor(model.config)

        with NamedTemporaryFile("w") as output:
            try:
                onnx_inputs, onnx_outputs = export(
                    tokenizer, model, onnx_config,
                    onnx_config.default_onnx_opset, Path(output.name))
                validate_model_outputs(
                    onnx_config,
                    tokenizer,
                    model,
                    Path(output.name),
                    onnx_outputs,
                    onnx_config.atol_for_validation,
                )
            except (RuntimeError, ValueError) as e:
                self.fail(f"{name}, {feature} -> {e}")
Example #2
    def test_pytorch_export_with_past(self):
        from transformers.onnx import export, validate_model_outputs

        for name, model, model_class, config_class, onnx_config_class in PYTORCH_EXPORT_WITH_PAST_MODELS:
            with self.subTest(name):
                self.assertTrue(hasattr(onnx_config_class, "with_past"),
                                "OnnxConfigWithPast should have with_past()")

                tokenizer = AutoTokenizer.from_pretrained(model)
                model = model_class(config_class())
                onnx_config = onnx_config_class.with_past(model.config)

                self.assertTrue(
                    hasattr(onnx_config, "use_past"),
                    "OnnxConfigWithPast should have use_past attribute.")
                self.assertTrue(
                    onnx_config.use_past,
                    "OnnxConfigWithPast.use_past should be if called with with_past()"
                )

                with NamedTemporaryFile("w") as output:
                    output = Path(output.name)
                    onnx_inputs, onnx_outputs = export(tokenizer, model,
                                                       onnx_config,
                                                       DEFAULT_ONNX_OPSET,
                                                       output)

                    try:
                        validate_model_outputs(onnx_config, tokenizer, model,
                                               output, onnx_outputs, 1e-5)
                    except ValueError as ve:
                        self.fail(f"{name} -> {ve}")
Example #3
    def _onnx_export(self,
                     test_name,
                     name,
                     model_name,
                     feature,
                     onnx_config_class_constructor,
                     device="cpu"):
        from transformers.onnx import export, validate_model_outputs

        model_class = FeaturesManager.get_model_class_for_feature(feature)
        config = AutoConfig.from_pretrained(model_name)
        model = model_class.from_config(config)

        # Dynamic axes aren't supported for YOLO-like models. This means they cannot be exported to ONNX on CUDA devices.
        # See: https://github.com/ultralytics/yolov5/pull/8378
        if model.__class__.__name__.startswith("Yolos") and device != "cpu":
            return

        onnx_config = onnx_config_class_constructor(model.config)

        if is_torch_available():
            from transformers.utils import torch_version

            if torch_version < onnx_config.torch_onnx_minimum_version:
                pytest.skip(
                    "Skipping due to incompatible PyTorch version. Minimum required is"
                    f" {onnx_config.torch_onnx_minimum_version}, got: {torch_version}"
                )

        preprocessor = get_preprocessor(model_name)

        # Useful for causal lm models that do not use pad tokens.
        if isinstance(preprocessor, PreTrainedTokenizerBase) and not getattr(
                config, "pad_token_id", None):
            config.pad_token_id = preprocessor.eos_token_id

        with NamedTemporaryFile("w") as output:
            try:
                onnx_inputs, onnx_outputs = export(
                    preprocessor,
                    model,
                    onnx_config,
                    onnx_config.default_onnx_opset,
                    Path(output.name),
                    device=device)
                validate_model_outputs(
                    onnx_config,
                    preprocessor,
                    model,
                    Path(output.name),
                    onnx_outputs,
                    onnx_config.atol_for_validation,
                )
            except (RuntimeError, ValueError) as e:
                self.fail(f"{name}, {feature} -> {e}")
Example #4

    def _onnx_export(self,
                     test_name,
                     name,
                     model_name,
                     feature,
                     onnx_config_class_constructor,
                     device="cpu"):
        from transformers.onnx import export, validate_model_outputs

        model_class = FeaturesManager.get_model_class_for_feature(feature)
        config = AutoConfig.from_pretrained(model_name)
        model = model_class.from_config(config)
        onnx_config = onnx_config_class_constructor(model.config)

        if is_torch_available():
            from transformers.utils import torch_version

            if torch_version < onnx_config.torch_onnx_minimum_version:
                pytest.skip(
                    "Skipping due to incompatible PyTorch version. Minimum required is"
                    f" {onnx_config.torch_onnx_minimum_version}, got: {torch_version}"
                )

        # Check the modality of the inputs and instantiate the appropriate preprocessor
        if model.main_input_name == "input_ids":
            preprocessor = AutoTokenizer.from_pretrained(model_name)
            # Useful for causal lm models that do not use pad tokens.
            if not getattr(config, "pad_token_id", None):
                config.pad_token_id = preprocessor.eos_token_id
        elif model.main_input_name == "pixel_values":
            preprocessor = AutoFeatureExtractor.from_pretrained(model_name)
        else:
            raise ValueError(
                f"Unsupported model input name: {model.main_input_name}")

        with NamedTemporaryFile("w") as output:
            try:
                onnx_inputs, onnx_outputs = export(
                    preprocessor,
                    model,
                    onnx_config,
                    onnx_config.default_onnx_opset,
                    Path(output.name),
                    device=device)
                validate_model_outputs(
                    onnx_config,
                    preprocessor,
                    model,
                    Path(output.name),
                    onnx_outputs,
                    onnx_config.atol_for_validation,
                )
            except (RuntimeError, ValueError) as e:
                self.fail(f"{name}, {feature} -> {e}")
Example #5
    def test_pytorch_export_default(self):
        from transformers.onnx import export, validate_model_outputs

        for name, model, model_class, config_class, onnx_config_class in PYTORCH_EXPORT_DEFAULT_MODELS:
            with self.subTest(name):
                self.assertTrue(hasattr(onnx_config_class, "default"))

                tokenizer = AutoTokenizer.from_pretrained(model)
                model = model_class(config_class())
                onnx_config = onnx_config_class.default(model.config)

                with NamedTemporaryFile("w") as output:
                    onnx_inputs, onnx_outputs = export(tokenizer, model,
                                                       onnx_config,
                                                       DEFAULT_ONNX_OPSET,
                                                       Path(output.name))

                    try:
                        validate_model_outputs(onnx_config, tokenizer, model,
                                               Path(output.name), onnx_outputs,
                                               1e-5)
                    except ValueError as ve:
                        self.fail(f"{name} -> {ve}")