Example #1
0
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.  All rights reserved.
# Licensed under the MIT License.  See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import os
import unittest

import pytest
import torch
from parity_utilities import find_transformers_source

from onnxruntime import get_available_providers

if find_transformers_source() and find_transformers_source(["models", "t5"]):
    from benchmark_helper import Precision
    from convert_generation import main as run
    from models.t5.convert_to_onnx import export_onnx_models as export_t5_onnx_models
else:
    from onnxruntime.transformers.benchmark_helper import Precision
    from onnxruntime.transformers.convert_generation import main as run
    from onnxruntime.transformers.models.t5.convert_to_onnx import export_onnx_models as export_t5_onnx_models


class TestBeamSearchGpt(unittest.TestCase):
    """Test BeamSearch for GPT-2 model"""
    def setUp(self):
        self.model_name = "gpt2"
        self.gpt2_onnx_path = os.path.join(".", "onnx_models",
                                           "gpt2_past_fp32_shape.onnx")
Example #2
0
# license information.
# --------------------------------------------------------------------------

# For live logging, use the command: pytest -o log_cli=true --log-cli-level=DEBUG

import shutil
import unittest

import pytest
import torch
from model_loader import get_fusion_test_model, get_test_data_path
from onnx import TensorProto, load_model
from parity_utilities import find_transformers_source
from transformers import is_tf_available

if find_transformers_source():
    from benchmark_helper import ConfigModifier, OptimizerInfo, Precision
    from huggingface_models import MODELS
    from onnx_exporter import export_onnx_model_from_pt, export_onnx_model_from_tf
    from onnx_model import OnnxModel
    from optimizer import optimize_model
else:
    from onnxruntime.transformers.benchmark_helper import ConfigModifier, OptimizerInfo, Precision
    from onnxruntime.transformers.huggingface_models import MODELS
    from onnxruntime.transformers.onnx_exporter import export_onnx_model_from_pt, export_onnx_model_from_tf
    from onnxruntime.transformers.onnx_model import OnnxModel
    from onnxruntime.transformers.optimizer import optimize_model

TEST_MODELS = {
    "bert_keras_0": (
        "models",
Example #3
0
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.  All rights reserved.
# Licensed under the MIT License.  See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

import logging
import os
import unittest

import coloredlogs
import pytest
from parity_utilities import find_transformers_source

if find_transformers_source(sub_dir_paths=["models", "gpt2"]):
    from benchmark_gpt2 import main, parse_arguments
else:
    from onnxruntime.transformers.models.gpt2.benchmark_gpt2 import main, parse_arguments


class TestGpt2(unittest.TestCase):
    def setUp(self):
        """Detect whether the CUDA execution provider is available so tests can skip GPU runs."""
        # Imported lazily to avoid requiring onnxruntime at module import/collection time.
        from onnxruntime import get_available_providers

        providers = get_available_providers()
        self.test_cuda = "CUDAExecutionProvider" in providers

    def run_benchmark_gpt2(self, arguments: str):
        """Run the GPT-2 benchmark with *arguments* (a CLI-style string) and assert a CSV was produced."""
        parsed_args = parse_arguments(arguments.split())
        # main returns the path of the result CSV on success; None signals failure.
        result_csv = main(parsed_args)
        self.assertIsNotNone(result_csv)