Example #1
    def prepare(cls, model, device=None, **kwargs):
        """
        Loads the model and creates a :class:`migraphx.program`
        ready to be used as a backend.

        :param model: ModelProto (returned by `onnx.load`),
            string for a filename or bytes for a serialized model
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: additional options, accepted for compatibility
            with :class:`onnxruntime.SessionOptions`
        :return: :class:`migraphx.program`
        """
        if isinstance(model, MIGraphXBackendRep):
            return model
        elif isinstance(model, migraphx.program):
            return MIGraphXBackendRep(model, cls._input_names)
        elif isinstance(model, (str, bytes)):
            # kwargs matching onnxruntime.SessionOptions attributes are
            # accepted for API compatibility; MIGraphX has no session
            # options object, so they are not applied here.
            if device is not None and not cls.supports_device(device):
                raise RuntimeError(
                    "Incompatible device expected '{0}', got '{1}'".format(
                        device, get_device()))
            inf = migraphx.parse_onnx_buffer(model)
            device = cls._device
            cls._input_names = inf.get_parameter_names()
            inf.compile(migraphx.get_target(device.lower()))
            return cls.prepare(inf, device, **kwargs)
        else:
            # type: ModelProto
            check_model(model)
            serialized = model.SerializeToString()
            return cls.prepare(serialized, device, **kwargs)
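
# Negates an int64 tensor: parse the ONNX model, compile for the GPU
# target, and run with an arange input shaped from the reported parameter
# shape. (These test snippets assume `import migraphx` and
# `import numpy as np` at module scope.)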
def test_neg_int64():
    p = migraphx.parse_onnx("neg_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    params = {}

    shapes = p.get_parameter_shapes()
    params["0"] = np.arange(6).reshape(shapes["0"].lens()).astype(np.int64)

    r = p.run(params)
    print(r)
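
# Convolution + ReLU + max-pool model; migraphx.generate_argument
# fabricates random inputs matching each parameter shape.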
def test_conv_relu():
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    params = {}

    for key, value in p.get_parameter_shapes().items():
        print("Parameter {} -> {}".format(key, value))
        params[key] = migraphx.generate_argument(value)

    r = p.run(params)
    print(r)
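
# NonZero over a boolean mask; the model has a dynamic output shape.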
def test_nonzero():
    p = migraphx.parse_onnx("nonzero_dynamic_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    params = {}

    shapes = p.get_parameter_shapes()
    params["data"] = np.array([1, 1, 0, 1]).reshape(
        shapes["data"].lens()).astype(bool)

    r = p.run(params)
    print(r)
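
# Subtraction of two uint64 tensors with implicit broadcasting.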
def test_sub_uint64():
    p = migraphx.parse_onnx("implicit_sub_bcast_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    params = {}

    shapes = p.get_parameter_shapes()
    params["0"] = np.arange(120).reshape(shapes["0"].lens()).astype(np.uint64)
    params["1"] = np.arange(20).reshape(shapes["1"].lens()).astype(np.uint64)

    r = p.run(params)
    print(r)
Example #6
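# Runs the program twice and checks the outputs agree, both as arguments
# and as Python lists, and that they expose memoryview buffers. run,
# assert_eq, check_argument, and check_shapes are helpers defined
# elsewhere in the test module.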
def test_output():
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    p.compile(migraphx.get_target("gpu"))

    r1 = run(p)[-1]
    r2 = run(p)[-1]
    assert_eq(r1, r2)
    assert_eq(r1.tolist(), r2.tolist())

    check_argument(r1)
    check_argument(r2)

    m1 = memoryview(r1)
    m2 = memoryview(r2)

    check_shapes(r1, m1)
    check_shapes(r2, m2)
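
# ImageScaler in half precision; also checks that compiling does not
# change the program's reported output shape.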
def test_fp16_imagescaler():
    p = migraphx.parse_onnx("imagescaler_half_test.onnx")
    print(p)
    s1 = p.get_output_shapes()[-1]
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    s2 = p.get_output_shapes()[-1]
    assert s1 == s2

    params = {}
    shapes = p.get_parameter_shapes()
    params["0"] = np.random.randn(768).reshape(shapes["0"].lens()).astype(
        np.float16)

    r = p.run(params)[-1]
    print(r)
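
# If operator test: a scalar boolean parameter "cond" selects the branch.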
def test_if_pl():
    p = migraphx.parse_onnx("if_pl_test.onnx")
    print(p)
    s1 = p.get_output_shapes()[-1]
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)
    s2 = p.get_output_shapes()[-1]
    assert s1 == s2

    params = {}
    shapes = p.get_parameter_shapes()
    params["x"] = np.ones(6).reshape(shapes["x"].lens()).astype(np.float32)
    params["y"] = np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0
                            ]).reshape(shapes["y"].lens()).astype(np.float32)
    params["cond"] = np.array([1]).reshape(()).astype(bool)

    r = p.run(params)[-1]
    print(r)
Example #9
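# Adds a scalar to a 4-D tensor; inputs are passed as raw host buffers
# wrapped with migraphx.argument (create_buffer is a helper defined
# elsewhere in the test module).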
def test_add_scalar():
    p = migraphx.parse_onnx("add_scalar_test.onnx")
    print(p)
    s1 = p.get_output_shapes()[-1]
    print("Compiling ...")
    p.compile(migraphx.get_target("cpu"))
    print(p)
    s2 = p.get_output_shapes()[-1]
    assert s1 == s2

    d0 = list(range(120))
    arg0 = create_buffer("B", d0, [2, 3, 4, 5])
    d1 = [1]
    arg1 = create_buffer("B", d1, ())

    params = {}
    params["0"] = migraphx.argument(arg0)
    params["1"] = migraphx.argument(arg1)

    r = p.run(params)[-1]
    print(r)
Example #10
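# classify-image.py: compile an ONNX image classifier with MIGraphX and
# set up torchvision ImageNet-style preprocessing.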
import sys
import os
import numpy as np
import migraphx
from PIL import Image
from torchvision import models, transforms
from torch.autograd import Variable

if len(sys.argv) == 3:
    onnxfile = sys.argv[1]
    imagedir = sys.argv[2]
else:
    print('Usage: python classify-image.py <ONNX file> <image dir>')
    sys.exit(1)

model = migraphx.parse_onnx(onnxfile)
model.compile(migraphx.get_target("gpu"))

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# allocate space
params = {}
for key, value in model.get_parameter_shapes().items():
    params[key] = migraphx.allocate_gpu(value)
    if key == '0':
        if value.lens() == [1, 3, 224, 224]:
            format = 'imagenet224'
            preprocess = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(), normalize
            ])
Example #11
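# With offload_copy=False the program does not manage host/device copies:
# inputs are moved to the device with migraphx.to_gpu and results are
# fetched back with migraphx.from_gpu.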
import migraphx

p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
print("Compiling ...")
p.compile(migraphx.get_target("gpu"), offload_copy=False)
print(p)
params = {}

for key, value in p.get_parameter_shapes().items():
    print("Parameter {} -> {}".format(key, value))
    params[key] = migraphx.to_gpu(migraphx.generate_argument(value))

r = migraphx.from_gpu(p.run(params))
print(r)
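
# Driver for ONNX backend-style tests: compiles the model found in a test
# directory, runs every recorded input/output case, and reports pass/fail
# counts (the file-handling helpers are defined elsewhere in the script).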
def main():
    args = parse_args()
    test_loc = args.test_dir
    target = args.target

    test_name = os.path.basename(os.path.normpath(test_loc))

    print("Running test \"{}\" on target \"{}\" ...\n".format(
        test_name, target))

    # get model full path
    model_name = get_model_name(test_loc)
    model_path_name = test_loc + '/' + model_name

    # get param names
    param_names = model_parameter_names(model_path_name)

    # get output names
    output_names = model_output_names(model_path_name)

    # get test cases
    cases = get_test_cases(test_loc)
    sample_case = test_loc + '/' + cases[0]
    param_shapes = get_input_shapes(sample_case, param_names)
    for name, dims in param_shapes.items():
        print("Input: {}, shape: {}".format(name, dims))
    print()

    # read and compile model
    model = migraphx.parse_onnx(model_path_name, map_input_dims=param_shapes)
    model.compile(migraphx.get_target(target))

    # get test cases
    case_num = len(cases)
    correct_num = 0
    for case_name in cases:
        io_folder = test_loc + '/' + case_name
        input_data = wrapup_inputs(io_folder, param_names)
        gold_outputs = read_outputs(io_folder, output_names)

        # if input shape is different from model shape, reload and recompile
        # model
        input_shapes = tune_input_shape(model, input_data)
        if input_shapes:
            model = migraphx.parse_onnx(model_path_name,
                                        map_input_dims=input_shapes)
            model.compile(migraphx.get_target(target))

        # run the model and return outputs
        output_data = run_one_case(model, input_data)

        # check output correctness
        ret = check_correctness(gold_outputs, output_data)
        if ret:
            correct_num += 1

        output_str = "PASSED" if ret else "FAILED"
        print("\tCase {}: {}".format(case_name, output_str))

    print("\nTest \"{}\" has {} cases:".format(test_name, case_num))
    print("\t Passed: {}".format(correct_num))
    print("\t Failed: {}".format(case_num - correct_num))
    if case_num > correct_num:
        error_num = case_num - correct_num
        raise ValueError(str(error_num) + " cases failed!")
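
# CPU variant of the conv_relu_maxpool example; also checks that
# compiling does not change p.get_shape().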
import migraphx

p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
print(p)
s1 = p.get_shape()
print("Compiling ...")
p.compile(migraphx.get_target("cpu"))
print(p)
s2 = p.get_shape()
assert s1 == s2
params = {}
for key, value in p.get_parameter_shapes().items():
    print("Parameter {} -> {}".format(key, value))
    params[key] = migraphx.generate_argument(value)

r = p.run(params)
print(r)
Example #14
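# Benchmarks one forward pass per step on the GPU, optionally lowering to
# TensorRT or exporting to ONNX and running through MIGraphX, and times
# the steady-state iterations with CUDA events. Assumes `import torch`
# and an argparse-style `args` from the surrounding script.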
def infer_gpu(model, device, data_type, input_size, output_size, batch_size,
              args):
    data = torch.randn(batch_size, input_size, device="cuda")

    if data_type == "float16":
        data = data.half()
        model_final = model.half()
        if args.use_trt:
            print("Creating TRT model")
            from torch_tensorrt.fx.lower import lower_to_trt
            from torch_tensorrt.fx.utils import LowerPrecision
            model_final = lower_to_trt(
                model_final,
                [data],
                max_batch_size=batch_size,
                explicit_batch_dimension=False,
                max_workspace_size=4 << 30,
                lower_precision=LowerPrecision.FP16,
            )
    else:
        model_final = model

    if args.use_migraphx:
        torch.onnx.export(
            model_final,
            torch.randn(batch_size,
                        input_size,
                        device="cuda",
                        dtype=torch.float16
                        if data_type == "float16" else torch.float32),
            "benchmark.onnx",
            input_names=["input"],
            output_names=["output"],
        )
        import migraphx
        migraphx_program = migraphx.parse_onnx("benchmark.onnx")
        migraphx_program.compile(migraphx.get_target("gpu"),
                                 offload_copy=False)

    torch.cuda.synchronize()
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)
    total_time = 0.0

    for i in range(args.steps + args.warmups):
        data = torch.randn(batch_size, input_size, device="cuda")

        if data_type == "float16":
            data = data.half()

        if args.use_migraphx:
            params = {}
            for key, value in migraphx_program.get_parameter_shapes().items():
                params[key] = migraphx.to_gpu(
                    migraphx.generate_argument(value))

        if i >= args.warmups:
            start_event.record()

        if args.use_migraphx:
            migraphx_program.run(params)
        else:
            model_final(data)

        if i >= args.warmups:
            if args.use_migraphx:
                torch.cuda.synchronize()
            end_event.record()
            torch.cuda.synchronize()
            total_time += start_event.elapsed_time(end_event) * 1.0e-3

    return total_time