Code example #1
def test_forward_inception_v1():
    '''test inception V1 model'''
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        # Build an image from random data.
        from PIL import Image
        from tvm.contrib import util

        img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
        img = Image.frombuffer('RGB', (600, 600), img_array.tobytes(), 'raw', 'RGB', 0, 1)
        temp = util.tempdir()
        img_path = temp.relpath("tf-test.jpg")
        img.save(img_path)

        import os.path
        if not tf.gfile.Exists(os.path.join(img_path)):
            tf.logging.fatal('File does not exist %s', img_path)
        data = tf.gfile.FastGFile(os.path.join(img_path), 'rb').read()

        temp.remove()

        # Extract tensorflow decoded image frame for tvm input
        with tf.Session() as sess:
            tvm_data = run_tf_graph(sess, data, 'DecodeJpeg/contents:0', 'DecodeJpeg:0')

        with tf.Session() as sess:
            tf_output = run_tf_graph(sess, data, 'DecodeJpeg/contents:0', 'softmax:0')
            tvm_output = run_tvm_graph(graph_def, tvm_data, 'DecodeJpeg/contents')
            tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
Code example #2
def from_tensorflow(model_path, shapes, outputs=None, opt_model_path=None):
    # type: (str, dict, List[str], str) -> (relay.expr.Module, dict)
    """ Load tensorflow model from file and convert to relay expression
    """
    try:
        import tensorflow as tf
    except ImportError as e:
        raise ImportError("Please install tensorflow before trying to import"
                          " tensorflow models.")

    if outputs is None or len(outputs) < 1:
        raise ValueError("Please provide the output names for the provided"
                         " Tensorflow model")

    with tf.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

    mod, params = relay.frontend.from_tensorflow(graph_def,
                                                 layout=None,
                                                 shape=shapes,
                                                 outputs=outputs)

    # data_layout = 'NHWC'
    # TODO: Get the right layout??
    return mod, params
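# --- Usage sketch (added for illustration; not part of the original example) ---
# A hedged example of how the helper above might be called, assuming the module-level
# imports it relies on (relay, tf_testing) are in place. The file name, input name,
# shape, and output name below are hypothetical placeholders.
if __name__ == "__main__":
    mod, params = from_tensorflow(
        "frozen_graph.pb",                              # path to a frozen TensorFlow *.pb file
        shapes={"input": (1, 224, 224, 3)},             # shape for each graph input
        outputs=["MobilenetV2/Predictions/Reshape_1"],  # at least one output name is required
    )
    print(mod)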
Code example #3
def create_graph():
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(model_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
Code example #4
def from_tensorflow(model_path, img_shape):
    import tensorflow as tf
    import tvm.relay.testing.tf as tf_testing

    def analyze_tf_inputs_outputs(graph):
        ops = graph.get_operations()
        outputs_set = set(ops)
        inputs = []
        for op in ops:
            if len(op.inputs) == 0 and op.type != 'Const':
                inputs.append(op)
            else:
                for input_tensor in op.inputs:
                    if input_tensor.op in outputs_set:
                        outputs_set.remove(input_tensor.op)
        outputs = [op.name for op in outputs_set]
        outputs.sort()
        inputs = [op.name for op in inputs]
        return inputs, outputs

    shape_dict = {}
    with tf.compat.v1.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        graph_inputs, graph_outputs = analyze_tf_inputs_outputs(tf.compat.v1.get_default_graph())
        for node in graph_def.node:
            if node.name in graph_inputs:
                get_shape = ()
                input_length = len(node.attr['shape'].shape.dim)
                if input_length:
                    for i in range(input_length):
                        dim_size = node.attr['shape'].shape.dim[i].size
                        dim_size = dim_size if dim_size != -1 else None
                        get_shape += (dim_size,)
                if len(get_shape) == 0 or not all(get_shape[1:]):
                    input_shape = (1,) + tuple(img_shape) + (3,)
                elif not all(get_shape) and all(get_shape[1:]):
                    input_shape = (1,) + get_shape[1:]
                else:
                    input_shape = get_shape
                shape_dict[node.name] = input_shape
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        # Add shapes to the graph.
        with tf.compat.v1.Session() as sess:
            graph_def = tf_testing.AddShapesToGraphDef(sess, graph_outputs)

    mod, params = relay.frontend.from_tensorflow(graph_def, layout=None, shape=shape_dict)
    Model = namedtuple('Model', ['mod', 'params', 'graph_inputs'])
    model = Model(mod, params, graph_inputs)
    return model
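# --- Usage sketch (added for illustration; not part of the original example) ---
# A hedged example of how this helper might be used; the *.pb path and the 224x224
# image size are hypothetical placeholders.
model = from_tensorflow("frozen_graph.pb", img_shape=(224, 224))
print(model.graph_inputs)  # names of the graph's placeholder inputs
print(model.mod)           # the imported Relay module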
Code example #5
def test_forward_inception_v3():
    '''test inception V3 model'''
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload('InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb')
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        data = np.random.uniform(size=(1, 299, 299, 3)).astype('float32')

        with tf.Session() as sess:
            tf_output = run_tf_graph(sess, data, 'input:0', 'InceptionV3/Predictions/Reshape_1:0')
            tvm_output = run_tvm_graph(graph_def, data, 'input')
            tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
Code example #6
File: frontends.py    Project: yhcvb/incubator-tvm
    def load(self, path, shape_dict=None, **kwargs):
        # pylint: disable=C0415
        import tensorflow as tf
        import tvm.relay.testing.tf as tf_testing

        with tf.io.gfile.GFile(path, "rb") as tf_graph:
            content = tf_graph.read()

        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(content)
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        logger.debug("parse TensorFlow model and convert into Relay computation graph")
        return relay.frontend.from_tensorflow(graph_def, shape=shape_dict, **kwargs)
Code example #7
    def load(self, path):
        # pylint: disable=C0415
        import tensorflow as tf
        import tvm.relay.testing.tf as tf_testing

        with tf.io.gfile.GFile(path, "rb") as tf_graph:
            content = tf_graph.read()

        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(content)
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        logging.debug("relay.frontend.from_tensorflow")
        return relay.frontend.from_tensorflow(graph_def)
Code example #8
def importTFModel(model_path, output_name):
    with tf.compat.v1.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')

        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        # Add shapes to the graph.
        with tf.compat.v1.Session() as sess:
            graph_def = tf_testing.AddShapesToGraphDef(sess, output_name)

            return graph_def
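# --- Usage sketch (added for illustration; not part of the original example) ---
# A hedged example of feeding the returned GraphDef (with shapes attached) to the Relay
# frontend, assuming relay is imported as in the surrounding snippets. The file path,
# input name/shape, and output name are hypothetical placeholders.
graph_def = importTFModel("frozen_graph.pb", "softmax")
mod, params = relay.frontend.from_tensorflow(
    graph_def, shape={"input": (1, 224, 224, 3)}, outputs=["softmax"])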
Code example #9
def test_forward_resnetv2():
    '''test resnet model'''
    if is_gpu_available():
        with tf.Graph().as_default():
            graph_def = tf_testing.get_workload("ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb")
            # Call the utility to import the graph definition into default graph.
            graph_def = tf_testing.ProcessGraphDefParam(graph_def)

            data = np.random.uniform(size=(128, 224, 224, 3)).astype('float32')
            out_node = 'ArgMax'

            with tf.Session() as sess:
                tf_output = run_tf_graph(sess, data, 'input_tensor:0', out_node + ':0')
                tvm_output = run_tvm_graph(graph_def, data, 'input_tensor', tf_output.shape, 'float32')
                tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5)
Code example #10
File: backend_tvm.py    Project: prime91/inference
    def load(self, model_path, inputs=None, outputs=None):
        # there is no input/output metadata in the graph, so it needs to come from config.
        if not inputs:
            raise ValueError("TVM needs inputs")
        if not outputs:
            raise ValueError("TVM needs outputs")
        self.outputs = outputs
        self.inputs = inputs

        # Import model
        # ------------
        # Creates tensorflow graph definition from protobuf file.

        with tf_compat_v1.gfile.GFile(model_path, "rb") as f:
            graph_def = tf_compat_v1.GraphDef()
            graph_def.ParseFromString(f.read())
            graph = tf.import_graph_def(graph_def, name="")
            # Call the utility to import the graph definition into default graph.
            graph_def = tf_testing.ProcessGraphDefParam(graph_def)
            # Add shapes to the graph.
            with tf_compat_v1.Session() as sess:
                graph_def = tf_testing.AddShapesToGraphDef(sess, "softmax")

        # Import the graph to Relay
        # -------------------------
        # Import tensorflow graph definition to relay frontend.
        mod, params = relay.frontend.from_tensorflow(graph_def,
                                                     layout=self.layout,
                                                     shape=None,
                                                     outputs=None)

        # Relay Build
        # -----------
        # Compile the graph to llvm target with given input specification.
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod,
                              target=self.target,
                              target_host=self.target_host,
                              params=params)

        self.m = graph_runtime.GraphModule(lib["default"](self.ctx))

        return self
Code example #11
def tf_tvm_lstm(opt_level):
    with tf.Session() as sess:
        inputs = model_inputs()
        model = get_tf_model()
        sess.run(tf.global_variables_initializer())

        # saver = tf.compat.v1.train.Saver()
        # saver.save(sess, model_path + "/tf_model")
        # print([n.name for n in tf.get_default_graph().as_graph_def().node])
        from util import export_pb
        export_pb(sess,
                  model_path + "/tf_model.pb",
                  inputs=['Placeholder', 'Placeholder_1'],
                  outputs=["rnn/transpose_1"])
        # _ = model.predict(inputs)       # way to set input shape or it cannot be saved
        # model.save(model_path + "/tf_mode.pb")
        # tf.saved_model.save(model, model_path)

        print("tvm compiling ...")
        tic = time.time()
        import tvm.relay.testing.tf as tf_testing
        with tf.gfile.GFile(model_path + "/tf_model.pb", 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            graph = tf.import_graph_def(graph_def, name='')
            # Call the utility to import the graph definition into default graph.
            graph_def = tf_testing.ProcessGraphDefParam(graph_def)
            # Add shapes to the graph.
            # with tf.Session() as sess:
            #     graph_def = tf_testing.AddShapesToGraphDef(sess, 'softmax')

        # mod, params = relay.frontend.from_keras(model, shape=data_shape)
        mod, params = relay.frontend.from_tensorflow(
            graph_def,
            shape={
                "Placeholder": data_shape,
                "Placeholder_1": [6, 2, data_shape[0], 2]
            })
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build(mod,
                                             target="llvm -mcpu=core-avx2")
        tic = time.time() - tic
        print("tvm compiling completed. spent %f seconds" % tic)
Code example #12
def test_forward_mobilenet():
    '''test mobilenet model'''
    # MobilenetV2
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
            "mobilenet_v2_1.4_224_frozen.pb")
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')
        out_node = 'MobilenetV2/Predictions/Reshape_1'

        with tf.Session() as sess:
            # Add shapes to the graph.
            graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
            tf_output = run_tf_graph(sess, data, 'input:0', out_node + ':0')
            tvm_output = run_tvm_graph(graph_def, data, 'input')
            tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5)
Code example #13
def get_tf_model_InceptionV1(model_path):

    with tf_compat_v1.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf_compat_v1.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        # Add shapes to the graph.
        with tf_compat_v1.Session() as sess:
            graph_def = tf_testing.AddShapesToGraphDef(sess, 'softmax')

    x = (299, 299, 3)
    layout = None
    shape_dict = {'DecodeJpeg/contents': x}
    dtype_dict = {'DecodeJpeg/contents': 'uint8'}
    mod, params = relay.frontend.from_tensorflow(
        graph_def, layout="NCHW", shape={'DecodeJpeg/contents': (299, 299, 3)})

    return mod, params, x
Code example #14
def export_tvm(weight_file='examples/brokenegg.npz', model_path=None):
    import tvm.relay.testing.tf as tf_testing
    from tvm import relay
    import tvm

    from brokenegg_transformer.runtime import transformer

    graph, [inputs, targets], _ = transformer.load_model(weight_file,
                                                         as_graph=True)
    with tf.Session(graph=graph) as sess:
        graph_def = tf.graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), ["logits"])
    if False:

        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        with tf.Session(graph=graph) as sess:
            graph_def = tf_testing.AddShapesToGraphDef(sess, 'logits')

    layout = None
    target = 'llvm'
    target_host = 'llvm'

    shape_dict = {'inputs': [None, None], 'targets': [None, None]}
    dtype_dict = {
        'inputs': "int64",
        'targets': "int64",
    }
    with tf.Session(graph=graph) as sess:
        mod, params = relay.frontend.from_tensorflow(graph_def,
                                                     layout=layout,
                                                     shape=shape_dict,
                                                     outputs=["logits"])

    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod,
                                         target=target,
                                         target_host=target_host,
                                         params=params)
Code example #15
def main():
    # target settings
    target = 'llvm'
    target_host = 'llvm'
    layout = None
    ctx = tvm.cpu(0)

    model_path = argv[1]
    graph_def = tf.GraphDef()

    with tf.gfile.FastGFile(model_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

    shape_dict = {}
    
    mod, params = relay.frontend.from_tensorflow(graph_def, layout=layout, shape=shape_dict)

    print("Tensorflow protobuf imported to relay frontend.")

    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(mod, target=target, target_host=target_host, params=params)
Code example #16
def get_tf_yolov3_tiny(
        model_path=("/hdd02/zhangyiyang/Tensorflow-YOLOv3/"
                    "weights/raw-yolov3-tiny.pb"),
        outputs=['yolov3_tiny/concat_6'],):
    input_shape = (1, 416, 416, 3)

    with tf.compat.v1.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        with tf.compat.v1.Session() as sess:
            graph_def = tf_testing.AddShapesToGraphDef(
                sess, outputs[0])
        print("successfully load tf model")

    mod, params = relay.frontend.from_tensorflow(
        graph_def,
        layout="NCHW",
        shape={'Placeholder': input_shape},
        outputs=outputs,
    )
    print("successfully convert tf model to relay")
    return mod, params, input_shape
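# --- Usage sketch (added for illustration; not part of the original snippet) ---
# A hedged example of compiling the returned Relay module, assuming tvm and relay are
# imported as in the surrounding snippets; the llvm target is an assumption here.
mod, params, input_shape = get_tf_yolov3_tiny()
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)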
Code example #17
File: TVMTimer.py    Project: sachacon/F20_EE464H_H1
def run_timing(device, platform, model, remote=None, autotvm_log=None, batch=1, runs=3, reps=5, log=None):
    """
    Run a time trial on TVM

    :param device: The device to run this on
    :param platform: The platform to get the machine learning model from
    :param model: The machine learning model to use
    :param remote: Details about the remote device
    :param autotvm_log: The path to the auto TVM file
    :param batch: The number of pictures to run in one go
    :param runs: The number of runs to average over for each measurement
    :param reps: The number of times the measurement should be repeated
    :param log: The output file
    """

    # Output details of run
    from cpuinfo import get_cpu_info
    from datetime import datetime

    print("\n──────────────────────────── TVMUI ────────────────────────────\n")
    log.write("TVM Time Trial\n")
    log_print(log, "Started on " + str(datetime.now().strftime("%m/%d/%Y at %H:%M:%S")))
    if remote is None:
        log_print(log, 'Hardware: ' + device)
        if device == 'x86':
            log_print(log, 'CPU Type: ' + get_cpu_info().get('brand_raw'))
    else:
        log_print(log, 'Remote Name: ' + remote["name"])
        log_print(log, 'Remote Device: ' + remote["type"])
        log_print(log, 'Remote Hardware: ' + remote["hardware"])
    log_print(log, 'Backend: ' + platform)
    log_print(log, 'Model: ' + model)
    log_print(log, str(batch) + " picture(s) per run")
    log_print(log, str(runs) + " run average, repeated " + str(reps) + " times.")
    if autotvm_log is None:
        log_print(log, 'AutoTVM: No\n')
    else:
        log_print(log, 'AutoTVM: Yes\n')

    # Get the model and image data
    import numpy as np
    from PIL import Image
    from tvm import relay
    import tvm
    from tvm.contrib.download import download_testdata

    print("Loading models and images...")

    pictures = get_pics(batch)
    dataset = []

    if platform == "MXNet":
        from mxnet.gluon.model_zoo.vision import get_model

        block = get_model(model, pretrained=True)

        synset_url = "".join(
            [
                "https://gist.githubusercontent.com/zhreshold/",
                "4d0b62f3d01426887599d4f7ede23ee5/raw/",
                "596b27d23537e5a1b5751d2b0481ef172f58b539/",
                "imagenet1000_clsid_to_human.txt",
            ]
        )
        synset_name = "imagenet1000_clsid_to_human.txt"
        synset_path = download_testdata(synset_url, synset_name, module="data")
        with open(synset_path) as f:
            synset = eval(f.read())

        def transform_image(image):
            image = np.array(image) - np.array([123.0, 117.0, 104.0])
            image /= np.array([58.395, 57.12, 57.375])
            image = image.transpose((2, 0, 1))
            image = image[np.newaxis, :]
            return image

        if model == 'resnet18_v1' or model == 'mobilenetv2_1.0':
            for img in pictures:
                dataset.append(transform_image(Image.open(img).resize((224, 224))))
            input_shape = [batch, 3, 224, 224]

        elif model == 'inceptionv3':
            for img in pictures:
                dataset.append(transform_image(Image.open(img).resize((299, 299))))
            input_shape = [batch, 3, 299, 299]
        else:
            raise Exception("Invalid Model")

        shape_dict = {"data": input_shape}

        mod, params = relay.frontend.from_mxnet(block, shape_dict)
        func = mod["main"]
        func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)

    elif platform == "PyTorch":
        import torch
        import torchvision

        model = getattr(torchvision.models, model)(pretrained=True)
        model = model.eval()

        # We grab the TorchScripted model via tracing
        input_shape = [batch, 3, 224, 224]
        input_data = torch.randn(input_shape)
        scripted_model = torch.jit.trace(model, input_data).eval()

        synset_url = "".join(
            [
                "https://raw.githubusercontent.com/Cadene/",
                "pretrained-models.pytorch/master/data/",
                "imagenet_synsets.txt",
            ]
        )
        synset_name = "imagenet_synsets.txt"
        synset_path = download_testdata(synset_url, synset_name, module="data")
        with open(synset_path) as f:
            synsets = f.readlines()
        synsets = [x.strip() for x in synsets]
        splits = [line.split(" ") for line in synsets]
        key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}

        class_url = "".join(
            [
                "https://raw.githubusercontent.com/Cadene/",
                "pretrained-models.pytorch/master/data/",
                "imagenet_classes.txt",
            ]
        )
        class_name = "imagenet_classes.txt"
        class_path = download_testdata(class_url, class_name, module="data")
        with open(class_path) as f:
            class_id_to_key = f.readlines()
        class_id_to_key = [x.strip() for x in class_id_to_key]

        def transform_image(image):
            from torchvision import transforms

            my_preprocess = transforms.Compose(
                [
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                ]
            )
            img = my_preprocess(image)
            return np.expand_dims(img, 0)

        for img in pictures:
            dataset.append(transform_image(Image.open(img).resize((224, 224))))

        input_name = "data"
        shape_list = [(input_name, input_shape)]
        func, params = relay.frontend.from_pytorch(scripted_model, shape_list)
    elif platform == "TensorFlow":
        import tensorflow as tf
        import os

        try:
            tf_compat_v1 = tf.compat.v1
        except ImportError:
            tf_compat_v1 = tf
        import tvm.relay.testing.tf as tf_testing

        # Base location for model related files.
        repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
        model_name = "classify_image_graph_def-with_shapes.pb"
        model_url = os.path.join(repo_base, model_name)

        # Image label map
        map_proto = "imagenet_2012_challenge_label_map_proto.pbtxt"
        map_proto_url = os.path.join(repo_base, map_proto)

        # Human readable text for labels
        label_map = "imagenet_synset_to_human_label_map.txt"
        label_map_url = os.path.join(repo_base, label_map)

        model_path = download_testdata(model_url, model_name, module=["tf", "InceptionV1"])
        map_proto_path = download_testdata(map_proto_url, map_proto, module="data")
        label_path = download_testdata(label_map_url, label_map, module="data")

        with tf_compat_v1.gfile.GFile(model_path, "rb") as f:
            graph_def = tf_compat_v1.GraphDef()
            graph_def.ParseFromString(f.read())
            graph = tf.import_graph_def(graph_def, name="")
            # Call the utility to import the graph definition into default graph.
            graph_def = tf_testing.ProcessGraphDefParam(graph_def)
            # Add shapes to the graph.
            with tf_compat_v1.Session() as sess:
                graph_def = tf_testing.AddShapesToGraphDef(sess, "softmax")
        for img in pictures:
            dataset.append(np.array(Image.open(img).resize((299, 299))))
        shape_dict = {"data": [batch, 3, 299, 299]}
        dtype_dict = {"DecodeJpeg/contents": "uint8"}
        mod, params = relay.frontend.from_tensorflow(graph_def, layout=None, shape=shape_dict)
        func = mod["main"]  # keep the variable name that relay.build uses below
    else:
        raise Exception('Not Supported!')

    # Build the graph
    if device == 'x86':
        target = "llvm"
        ctx = tvm.cpu(0)
        log_print(log, 'Target: ' + target)
    elif device == 'Metal':
        target = "metal"
        ctx = tvm.metal(0)
        log_print(log, 'Target: ' + target)
    elif device == 'arm_cpu':
        target = tvm.target.arm_cpu(remote["type"])
        ctx = tvm.cpu(0)
        log_print(log, 'Target: ' + remote["type"])
    else:
        target = device
        ctx = tvm.cpu(0)
        log_print(log, 'Target: ' + device)
    log_print(log, 'Actual Model: ' + model + '\n')
    print('Making the graph...')
    if autotvm_log is not None:
        from tvm import autotvm
        log_print(log, 'Using AutoTVM file ' + autotvm_log)
        with autotvm.apply_graph_best(autotvm_log):
            with tvm.transform.PassContext(opt_level=3):
                lib = relay.build(func, target, params=params)
    else:
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(func, target, params=params)

    print("\nSetting up TVM...")
    from tvm.contrib import graph_runtime

    # Remote upload
    if remote is not None:
        from tvm import rpc
        from tvm.contrib import utils, graph_runtime as runtime
        print("Exporting graph...")
        tmp = utils.tempdir()
        lib_fname = tmp.relpath("net.tar")
        lib.export_library(lib_fname)
        print("Connecting to device...")
        remote = rpc.connect(str(remote["ip"]), int(remote["port"]))
        print("Uploading to device...")
        remote.upload(lib_fname)
        lib = remote.load_module("net.tar")
        if device == 'x86':
            ctx = remote.cpu(0)
        elif device == 'Metal':
            ctx = remote.metal(0)
        elif device == 'arm_cpu':
            ctx = remote.cpu(0)
        else:
            ctx = remote.cpu(0)
    dtype = "float32"
    m = graph_runtime.GraphModule(lib["default"](ctx))

    def run_tvm(pics, number, repeat):
        """
        Runs a single inference and gives back the time

        :param pics: The images(s) to run
        :param number: The number of times to run the inference
        :param repeat:  The number of times to repeat the measurement
        :return: An array with the time and the result
        """

        # combine pictures
        arr = np.ndarray(shape=input_shape, dtype=dtype)
        p = 0
        for ip in pics:
            arr[p] = ip.astype(dtype)
            p = p + 1
        m.set_input("data", tvm.nd.array(arr))

        #Actually run inference
        time = m.module.time_evaluator("run", ctx, number=number, repeat=repeat)()

        #Get output
        res = []
        if platform == 'MXNet':
            for i in range(len(pics)):
                res.append(synset[np.argmax(m.get_output(0).asnumpy()[i])])
        if platform == 'PyTorch':
            # Get top-1 result for TVM
            for i in range(len(pics)):
                top1_tvm = np.argmax(m.get_output(0).asnumpy()[i])
                tvm_class_key = class_id_to_key[top1_tvm]
                res.append(key_to_classname[tvm_class_key])
        if platform == 'TensorFlow':
            pre = np.squeeze(m.get_output(0, tvm.nd.empty(((1, 1008)), "float32")).asnumpy())
            node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path, uid_lookup_path=label_path)
            top_k = pre.argsort()[-5:][::-1]
            res = node_lookup.id_to_string(top_k[0])
        return [time, res]

    # Run the inferences
    output = []
    total = 0

    print("\nRunning inferences...")
    for i in range(int(len(dataset) / batch)):
        log_print(log, "\nSet " + str(i + 1) + ":")
        inp = []
        # Create the next batch
        for j in range(batch):
            inp.append(dataset[batch * i + j])
        # Run inference here
        output = run_tvm(inp, runs, reps)
        # Output results
        e = 0
        for rl in output[1]:
            log_print(log, "Image " + str(e + 1) + " Path: " + pictures[batch * i + e])
            log_print(log, "Image " + str(e + 1) + " ID: " + rl)
            e = e + 1
        log_print(log, "Time taken: " + str('%.2f' % (1000 * output[0].mean)) + " ms")
        total = total + output[0].mean
    ave = total / int(len(dataset) / batch)
    log_print(log, '\nAVERAGE TIME: ' + str(ave * 1000) + " ms")
    log_print(log, "Finished on " + str(datetime.now().strftime("%m/%d/%Y at %H:%M:%S")))
    log.close()
    return
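# --- Usage sketch (added for illustration; not part of the original file) ---
# A hedged example of how run_timing might be invoked; the log file name is a
# hypothetical placeholder and the device/platform/model values are taken from the
# branches handled inside the function above.
if __name__ == "__main__":
    with open("tvm_timing.log", "w") as log_file:
        run_timing("x86", "MXNet", "resnet18_v1", batch=1, runs=3, reps=5, log=log_file)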
Code example #18
model_path = download_testdata(model_url,
                               model_name,
                               module=['tf', 'InceptionV1'])
map_proto_path = download_testdata(map_proto_url, map_proto, module='data')
label_path = download_testdata(label_map_url, label_map, module='data')

######################################################################
# Import model
# ------------
# Creates tensorflow graph definition from protobuf file.

with tf.gfile.FastGFile(model_path, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    graph = tf.import_graph_def(graph_def, name='')
    # Call the utility to import the graph definition into default graph.
    graph_def = tf_testing.ProcessGraphDefParam(graph_def)
    # Add shapes to the graph.
    with tf.Session() as sess:
        graph_def = tf_testing.AddShapesToGraphDef(sess, 'softmax')

######################################################################
# Decode image
# ------------
# .. note::
#
#   tensorflow frontend import doesn't support preprocessing ops like JpegDecode.
#   JpegDecode is bypassed (just return source node).
#   Hence we supply decoded frame to TVM instead.
#

from PIL import Image
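# --- Continuation sketch (added for illustration; the original snippet is truncated here) ---
# A hedged example of supplying an already-decoded frame to TVM, as the note above
# describes; the image file name is a hypothetical placeholder.
import numpy as np

image = Image.open("elephant.jpg").resize((299, 299))  # decode and resize outside the graph
x = np.array(image)                                     # this decoded frame is what gets fed to TVM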
Code example #19
File: tvm_cli.py    Project: LucaFos/modelzoo
def compile(info):
    if info['model_path'].endswith('.onnx'):
        is_onnx = True
    elif info['model_path'].endswith('.pb'):
        is_onnx = False
    else:
        raise Exception('Model file format not supported')

    # Load model
    if is_onnx:
        onnx_model = onnx.load(info['model_path'])
        mod, params = relay.frontend.from_onnx(onnx_model, info['input_dict'])
        optimization_level = 3
    else:
        with tf.compat.v1.Session() as sess:
            with tf.io.gfile.GFile(info['model_path'], 'rb') as f:
                graph_def = tf.compat.v1.GraphDef()
                graph_def.ParseFromString(f.read())
                input_map = {}
                for index, (name,
                            shape) in enumerate(info['input_dict'].items()):
                    tf_new_image = tf.compat.v1.placeholder(
                        shape=[1 if x == -1 else x for x in shape],
                        dtype=info['input_data_type'],
                        name=name)
                    input_map["input:" + str(index)] = tf_new_image
                tf.import_graph_def(graph_def, name='', input_map=input_map)
                graph_def = sess.graph.as_graph_def()
                graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        input_shape_dict = {'DecodeJpeg/contents': info['input_list']}
        mod, params = relay.frontend.from_tensorflow(
            graph_def, shape=input_shape_dict, outputs=info['output_names'])
        optimization_level = 2

    # Set compilation params
    if info['cross_compile']:
        if info['target'] == 'cuda':
            raise Exception('cuda cross-compilation not supported yet')
        info['target'] += ' -target=aarch64-linux-gnu'

    # Transform data layout to what is expected by CUDA hardware, i.e. NCHW
    if info['target'] == 'cuda':
        desired_layouts = {'nn.conv2d': ['NCHW', 'default']}
        seq = tvm.transform.Sequential([
            relay.transform.RemoveUnusedFunctions(),
            relay.transform.ConvertLayout(desired_layouts)
        ])
        with tvm.transform.PassContext(opt_level=3):
            mod = seq(mod)

    # Compile model
    # Note opt_level cannot be higher than 2 because of a bug:
    # https://discuss.tvm.ai/t/tvm-0-6-1-compile-yolo-v2-tiny-fail-worked-in-v0-5-2/7244
    with autotvm.apply_history_best(info['autotvm_log']):
        with relay.build_config(opt_level=optimization_level):
            graph, lib, params = relay.build(mod,
                                             target=info['target'],
                                             params=params)

    # Write the compiled model to files
    output_model_path = path.join(info['output_path'],
                                  OUTPUT_NETWORK_MODULE_FILENAME)
    output_graph_path = path.join(info['output_path'],
                                  OUTPUT_NETWORK_GRAPH_FILENAME)
    output_param_path = path.join(info['output_path'],
                                  OUTPUT_NETWORK_PARAM_FILENAME)

    print('Writing library to', output_model_path)
    if info['cross_compile']:
        lib.export_library(
            output_model_path,
            cc.build_create_shared_func(options=[
                '--target=aarch64-linux-gnu', '-march=armv8-a', '-mfpu=NEON'
            ],
                                        compile_cmd='/usr/bin/clang'))
    else:
        lib.export_library(output_model_path)

    print('Writing graph to', output_graph_path)
    with open(output_graph_path, 'w') as graph_file:
        graph_file.write(graph)

    print('Writing weights to', output_param_path)
    with open(output_param_path, 'wb') as param_file:
        param_file.write(relay.save_param_dict(params))
Code example #20
def test_forward_ptb():
    '''test ptb model'''
    config = tf_testing.get_config()
    num_steps = config.num_steps
    num_hidden = config.hidden_size
    num_layers = config.num_layers
    batch_size = config.batch_size
    vocab_size = config.vocab_size
    out_sample_shape = (batch_size, vocab_size)
    out_state_shape = (num_layers, 2, batch_size, num_hidden)
    #Sample input
    inpt = "we have no useful information on"
    cnt_sample = 20

    def _pretty_print(items, is_char_model, id2word):
        if not is_char_model:
            return ' '.join([id2word[x] for x in items])
        else:
            return ''.join([id2word[x] for x in items]).replace('_', ' ')

    def _get_tvm_graph_module(graph_def):
        sym, params = nnvm.frontend.from_tensorflow(graph_def)

        # Cell inputs 'c' and 'h' consist of all layers' values
        shape_dict = {'Model/Placeholder': (batch_size, num_steps),
                      'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_c':(num_layers, batch_size, num_hidden),
                      'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_h':(num_layers, batch_size, num_hidden)}
        dtype_dict = {'Model/Placeholder': 'int32',
                      'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_c':'float32',
                      'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_h':'float32'}
        target = 'llvm'
        graph, lib, params = nnvm.compiler.build(sym, target, shape_dict,
                                                 dtype=dtype_dict, params=params)
        from tvm.contrib import graph_runtime
        ctx = tvm.cpu(0)
        return params, graph_runtime.create(graph, lib, ctx)

    def _do_tvm_sample(model, data, in_states, params, num_samples):
        """Sampled from the model"""
        samples = []
        state = in_states
        sample = None
        def _get_sample(data, state):
            input_data = np.full((batch_size, num_steps), data, dtype="int32")
            in_state_tup = np.split(state, indices_or_sections=2, axis=1)
            in_state_c = np.reshape(in_state_tup[0], (num_layers, batch_size, num_hidden))
            in_state_h = np.reshape(in_state_tup[1], (num_layers, batch_size, num_hidden))

            model.set_input('Model/Placeholder', tvm.nd.array(input_data.astype("int32")))
            model.set_input('Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_c',
                        tvm.nd.array(in_state_c.astype("float32")))
            model.set_input('Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_h',
                        tvm.nd.array(in_state_h.astype("float32")))
            model.set_input(**params)
            model.run()
            tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape,
                                                      "float32")).asnumpy()
            state_output = model.get_output(1, tvm.nd.empty(out_state_shape,
                                                        "float32")).asnumpy()
            sample = tf_testing.pick_from_weight(tvm_output[0])

            return sample, state_output

        for x in data:
            sample, state = _get_sample(x, state)

        if sample is not None:
            samples.append(sample)
        else:
            samples.append(0)

        k = 1
        while k < num_samples:
            sample, state = _get_sample(samples[-1], state)
            samples.append(sample)
            k += 1
        return samples, state

    with tf.Graph().as_default():
        word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
        vocab_size = len(word_to_id)
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        sess = tf.Session()

    #TVM graph module creation
    params, m = _get_tvm_graph_module(graph_def)

    # Create 10 predicted statements of 20 words
    cnt_stm = 0
    while cnt_stm < 10:
        cnt_stm += 1
        in_state = np.full((num_layers, 2, batch_size, num_hidden), 0, dtype="float32")
        seed_for_sample = inpt.split()
        tvm_samples, tvm_state = _do_tvm_sample(m, [word_to_id[word] \
                                                    for word in seed_for_sample],
                                                in_state, params, cnt_sample)
        tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
        tf_samples, tf_state = tf_testing.do_tf_sample(sess,
                                [word_to_id[word] for word in seed_for_sample],
                                in_state, cnt_sample)
        tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
        inpt = tvm_sample_str
        tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
        assert(tvm_sample_str == tf_sample_str)
Code example #21
def test_TF():

    model = TFNet()

    tf.saved_model.save(obj=model, export_dir="./out/tensorflow/model")

    # frozen_model
    '''
    TVM cannot read a TF model directly; the model has to be converted to a frozen_model first,
    which feels a bit like converting a dynamic model into a static one.
    The code was copied from the internet, so I don't know the underlying principle.
    '''
    full_model = tf.function(lambda Input: model(Input))
    full_model = full_model.get_concrete_function(
        tf.TensorSpec((1, 3, 64, 64), 'float32'))

    # Get frozen ConcreteFunction
    frozen_func = convert_variables_to_constants_v2(full_model)
    frozen_func.graph.as_graph_def()

    layers = [op.name for op in frozen_func.graph.get_operations()]

    # Save frozen graph from frozen ConcreteFunction to hard drive
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                      logdir="./out/tensorflow/frozen_models",
                      name="test.pb",
                      as_text=False)

    # graph_def
    '''
    Convert the frozen_model into a graph_def form that can be analyzed; the API comes from
    TensorFlow 1's GraphDef(). I couldn't find any TensorFlow 2 support information, and the
    official docs don't say anything either.
    '''
    with tf_compat_v1.gfile.GFile('./out/tensorflow/frozen_models/test.pb',
                                  "rb") as f:
        graph_def = tf_compat_v1.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name="")
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        # with tf_compat_v1.Session() as sess:
        #     graph_def = tf_testing.AddShapesToGraphDef(sess, 'Identity')

    # input_node
    '''
    Mark the input node(s)
    '''
    input_node_name = [
        node.name for node in graph_def.node
        if len(node.input) == 0 and node.op not in ('Const',)
    ]
    input_node_name = input_node_name[0]
    shape_dict = {input_node_name: (1, 3, 64, 64)}

    # output_node
    '''
    Find the final node (the one with no successors); by default it is the last node
    '''
    graph_dict = dict()
    for node in graph_def.node:
        graph_dict[node.name] = node.input
    for name_src in graph_dict:
        found = False
        for name_dst in graph_dict:
            if name_src == name_dst: continue
            if name_src in graph_dict[name_dst]:
                found = True
                break
        if not found:
            output_node_name = name_src
            break

    # Addshape
    '''
    Roughly, this puts a marker on the output node to indicate the final output; by default it is the last node
    '''
    with tf_compat_v1.Session(graph=graph) as sess:
        graph_def = tf_testing.AddShapesToGraphDef(sess, output_node_name)

    layout = 'NCHW'
    mod, params = relay.frontend.from_tensorflow(graph_def,
                                                 layout=layout,
                                                 shape=shape_dict,
                                                 outputs=[output_node_name])
    f = open('./out/tensorflow.txt', 'w')
    print(mod['main'], file=f)  ## need to specify 'main'; the current translation/conversion produces a lot of useless function definitions
    f.close()
Code example #22
def compile(info):
    if info['model_path'].endswith('.onnx'):
        is_onnx = True
    elif info['model_path'].endswith('.pb'):
        is_onnx = False
    else:
        raise Exception('Model file format not supported')

    # Load model
    if is_onnx:
        onnx_model = onnx.load(info['model_path'])
        mod, params = relay.frontend.from_onnx(onnx_model, info['input_dict'])
        optimization_level = 3
    else:
        with tf.compat.v1.Session() as sess:
            with tf.io.gfile.GFile(info['model_path'], 'rb') as f:
                graph_def = tf.compat.v1.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
                graph_def = sess.graph.as_graph_def()
                graph_def = tf_testing.ProcessGraphDefParam(graph_def)

        input_shape_dict = {'DecodeJpeg/contents': info['input_list']}
        mod, params = relay.frontend.from_tensorflow(
            graph_def, shape=input_shape_dict, outputs=info['output_names'])
        optimization_level = 2

    # Set compilation params
    target = 'llvm'
    if info['cross_compile']:
        target += ' -target=aarch64-linux-gnu'

    # Compile model
    # Note opt_level cannot be higher than 2 because of a bug:
    # https://discuss.tvm.ai/t/tvm-0-6-1-compile-yolo-v2-tiny-fail-worked-in-v0-5-2/7244
    with relay.build_config(opt_level=optimization_level):
        graph, lib, params = relay.build(mod, target=target, params=params)

    # Write the compiled model to files
    output_model_path = path.join(info['output_path'],
                                  OUTPUT_NETWORK_MODULE_FILENAME)
    output_graph_path = path.join(info['output_path'],
                                  OUTPUT_NETWORK_GRAPH_FILENAME)
    output_param_path = path.join(info['output_path'],
                                  OUTPUT_NETWORK_PARAM_FILENAME)

    print('Writing library to', output_model_path)
    if info['cross_compile']:
        lib.export_library(
            output_model_path,
            cc.build_create_shared_func(options=[
                '--target=aarch64-linux-gnu', '-march=armv8-a', '-mfpu=NEON'
            ],
                                        compile_cmd='/usr/bin/clang'))
    else:
        lib.export_library(output_model_path)

    print('Writing graph to', output_graph_path)
    with open(output_graph_path, 'w') as graph_file:
        graph_file.write(graph)

    print('Writing weights to', output_param_path)
    with open(output_param_path, 'wb') as param_file:
        param_file.write(relay.save_param_dict(params))
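# --- Usage sketch (added for illustration; not part of the original file) ---
# A hedged illustration of the 'info' dictionary this function expects, based on the
# keys accessed above; all values are hypothetical placeholders.
info = {
    'model_path': 'frozen_graph.pb',          # .pb selects the TensorFlow branch
    'input_list': (1, 224, 224, 3),           # shape bound to 'DecodeJpeg/contents'
    'output_names': ['softmax'],              # output node(s) handed to the Relay frontend
    'cross_compile': False,
    'output_path': './out',
}
compile(info)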
Code example #23
def export_module(opts):
    # Target settings
    layout = "NCHW"

    # Download required files
    from tvm.contrib.download import download_testdata
    model_path = download_testdata(model_url, model_file_name, module=['tf', 'keyword_spotting'])
    label_path = download_testdata(label_url, label_name, module=['data'])

    # Import model
    with tf_compat_v1.gfile.GFile(model_path, 'rb') as f:
        graph_def = tf_compat_v1.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        with tf_compat_v1.Session() as sess:
            graph_def = tf_testing.AddShapesToGraphDef(sess, 'labels_softmax')

    build_dir = opts.out_dir
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    ##save original TF graph
    if DEBUG_LOG:
        with open(os.path.join(build_dir, f'{model_name}_graph_original.log'), 'w') as orig_file:
            orig_file.write(str(graph_def))

    ## remove pre-processing nodes and fix the beginning
    nodes = []
    ##add first op
    input_dim0 = 1
    input_dim1 = 49
    input_dim2 = 10
    new_input = graph_def.node.add()
    new_input.op = 'Placeholder'
    new_input.name = 'Mfcc'
    new_input.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
            type=dtypes.float32.as_datatype_enum))
                
    nodes.append(new_input)

    removed_count = 0
    for ii, node in enumerate(graph_def.node, start=0):
        if node.op == 'DecodeWav' \
        or node.op == 'AudioSpectrogram' \
        or node.op == 'Mfcc' \
        or node.op == 'Placeholder' \
        or node.op == 'wav_data':
            removed_count += 1
            pass
        else:
            nodes.append(node) 
    print(f'NUM of layers removed: {removed_count}')

    new_graph = tf_compat_v1.GraphDef()
    new_graph.node.extend(nodes)
    ##log new graph
    if DEBUG_LOG:
        with open(os.path.join(build_dir, f'{model_name}_graph_new.log'), 'w') as new_graph_log:
            new_graph_log.write(str(new_graph))

    ##get mod and params with new graph
    shape_dict = {'Mfcc': (1, 49, 10)}
    mod, params = relay.frontend.from_tensorflow(new_graph,
                                                layout=layout,
                                                shape=shape_dict)

    if DEBUG_LOG:
        with open(os.path.join(build_dir, f'{model_name}_mod.log'), 'w') as mod_file:
            mod_file.write(str(mod))
        with open(os.path.join(build_dir, f'{model_name}_param.log'), 'w') as param_log:
            param_log.write(str(params))

    #quantization
    if opts.quantize:
        if not opts.global_scale:
            raise RuntimeError('Global Scale is not valid!')
        global_scale = float(opts.global_scale)
        print('INFO: Quantizing...')
        print(f'INFO: Global Scale: {global_scale}')
        with relay.quantize.qconfig(calibrate_mode='global_scale', 
                                    global_scale=global_scale,
                                    skip_conv_layers=[0]):
            mod = relay.quantize.quantize(mod, params)

        if DEBUG_LOG:
            with open(os.path.join(build_dir, f'{model_name}_mod_quantized.log'), 'w') as mod_log:
                mod_log.write(str(mod))

    #save module
    if opts.quantize:
        file_path = f'{build_dir}/module_gs_{global_scale}.pickle'
        with open(file_path, 'wb') as h1:
            pickle.dump(mod, h1, protocol=pickle.HIGHEST_PROTOCOL)
            print(f'INFO: {file_path} saved!')
        with open(f'{build_dir}/module_gs_{global_scale}.txt', 'w') as f:
            f.write(mod.astext())
    else:
        file_path = f'{build_dir}/module.pickle'
        with open(file_path, 'wb') as h1:
            pickle.dump(mod, h1, protocol=pickle.HIGHEST_PROTOCOL)
            print(f'INFO: {file_path} saved!')
        param_path = f'{build_dir}/params.bin'
        with open(param_path, 'wb') as f_params:
            f_params.write(relay.save_param_dict(params))
            print(f'INFO: {param_path} saved!')
        with open(f'{build_dir}/module.txt', 'w') as f:
            f.write(mod.astext())
    return mod, params
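# --- Usage sketch (added for illustration; not part of the original file) ---
# A hedged example of how export_module might be called; argparse.Namespace stands in
# for the real CLI options object, and the values are placeholders.
import argparse

opts = argparse.Namespace(out_dir="build", quantize=False, global_scale=None)
mod, params = export_module(opts)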