Example #1
def get_workload(batch_size=1,
                 image_shape=(3, 224, 224),
                 num_classes=1000,
                 dtype="float32"):
    """Get benchmark workload for SqueezeNet

    Parameters
    ----------
    batch_size : int, optional
        The batch size used in the model

    image_shape : tuple, optional
        The input image shape

    num_classes : int, optional
        Number of classes

    dtype : str, optional
        The data type

    Returns
    -------
    mod : tvm.IRModule
        The Relay module that contains the SqueezeNet network

    params : dict of str to NDArray
        The parameters.
    """

    net = get_net(batch_size, image_shape, num_classes, dtype)
    return create_workload(net)
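A minimal usage sketch (assuming get_net and create_workload are in scope, as in tvm.relay.testing.squeezenet; the target and options are illustrative):

import tvm
from tvm import relay

# Build the workload and compile it for a generic CPU target.
mod, params = get_workload(batch_size=1)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)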
Example #2
def test_annotate_spans_compatibility():
    data = relay.var("data", relay.TensorType((1, 3, 64, 64), "float32"))
    weight = relay.var("weight")

    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")

    simple_net = relay.nn.conv2d(data=data,
                                 weight=weight,
                                 kernel_size=(3, 3),
                                 channels=3,
                                 padding=(1, 1))
    simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean,
                                     bn_mvar)[0]
    simple_net = relay.Function(relay.analysis.free_vars(simple_net),
                                simple_net)

    module, params = testing.create_workload(simple_net)

    # Apply some simple passes to legalize the IR.
    with tvm.transform.PassContext(opt_level=0):
        module, params = relay.optimize(module,
                                        tvm.testing.enabled_targets()[0][0],
                                        params)

    seq = tvm.transform.Sequential(
        [relay.transform.AnnotateSpans(),
         relay.transform.DefuseOps()])
    with tvm.transform.PassContext(opt_level=3):
        module = seq(module)
Example #3
    def test_simple_network(self):
        data = relay.var(
            "data",
            relay.TensorType((-1, 3, 224, 224), "float32")
        )
        weight = relay.var("weight")
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")

        simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(
            data=simple_net,
            weight=weight,
            kernel_size=(3, 3),
            channels=16,
            padding=(0, 0)
        )
        simple_net = relay.nn.batch_norm(
            simple_net,
            bn_gamma,
            bn_beta,
            bn_mmean,
            bn_mvar
        )[0]
        simple_net = relay.nn.relu(simple_net)
        simple_net = relay.op.reduce.mean(simple_net, axis=(2, 3))
        simple_net = relay.op.transform.squeeze(simple_net)

        dense_weight = relay.var("dense_weight")
        dense_bias = relay.var('dense_bias')
        simple_net = relay.nn.dense(simple_net, weight=dense_weight, units=10)
        simple_net = relay.nn.bias_add(simple_net, dense_bias, axis=1)

        simple_net = relay.nn.softmax(simple_net, axis=1)
        simple_net = relay.op.transform.reshape(simple_net, newshape=(-1, 10))

        simple_net = relay.Function(
            relay.analysis.free_vars(simple_net),
            simple_net
        )

        mod, params = testing.create_workload(simple_net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Pad'
        assert layers[2].type[0] == 'Convolution'
        assert layers[3].type[0] == 'BatchNorm'
        assert layers[4].type[0] == 'ReLU'
        assert layers[5].type[0] == 'Mean'
        assert layers[6].type[0] == 'Squeeze'
        assert layers[7].type[0] == 'Dense'
        assert layers[8].type[0] == 'BiasAdd'
        assert layers[9].type[0] == 'Softmax'
        assert layers[10].type[0] == 'Reshape'
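Example #4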
    def test_nn_adaptive_avg_pool2d_3(self):
        warnings.filterwarnings("ignore")
        data = relay.var("data", relay.TensorType((-1, 6, 6, 4), "float32"))

        net = relay.nn.adaptive_avg_pool2d(data,
                                           output_size=(6, 6),
                                           layout='NHWC')

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[0].shapes.tolist() == [-1, 6, 6, 4]
        assert layers[1].type[0] == 'Transpose'
        assert layers[1].shapes.tolist() == [-1, 4, 6, 6]
        assert layers[2].type[0] == 'Pooling'
        assert layers[2].shapes.tolist() == [-1, 4, 6, 6]
        assert layers[2].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert layers[2].attrs['insize'] == [6, 6]
        assert layers[2].attrs['outsize'] == [6, 6]
        assert layers[2].attrs['data_layout'] == 'NCHW'
        assert layers[2].attrs['strides'] == [1, 1]
        assert layers[2].attrs['kernel_size'] == [1, 1]
        assert layers[2].attrs['pool_type'] == 'Avg'
Example #5
    def test_nn_adaptive_avg_pool2d_3(self):
        warnings.filterwarnings("ignore")
        data = relay.var("data", relay.TensorType((-1, 6, 6, 4), "float32"))

        net = relay.nn.adaptive_avg_pool2d(data,
                                           output_size=(6, 6),
                                           layout="NHWC")

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[0].shapes.tolist() == [-1, 6, 6, 4]
        assert layers[1].type[0] == "Transpose"
        assert layers[1].shapes.tolist() == [-1, 4, 6, 6]
        assert layers[2].type[0] == "Pooling"
        assert layers[2].shapes.tolist() == [-1, 4, 6, 6]
        assert layers[2].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert layers[2].attrs["insize"] == [6, 6]
        assert layers[2].attrs["outsize"] == [6, 6]
        assert layers[2].attrs["data_layout"] == "NCHW"
        assert layers[2].attrs["strides"] == [1, 1]
        assert layers[2].attrs["kernel_size"] == [1, 1]
        assert layers[2].attrs["pool_type"] == "Avg"
Example #6
    def test_resnet_block(self):
        data = relay.var("data", relay.TensorType((-1, 3, 224, 224),
                                                  "float32"))
        weight = relay.var("weight")
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")

        conv2d0_expr = relay.nn.conv2d(data=data,
                                       weight=weight,
                                       kernel_size=(3, 3),
                                       channels=16,
                                       padding=(1, 1))
        bn0_expr = relay.nn.batch_norm(conv2d0_expr, bn_gamma, bn_beta,
                                       bn_mmean, bn_mvar)[0]
        relu0_expr = relay.nn.relu(bn0_expr)

        max_pool0_expr = relay.nn.max_pool2d(relu0_expr,
                                             pool_size=(2, 2),
                                             strides=(2, 2))

        conv2d1_weight = relay.var("conv2d1_weight")
        conv2d1_bias = relay.var("conv2d1_bias")
        conv2d1_expr = relay.nn.conv2d(data=max_pool0_expr,
                                       weight=conv2d1_weight,
                                       kernel_size=(3, 3),
                                       channels=16,
                                       padding=(1, 1))
        bias_add0_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1)
        relu1_expr = relay.nn.relu(bias_add0_expr)
        add0_expr = relay.op.tensor.add(max_pool0_expr, relu1_expr)

        avg_pool0_expr = relay.nn.avg_pool2d(add0_expr,
                                             pool_size=(2, 2),
                                             strides=(2, 2))
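        # NOTE: the next two expressions are constructed but never used below;
        # the function output is taken from avg_pool0_expr.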
        global_avg_pool0_expr = relay.op.nn.global_avg_pool2d(avg_pool0_expr)
        bf_expr = relay.nn.batch_flatten(global_avg_pool0_expr)

        net = avg_pool0_expr

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Convolution'
        assert layers[2].type[0] == 'BatchNorm'
        assert layers[3].type[0] == 'ReLU'
        assert layers[4].type[0] == 'Pooling'
        assert layers[5].type[0] == 'Convolution'
        assert layers[6].type[0] == 'BiasAdd'
        assert layers[7].type[0] == 'ReLU'
        assert layers[8].type[0] == 'Eltwise'
        assert layers[9].type[0] == 'Pooling'
        assert layers[9].shapes == [-1, 16, 56, 56]
Example #7
    def test_conv2d_transpose(self):
        data = relay.var("data", relay.TensorType((-1, 1, 3, 3), "float32"))
        weight = relay.var("weight")

        simple_net = relay.nn.conv2d_transpose(
            data=data,
            weight=weight,
            kernel_size=(2, 2),
            channels=1,
            padding=(0, 0),
            strides=(2, 2),
            data_layout="NCHW",
            kernel_layout="IOHW",
        )

        simple_net = relay.Function(relay.analysis.free_vars(simple_net),
                                    simple_net)

        mod, params = testing.create_workload(simple_net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[0].shapes == [-1, 1, 3, 3]

        assert layers[1].type[0] == "Conv2DTranspose"
        assert layers[1].shapes == [-1, 1, 6, 6]
        assert layers[1].sizes == [36]
        assert layers[1].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert layers[1].attrs["strides"] == [2, 2]
        assert layers[1].attrs["dilation"] == [1, 1]
Example #8
def quantize_and_build(out):
    f = relay.Function(relay.analysis.free_vars(out), out)
    mod, params = testing.create_workload(f)

    with relay.quantize.qconfig(skip_conv_layers=[]):
        qmod = relay.quantize.quantize(mod, params)

    relay.build(qmod, "llvm", params=params)
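A usage sketch with a hypothetical input expression (any float32 graph containing a convolution will do; the names and shapes here are illustrative):

data = relay.var("data", shape=(1, 3, 32, 32), dtype="float32")
weight = relay.var("weight")
out = relay.nn.conv2d(data, weight, channels=8, kernel_size=(3, 3), padding=(1, 1))
quantize_and_build(out)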
Example #9
def main():
    dshape = (32, 16)
    net = _get_model(dshape)
    mod, params = testing.create_workload(net)
    graph, lib, params = relay.build(mod, "llvm", params=params)

    with open(osp.join(CWD, "graph.json"), "w") as f_resnet:
        f_resnet.write(graph)
    with open(osp.join(CWD, "graph.params"), "wb") as f_params:
        f_params.write(runtime.save_param_dict(params))
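Note that the (graph, lib, params) tuple is the legacy relay.build interface; recent TVM releases return a single graph-executor factory instead. A sketch of the equivalent flow, assuming such a release:

# relay.build returns one factory module on newer TVM.
lib = relay.build(mod, "llvm", params=params)
with open(osp.join(CWD, "graph.json"), "w") as f_graph:
    f_graph.write(lib.get_graph_json())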
Example #10
def main():
    dshape = (32, 16)
    net = _get_model(dshape)
    mod, params = testing.create_workload(net)
    graph, lib, params = relay.build(mod, 'llvm', params=params)

    with open(osp.join(CWD, 'graph.json'), 'w') as f_resnet:
        f_resnet.write(graph)
    with open(osp.join(CWD, 'graph.params'), 'wb') as f_params:
        f_params.write(relay.save_param_dict(params))
Example #11
def _get_relay_workload(workload):
    """
    Get the workload determined by the JSON configuration 'model' parameter.
    If no such workload is found, or the paramter value specified is not a 
    single workload, then an exception is raised.
    """
    assert workload["type"] == "op", \
        "Only single workloads are supported in experiments."
    workload_name = workload["name"]

    if workload_name == "convolution1":
        dtype = "float32"
        shape = (1, 144, 28, 28)
        data = relay.var("data", shape=shape, dtype=dtype)
        weight = relay.var("weight")
        out = relay.nn.conv2d(data, weight, channels=32, kernel_size=(1, 1))
        net = relay.Function(relay.analysis.free_vars(out), out)
        return testing.create_workload(net)
    elif workload_name == "convolution2":
        dtype = "float32"
        shape = (20, 16, 50, 100)
        data = relay.var("data", shape=shape, dtype=dtype)
        weight = relay.var("weight")
        out = relay.nn.conv2d(data, weight, channels=32, kernel_size=(3, 3))
        net = relay.Function(relay.analysis.free_vars(out), out)
        return testing.create_workload(net)
    elif workload_name == "matmul1":
        dtype = "float32"
        data = relay.var("data", shape=(100, 30, 40), dtype=dtype)
        data2 = relay.var("data", shape=(1, 50, 40), dtype=dtype)
        out = relay.nn.batch_matmul(data, data2)
        net = relay.Function(relay.analysis.free_vars(out), out)
        return testing.create_workload(net)
    elif workload_name == "matmul2":
        dtype = "float32"
        data = relay.var("data", shape=(30, 30, 30), dtype=dtype)
        data2 = relay.var("data", shape=(30, 30, 30), dtype=dtype)
        out = relay.nn.batch_matmul(data, data2)
        net = relay.Function(relay.analysis.free_vars(out), out)
        return testing.create_workload(net)
    else:
        raise ValueError(f"Workload name {workload_name} not recognised.")
Example #12
    def test_yolo_reorg(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.vision.yolo_reorg(data, stride=2)
        net = relay.Function(relay.analysis.free_vars(net), net)
        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'YoloReorg'
Example #13
    def test_simple_network(self):
        data = relay.var(
            "data",
            relay.TensorType((-1, 1, 4, 4), "float32")
        )
        weight = relay.var("weight")

        # simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=(2, 2),
            channels=2,
            padding=(0, 0)
        )

        simple_net = relay.Function(
            relay.analysis.free_vars(simple_net),
            simple_net
        )

        mod, params = testing.create_workload(simple_net)

        weight = np.reshape(np.array([[[1, 2], [3, 0]], [[1, 1], [0, 1]]],
                                     dtype=np.float32),
                            (2, 1, 2, 2))

        xgraph = xf_relay.from_relay(mod, {'weight': weight})

        layers = xgraph.get_layers()

        inputs = {
            'data': np.reshape(np.array([
                [10, 10, 0, 40],
                [50, 10, 0, 80],
                [30, 50, 10, 0],
                [10, 90, 30, 40]], dtype=np.float32), (1, 1, 4, 4))
        }
        res = run._run_network_cpu(xgraph, inputs)

        expected_output = np.array([[
            [[180., 40., 80.],
             [160., 160., 190.],
             [160., 340., 100.]],

            [[30., 10., 120.],
             [110., 20., 80.],
             [170., 90., 50.]]
        ]])

        np.testing.assert_array_equal(res[0], expected_output)
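The expected values can be cross-checked against a plain NumPy valid convolution, independent of the runtime under test (a sketch; conv2d_valid is a helper written here, not a library call):

import numpy as np

def conv2d_valid(x, k):
    # Direct 2D cross-correlation, stride 1, no padding.
    kh, kw = k.shape
    oh, ow = x.shape[0] - kh + 1, x.shape[1] - kw + 1
    out = np.empty((oh, ow), dtype=np.float32)
    for i in range(oh):
        for j in range(ow):
            out[i, j] = np.sum(x[i:i + kh, j:j + kw] * k)
    return out

x = np.array([[10, 10, 0, 40],
              [50, 10, 0, 80],
              [30, 50, 10, 0],
              [10, 90, 30, 40]], dtype=np.float32)
k0 = np.array([[1, 2], [3, 0]], dtype=np.float32)
assert conv2d_valid(x, k0)[0, 0] == 180.0  # matches expected_output[0][0][0][0]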
Example #14
def main():
    dshape = (4, 8)
    net = _get_model(dshape)
    mod, params = testing.create_workload(net)
    graph, lib, params = relay.build(mod, 'llvm --system-lib', params=params)

    out_dir = sys.argv[1]
    lib.save(osp.join(sys.argv[1], 'graph.o'))
    with open(osp.join(out_dir, 'graph.json'), 'w') as f_resnet:
        f_resnet.write(graph)

    with open(osp.join(out_dir, 'graph.params'), 'wb') as f_params:
        f_params.write(relay.save_param_dict(params))
Example #15
def main():
    dshape = (4, 8)
    net = _get_model(dshape)
    mod, params = testing.create_workload(net)
    graph, lib, params = relay.build(mod, "llvm --system-lib", params=params)

    out_dir = sys.argv[1]
    lib.save(osp.join(sys.argv[1], "graph.o"))
    with open(osp.join(out_dir, "graph.json"), "w") as f_resnet:
        f_resnet.write(graph)

    with open(osp.join(out_dir, "graph.params"), "wb") as f_params:
        f_params.write(runtime.save_param_dict(params))
Example #16
    def test_sqrt(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.sqrt(data)
        net = relay.Function(relay.analysis.free_vars(net), net)
        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Sqrt'
        assert 'relay_id' in layers[1].attrs
Example #17
def get_bitserial_conv2d_nhwc(model,
                              layer_name,
                              batch_size=1,
                              dtype='int8',
                              activation_bits=2,
                              weight_bits=2,
                              out_dtype='int16'):
    """get the bitserial 2D convolution here, the input layout is 'nchw' """

    params = model['conv2d'][layer_name]['params']

    # The image shape should be HWC
    image_shape = params[1], params[2], params[0]
    # The data shape should be NHWC
    data_shape = (batch_size, ) + image_shape

    output_channel = params[3]

    kernel_size = params[4]

    # the weight shape should be HWIO
    weight_shape = kernel_size, kernel_size, params[0], output_channel

    stride = params[5]
    padding = params[6]

    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var(layer_name + "_weight", shape=weight_shape, dtype=dtype)

    net = binary_layers.bitserial_conv2d(
        data=data,
        weight=weight,
        strides=(stride, stride),
        padding=(padding, padding),
        channels=output_channel,
        kernel_size=(kernel_size, kernel_size),
        activation_bits=activation_bits,
        weight_bits=weight_bits,
        data_layout='NHWC',
        kernel_layout='HWIO',  # Must be specified explicitly here.
        pack_dtype='uint8',
        out_dtype=out_dtype,
        name=layer_name)

    net = relay.Function(relay.analysis.free_vars(net), net)

    mod, params = testing.create_workload(net)

    # We only need to return these three variables
    return mod, params, data_shape
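A usage sketch with a hypothetical model dict; the positional parameter layout (in_channels, height, width, out_channels, kernel, stride, padding) is read off the indexing above:

model = {'conv2d': {'layer1': {'params': [64, 56, 56, 128, 3, 1, 1]}}}
mod, params, data_shape = get_bitserial_conv2d_nhwc(model, 'layer1')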
Example #18
def _get_relay_convolution():
    """
    Create simple relay convolution.
    """
    dtype = "float32"
    shape = (1, 3, 8, 8)
    data = relay.var("data", shape=shape, dtype=dtype)
    weight = relay.var("weight")
    out = relay.nn.conv2d(data,
                          weight,
                          channels=16,
                          kernel_size=(3, 3),
                          padding=(1, 1))
    net = relay.Function(relay.analysis.free_vars(out), out)
    return testing.create_workload(net)
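A usage sketch compiling the returned workload (assuming tvm and relay are imported as elsewhere in these examples):

mod, params = _get_relay_convolution()
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)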
Example #19
    def test_softmax(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.nn.softmax(data)
        net = relay.Function(relay.analysis.free_vars(net), net)
        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[1].type[0] == "Softmax"
        assert "relay_id" in layers[1].attrs
        assert "axis" in layers[1].attrs
        assert layers[1].attrs["axis"] == -1
Example #20
    def test_simple_network_cvx(self):
        data = relay.var(
            "data",
            relay.TensorType((-1, 3, 224, 224), "float32")
        )
        weight = relay.var("weight")
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")

        simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(
            data=simple_net,
            weight=weight,
            kernel_size=(3, 3),
            channels=16,
            padding=(0, 0)
        )
        simple_net = relay.nn.relu(simple_net)

        simple_net = relay.Function(
            relay.analysis.free_vars(simple_net),
            simple_net
        )

        mod, params = testing.create_workload(simple_net)

        xgraph = xf_relay.from_relay(
            mod,
            params,
            cvx_preprocessing={'data': 'scale-0.5__transpose-2,0,1'}
        )

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'StrInput'
        assert layers[0].shapes == [-1]
        assert layers[1].type[0] == 'Cvx'
        assert layers[1].shapes == [-1, 3, 224, 224]
        assert layers[2].type[0] == 'Pad'
        assert layers[3].type[0] == 'Convolution'
        assert layers[4].type[0] == 'ReLU'

        assert layers[0].tops == ['data_cvx']
        assert layers[1].bottoms == ['data']
        assert layers[1].tops[0][:7] == 'nn_pad-'
Example #21
def test_fold_batch_norm():
    def expected():
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.const(np.zeros((16, 3, 3, 3)))
        bias = relay.const(np.zeros((16, 1, 1)))
        conv = relay.nn.conv2d(data=data,
                               weight=weight,
                               kernel_size=(3, 3),
                               channels=16,
                               padding=(1, 1))
        add = relay.add(conv, bias)
        return relay.Function(relay.analysis.free_vars(add), add)

    remove_bn_pass = tvm.transform.Sequential([
        relay.transform.InferType(),
        relay.transform.SimplifyInference(),
        relay.transform.FoldConstant(),
        relay.transform.FoldScaleAxis(),
    ])

    data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
    weight = relay.var("weight")
    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")

    conv = relay.nn.conv2d(data=data,
                           weight=weight,
                           kernel_size=(3, 3),
                           channels=16,
                           padding=(1, 1))
    bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean, bn_mvar)

    def initializer(_, param):
        # Fill the parameter array in place; rebinding the local name
        # would leave the array unchanged.
        param[:] = np.zeros(param.shape)

    mod, params = create_workload(bn_output[0], initializer)
    mod["main"] = bind_params_by_name(mod["main"], params)

    with tvm.transform.PassContext(opt_level=3):
        mod = remove_bn_pass(mod)

    expect = run_infer_type(expected())
    assert tvm.ir.structural_equal(mod["main"], expect)
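For reference: SimplifyInference rewrites batch_norm(x) = gamma * (x - mean) / sqrt(var + eps) + beta into a per-channel scale and shift (scale = gamma / sqrt(var + eps), shift = beta - mean * scale), and FoldScaleAxis then folds the scale into the convolution weight. With the all-zeros initializer above, both scale and shift are zero, which is why expected() uses zero constants for the folded weight and bias.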
Example #22
    def test_simple_network_cvx(self):
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight")
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")

        simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(
            data=simple_net,
            weight=weight,
            kernel_size=(3, 3),
            channels=16,
            padding=(0, 0),
        )
        simple_net = relay.nn.relu(simple_net)

        simple_net = relay.Function(relay.analysis.free_vars(simple_net),
                                    simple_net)

        mod, params = testing.create_workload(simple_net)

        xgraph = xf_relay.from_relay(
            mod,
            params,
            cvx_preprocessing={"data": "scale-0.5__transpose-2,0,1"})

        assert len(xgraph.get_input_names()) == 1
        layers = xgraph.get_layers()

        # assert layers[0].type[0] == "Constant"
        assert layers[0].type[0] == "StrInput"
        assert layers[0].shapes == [-1]
        assert layers[1].type[0] == "Cvx"
        assert layers[1].shapes == [-1, 3, 224, 224]
        assert layers[2].type[0] == "Pad"
        assert layers[3].type[0] == "Convolution"
        assert layers[4].type[0] == "ReLU"

        assert layers[0].tops == ["data_cvx"]
        assert layers[1].bottoms == ["data"]
        assert layers[1].tops[0][:7] == "nn.pad-"
Example #23
    def test_expand_dims(self):
        data = relay.var("data", relay.TensorType((-1, 4), "float32"))

        net = relay.expand_dims(data, axis=1, num_newaxis=2)

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'ExpandDims'
        assert 'relay_id' in layers[1].attrs
        assert layers[1].attrs['axis'] == 1
        assert layers[1].attrs['num_newaxis'] == 2
        assert layers[1].shapes == [-1, 1, 1, 4]
Example #24
def test_integer_compatibility_in_layout_transform():
    x = relay.var("data", shape=(2, 3, 48, 48), dtype="float32")
    conv_out = relay.nn.conv2d(
        x,
        relay.var("weight", shape=(1, 3, 1, 1), dtype="float32"),
        strides=[47, 47],
        channels=1,
        kernel_size=[1, 1],
    )
    bias_out = relay.nn.bias_add(conv_out, relay.var("bias"))
    broadcast_out = relay.op.broadcast_to(
        bias_out, relay.const([2, 1, 2, 2], dtype="int64"))
    y = relay.add(bias_out, broadcast_out)

    mod, _ = testing.create_workload(y)
    with tvm.transform.PassContext(opt_level=3):
        with tvm.target.Target("llvm"):
            mod = relay.transform.CanonicalizeOps()(mod)
            mod = relay.transform.AlterOpLayout()(mod)
Example #25
def get_bitserial_conv2d(model, layer_name, batch_size=1, dtype='int8'):
    params = model['conv2d'][layer_name]['params']

    image_shape = params[0], params[1], params[2]

    data_shape = (batch_size, ) + image_shape

    output_channel = params[3]

    kernel_size = params[4]

    weight_shape = output_channel, params[0], kernel_size, kernel_size

    stride = params[5]
    padding = params[6]

    activation_bits = params[7]
    weight_bits = params[8]

    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var(layer_name + "_weight", shape=weight_shape, dtype=dtype)

    net = binary_layers.bitserial_conv2d(data=data,
                                         weight=weight,
                                         strides=(stride, stride),
                                         padding=(padding, padding),
                                         channels=output_channel,
                                         kernel_size=(kernel_size,
                                                      kernel_size),
                                         activation_bits=activation_bits,
                                         weight_bits=weight_bits,
                                         pack_dtype='uint8',
                                         name=layer_name)

    net = relay.Function(relay.analysis.free_vars(net), net)

    mod, params = testing.create_workload(net)

    # We only need to return these three variables
    return mod, params, data_shape
Example #26
def test_conv2d_relu():
    data_shape = (1, 1280, 14, 14)
    out_channels = 256
    kernel_size, strides, padding, dilation, groups = (1, 1), (1, 1), (0, 0, 0, 0), (1, 1), 1
    data_layout, kernel_layout = "NCHW", "OIHW"
    dtype = "float32"

    f = get_conv2d_relu(
        data_shape,
        out_channels,
        kernel_size,
        strides,
        padding,
        dilation,
        groups,
        data_layout,
        kernel_layout,
        dtype,
    )

    mod, params = testing.create_workload(f)
    verify_meta_schedule_with_tensorrt(mod, params, data_shape)
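Example #27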
def test_immutability():
    simple_net = relay.nn.conv2d(
        data=relay.var("data", relay.TensorType((1, 3, 224, 224), "float32")),
        weight=relay.var("weight"),
        kernel_size=(5, 5),
        channels=3,
        padding=(1, 1),
    )
    simple_net = relay.Function(relay.analysis.free_vars(simple_net),
                                simple_net)
    mod, _ = create_workload(simple_net)

    old_mod = mod

    with tvm.transform.PassContext(opt_level=4):
        with tvm.target.Target("llvm"):
            seq = tvm.transform.Sequential(
                passes=[transform.ToBasicBlockNormalForm()], opt_level=4)
            new_mod = seq(mod)

    assert old_mod.astext() == mod.astext()
    assert old_mod.astext() != new_mod.astext()
Example #28
def get_network():
    out_channels = 16
    batch_size = 1
    data = relay.var("data", relay.TensorType((batch_size, 3, img_size, img_size), "float16"))
    dense_weight = relay.var(
        "dweight", relay.TensorType((batch_size, 16 * img_size * img_size), "float16")
    )
    weight = relay.var("weight")
    second_weight = relay.var("second_weight")
    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")
    simple_net = relay.nn.conv2d(
        data=data, weight=weight, kernel_size=(3, 3), channels=out_channels, padding=(1, 1)
    )
    simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0]
    simple_net = relay.nn.relu(simple_net)
    simple_net = relay.nn.batch_flatten(simple_net)
    simple_net = relay.nn.dense(simple_net, dense_weight)
    simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
    data_shape = (batch_size, 3, img_size, img_size)
    net, params = testing.create_workload(simple_net)
    return net, params, data_shape
Example #29
dense1_weight = relay.var("dense1_weight")
dense2_weight = relay.var("dense2_weight")

simple_net = relay.nn.conv2d(data=data, weight=conv1_weight, kernel_size=(5, 5),
                             channels=20, strides=(1, 1), padding=(0, 0))
simple_net = relay.nn.max_pool2d(simple_net, pool_size=(2, 2), strides=(2, 2),
                                 padding=(0, 0))
simple_net = relay.nn.conv2d(simple_net, weight=conv2_weight, kernel_size=(5, 5),
                             channels=50, strides=(1, 1), padding=(0, 0))
simple_net = relay.nn.max_pool2d(simple_net, pool_size=(2, 2), strides=(2, 2),
                                 padding=(0, 0))
simple_net = relay.nn.batch_flatten(simple_net)
simple_net = relay.nn.dense(simple_net, dense1_weight, units=500)
simple_net = relay.nn.relu(simple_net)
simple_net = relay.nn.dense(simple_net, dense2_weight, units=10)
simple_net = relay.nn.softmax(simple_net, axis=1)

node = relay.analysis.free_vars(simple_net)
simple_net = relay.Function(node, simple_net)
net, params = testing.create_workload(simple_net)
opt_level = 0
target = tvm.target.cuda()
with relay.build_config(opt_level=opt_level):
    graph, lib, params = relay.build_module.build(net, target, params=params)
ctx = tvm.gpu()

images = topi.image.load_test_images()
labels = topi.image.load_test_labels()
data = images[0:batch_size]
predict_out = labels[0:batch_size]

paramsName_pair = relay.build_module.getParamsNamePair()

conv1_weight_params = getLenetParams(4, "p0_params.txt", 20, 1, 5, 5)
conv2_weight_params = getLenetParams(4, "p1_params.txt", 50, 20, 5, 5)
Example #30
    def test_simple_network(self):
        data = relay.var("data", relay.TensorType((-1, 3, 224, 224),
                                                  "float32"))
        weight = relay.var("weight")
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")

        simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(data=simple_net,
                                     weight=weight,
                                     kernel_size=(3, 3),
                                     channels=16,
                                     padding=(0, 0))
        simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta,
                                         bn_mmean, bn_mvar)[0]
        simple_net = relay.nn.relu(simple_net)
        simple_net = relay.op.reduce.mean(simple_net, axis=(2, 3))
        simple_net = relay.op.transform.squeeze(simple_net)

        dense_weight = relay.var("dense_weight")
        dense_bias = relay.var('dense_bias')
        simple_net = relay.nn.dense(simple_net, weight=dense_weight, units=10)
        simple_net = relay.nn.bias_add(simple_net, dense_bias, axis=1)

        simple_net = relay.nn.softmax(simple_net, axis=1)
        simple_net = relay.op.transform.reshape(simple_net, newshape=(-1, 10))

        simple_net = relay.Function(relay.analysis.free_vars(simple_net),
                                    simple_net)

        mod, params = testing.create_workload(simple_net)

        json_file = os.path.join(FILE_DIR, "relay_mod_test.json")
        with open(json_file, 'w') as f:
            json.dump(tvm.ir.save_json(mod), f)

        params_file = os.path.join(FILE_DIR, "relay_params_test.params")
        with open(params_file, "wb") as fo:
            fo.write(relay.save_param_dict(params))

        mod_read, params_read = load_model_from_file('Relay', 'Relay')(
            model_path=json_file,
            shapes={
                'data': [-1, 3, 224, 224]
            },
            opt_model_path=params_file)

        xgraph = xf_relay.from_relay(mod_read, params_read)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Pad'
        assert layers[2].type[0] == 'Convolution'
        assert layers[3].type[0] == 'BatchNorm'
        assert layers[4].type[0] == 'ReLU'
        assert layers[5].type[0] == 'Mean'
        assert layers[6].type[0] == 'Squeeze'
        assert layers[7].type[0] == 'Dense'
        assert layers[8].type[0] == 'BiasAdd'
        assert layers[9].type[0] == 'Softmax'
        assert layers[10].type[0] == 'Reshape'

        os.remove(json_file)
        os.remove(params_file)