Example #1
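Note: the examples on this page are test methods from pyxir's TVM Relay frontend test suite, shown without their enclosing class or imports. A minimal sketch of the shared setup they rely on is below; the exact pyxir import paths are an assumption based on that suite's layout, and the run helper used in Example #28 is a suite-internal utility whose import is not reproduced here.

import warnings

import numpy as np
import tvm
from tvm import relay
from tvm.relay import testing

# assumed pyxir import paths; verify against the installed pyxir version
from pyxir.frontend.tvm import relay as xf_relay
from pyxir.shapes import TensorShape, TupleShape
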
    def test_repeat(self):
        c = relay.expr.const(np.ones((2, 2), dtype=np.float32))
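        # np.repeat semantics: repeats=2 along axis 0 turns (2, 2) into (4, 2);
        # the second case below repeats along axis 1, giving (2, 4)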
        net = relay.repeat(c, repeats=2, axis=0)
        net = relay.Function([], net)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[1].type[0] == "AnyOp"
        assert layers[1].shapes == [4, 2]

        c = relay.expr.const(np.ones((2, 2), dtype=np.float32))
        net = relay.repeat(c, repeats=2, axis=1)
        net = relay.Function([], net)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[1].type[0] == "AnyOp"
        assert layers[1].shapes == [2, 4]
Example #2
    def test_slice_like(self):
        data = relay.expr.const(np.ones((1, 6, 4, 4), np.float32))
        sl = relay.expr.const(np.ones((1, 4, 3, 3), np.float32))
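        # without axes, slice_like crops data to sl's full shape (1, 4, 3, 3);
        # the second case below crops only axes (2, 3), giving (1, 6, 3, 3)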
        net = relay.slice_like(data, sl)
        net = relay.Function([], net)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[1].type[0] == "Constant"
        assert layers[2].type[0] == "AnyOp"
        assert layers[2].shapes == [1, 4, 3, 3]

        data = relay.expr.const(np.ones((1, 6, 4, 4), np.float32))
        sl = relay.expr.const(np.ones((1, 4, 3, 3), np.float32))
        net = relay.slice_like(data, sl, axes=(2, 3))
        net = relay.Function([], net)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[1].type[0] == "Constant"
        assert layers[2].type[0] == "AnyOp"
        assert layers[2].shapes == [1, 6, 3, 3]
Example #3
    def test_relay_op(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.std(data, axis=1, keepdims=False, exclude=False)
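        # std has no native XLayer: the converter emits a Mean (keepdims
        # forced on), a generic RelayOp for the remaining computation, and a Sqrt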

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"

        assert layers[1].type[0] == "Mean"
        assert layers[1].shapes == [-1, 1, 2, 2]
        # assert isinstance(layers[1].attrs['relay_id'], list)
        assert layers[1].attrs["axes"] == [1]
        assert layers[1].attrs["keepdims"] is True

        assert layers[2].type[0] == "RelayOp"
        assert layers[2].shapes == [-1, 2, 2]
        # assert isinstance(layers[2].attrs['relay_id'], list)
        assert layers[2].attrs["relay_shape"] == [-1, 2, 2]
        assert layers[2].attrs["dtype"] == "float32"
        assert layers[2].attrs["axis"] == "[1]"
        assert layers[2].attrs["keepdims"] == "0"
        assert layers[2].attrs["exclude"] == "0"

        assert layers[3].type[0] == "Sqrt"
        assert layers[3].shapes == [-1, 2, 2]
Example #5
    def test_split_tuple(self):
        data = relay.var("data", relay.TensorType((-1, 5, 4, 4), "float32"))

        net = relay.split(data, indices_or_sections=(1, 4), axis=1).astuple()
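        # indices (1, 4) cut the 5-channel axis into chunks of 1, 3 and 1 channels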

        net = relay.Function([data], net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Split'
        assert 'relay_id' in layers[1].attrs
        assert layers[1].attrs['axis'] == 1
        assert layers[1].attrs['indices'] == (1, 4)
        assert layers[1].shapes == TupleShape([
            TensorShape([-1, 1, 4, 4]),
            TensorShape([-1, 3, 4, 4]),
            TensorShape([-1, 1, 4, 4])
        ])
Example #6
    def test_resnet_block(self):
        data = relay.var("data", relay.TensorType((-1, 3, 224, 224),
                                                  "float32"))
        weight = relay.var("weight")
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")

        conv2d0_expr = relay.nn.conv2d(data=data,
                                       weight=weight,
                                       kernel_size=(3, 3),
                                       channels=16,
                                       padding=(1, 1))
        bn0_expr = relay.nn.batch_norm(conv2d0_expr, bn_gamma, bn_beta,
                                       bn_mmean, bn_mvar)[0]
        relu0_expr = relay.nn.relu(bn0_expr)

        max_pool0_expr = relay.nn.max_pool2d(relu0_expr,
                                             pool_size=(2, 2),
                                             strides=(2, 2))

        conv2d1_weight = relay.var("conv2d1_weight")
        conv2d1_bias = relay.var("conv2d1_bias")
        conv2d1_expr = relay.nn.conv2d(data=max_pool0_expr,
                                       weight=conv2d1_weight,
                                       kernel_size=(3, 3),
                                       channels=16,
                                       padding=(1, 1))
        bias_add0_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1)
        relu1_expr = relay.nn.relu(bias_add0_expr)
        add0_expr = relay.op.tensor.add(max_pool0_expr, relu1_expr)
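        # the residual (skip-connection) add maps to an Eltwise layer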

        avg_pool0_expr = relay.nn.avg_pool2d(add0_expr,
                                             pool_size=(2, 2),
                                             strides=(2, 2))
        # unused tail ops; the exported function below ends at avg_pool0_expr
        # global_avg_pool0_expr = relay.op.nn.global_avg_pool2d(avg_pool0_expr)
        # bf_expr = relay.nn.batch_flatten(global_avg_pool0_expr)

        net = avg_pool0_expr

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Convolution'
        assert layers[2].type[0] == 'BatchNorm'
        assert layers[3].type[0] == 'ReLU'
        assert layers[4].type[0] == 'Pooling'
        assert layers[5].type[0] == 'Convolution'
        assert layers[6].type[0] == 'BiasAdd'
        assert layers[7].type[0] == 'ReLU'
        assert layers[8].type[0] == 'Eltwise'
        assert layers[9].type[0] == 'Pooling'
        assert layers[9].shapes == [-1, 16, 56, 56]
Example #7
    def test_add(self):
        left = relay.var("left", relay.TensorType((-1, 4, 2, 2), "float32"))

        right = relay.expr.const(np.zeros((2, 2), dtype=np.float32))
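        # the (2, 2) constant broadcasts against the (-1, 4, 2, 2) input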

        net = relay.add(left, right)

        net = relay.Function([left], net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert 'relay_id' in layers[0].attrs

        assert layers[1].type[0] == 'Constant'
        assert layers[1].tops[0][:3] == 'add'
        assert 'relay_id' in layers[1].attrs

        assert layers[2].type[0] == 'Add'
        assert layers[2].shapes == [-1, 4, 2, 2]
        assert 'relay_id' in layers[2].attrs
Example #8
    def test_multiply(self):
        left = relay.var("left", relay.TensorType((-1, 4, 2, 2), "float32"))

        right = relay.var("right", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.multiply(left, right)

        net = relay.Function([left, right], net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert 'relay_id' in layers[0].attrs

        assert layers[1].type[0] == 'Input'
        assert 'relay_id' in layers[1].attrs

        assert layers[2].type[0] == 'Multiply'
        assert layers[2].shapes == [-1, 4, 2, 2]
        assert 'relay_id' in layers[2].attrs
Example #9
    def test_batch_norm(self):
        var = relay.var("var", relay.TensorType((-1, 4, 2, 2), "float32"))
        data_mean = relay.expr.const(np.zeros((4, ), dtype=np.float32))
        data_var = relay.expr.const(np.ones((4, ), dtype=np.float32))
        gamma = relay.expr.const(2. * np.ones((4, ), dtype=np.float32))
        beta = relay.expr.const(3. * np.ones((4, ), dtype=np.float32))

        bn = relay.nn.batch_norm(var, gamma, beta, data_mean, data_var)[0]
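        # the BatchNorm XLayer packs its parameters as
        # data = [mean, variance, gamma, beta]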
        # tgi = relay.TupleGetItem(bn, 0)
        func = relay.Function([var], bn)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 2

        assert layers[0].type[0] == 'Input'
        assert 'relay_id' in layers[0].attrs

        bnl = layers[1]
        assert bnl.type[0] == 'BatchNorm'
        assert bnl.shapes == [-1, 4, 2, 2]
        np.testing.assert_array_equal(bnl.data[0],
                                      np.zeros((4, ), dtype=np.float32))
        np.testing.assert_array_equal(bnl.data[1],
                                      np.ones((4, ), dtype=np.float32))
        np.testing.assert_array_equal(bnl.data[2], 2. * np.ones(
            (4, ), dtype=np.float32))
        np.testing.assert_array_equal(bnl.data[3], 3. * np.ones(
            (4, ), dtype=np.float32))
        assert 'relay_id' in bnl.attrs
        assert bnl.attrs['axis'] == 1
Example #10
    def test_tuple_get_item(self):
        var1 = relay.var("var1", relay.TensorType((-1, 4, 2, 2), "int64"))
        var2 = relay.var("var2", relay.TensorType((-1, 3, 2, 2), "int64"))

        t = relay.Tuple([var1, var2])
        tgi = relay.TupleGetItem(t, 0)
        net = relay.Function([var1, var2], tgi)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 4

        assert layers[0].type[0] == "Input"
        assert isinstance(layers[0].attrs["dtype"], str)
        assert layers[0].attrs["dtype"] == "int64"
        assert "relay_id" in layers[0].attrs

        assert layers[1].type[0] == "Input"
        assert isinstance(layers[1].attrs["dtype"], str)
        assert layers[1].attrs["dtype"] == "int64"
        assert "relay_id" in layers[1].attrs

        assert layers[2].type[0] == "Tuple"
        assert layers[2].shapes == TupleShape([[-1, 4, 2, 2], [-1, 3, 2, 2]])

        assert layers[3].type[0] == "TupleGetItem"
        assert layers[3].attrs["index"] == 0
        assert layers[3].shapes == TensorShape([-1, 4, 2, 2])
Example #11
    def test_arange_full_and_reshape(self):
        start = relay.expr.const(0.0)
        stop = relay.expr.const(10.0)
        step = relay.expr.const(1.0)

        fill_val = relay.expr.const(1.0)
        fill_shape = [10, 1]
        dtype = "float32"

        left = relay.arange(start, stop, step, dtype)
        left = relay.reshape(left, [-1, 1])
        left = relay.reshape(left, [1, -1])

        right = relay.full(fill_val, fill_shape, dtype)
        right = relay.reshape(right, [1, -1])

        net = relay.multiply(left, right)

        mod = tvm.IRModule.from_expr(net)
        params = {}
        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert len(layers) == 10
        assert layers[0].type[0] == "Constant"
        assert layers[3].type[0] == "AnyOp"
        assert layers[7].type[0] == "AnyOp"
        assert layers[5].shapes == [1, 10]
        assert layers[8].shapes == [1, 10]
Example #12
    def test_tuple(self):
        var1 = relay.var("var1", relay.TensorType((-1, 4, 2, 2), "int64"))
        var2 = relay.var("var2", relay.TensorType((-1, 3, 2, 2), "int64"))

        t = relay.Tuple([var1, var2])
        net = relay.Function([var1, var2], t)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert layers[0].type[0] == 'Input'
        assert isinstance(layers[0].attrs['dtype'], str)
        assert layers[0].attrs['dtype'] == 'int64'
        assert 'relay_id' in layers[0].attrs

        assert layers[1].type[0] == 'Input'
        assert isinstance(layers[1].attrs['dtype'], str)
        assert layers[1].attrs['dtype'] == 'int64'
        assert 'relay_id' in layers[1].attrs

        assert layers[2].type[0] == 'Tuple'
        assert layers[2].shapes == TupleShape([[-1, 4, 2, 2], [-1, 3, 2, 2]])
Example #13
    def test_avg_pool2d(self):
        var = relay.var("var", relay.TensorType((-1, 2, 5, 5), "float32"))
        avg_pool = relay.nn.avg_pool2d(var,
                                       pool_size=(3, 3),
                                       strides=(2, 2),
                                       padding=(1, 1),
                                       ceil_mode=True,
                                       count_include_pad=True)
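        # with ceil_mode: out = ceil((5 + 2*1 - 3) / 2) + 1 = 3 per spatial dim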

        func = relay.Function([var], avg_pool)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 2

        assert layers[0].type[0] == 'Input'
        assert 'relay_id' in layers[0].attrs

        X = layers[1]
        assert X.type[0] == 'Pooling'
        assert X.shapes == [-1, 2, 3, 3]
        assert 'relay_id' in X.attrs
        assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]]
        assert X.attrs['insize'] == [5, 5]
        assert X.attrs['outsize'] == [3, 3]
        assert X.attrs['data_layout'] == 'NCHW'
        assert X.attrs['strides'] == [2, 2]
        assert X.attrs['kernel_size'] == [3, 3]
        assert X.attrs['pool_type'] == 'Avg'
Example #14
    def test_conv2d(self):
        data = relay.var("data", relay.TensorType((-1, 1, 4, 4), "float32"))
        weight = relay.expr.const(np.ones((2, 1, 2, 2), dtype=np.float32))
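        # out = (4 - 2) / 1 + 1 = 3 per spatial dim;
        # the OIHW weight (2, 1, 2, 2) gives 2 output channels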
        c = relay.nn.conv2d(data,
                            weight,
                            padding=(0, 0, 0, 0),
                            kernel_layout='OIHW')

        func = relay.Function([data], c)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 2

        assert layers[0].type[0] == 'Input'
        assert 'relay_id' in layers[0].attrs

        X = layers[1]
        assert X.type[0] == 'Convolution'
        assert X.shapes == [-1, 2, 3, 3]
        np.testing.assert_array_equal(X.data[0],
                                      np.ones((2, 1, 2, 2), dtype=np.float32))
        assert 'relay_id' in X.attrs
        assert X.attrs['kernel_size'] == [2, 2]
        assert X.attrs['strides'] == [1, 1]
        assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert X.attrs['channels'] == [1, 2]
        assert X.attrs['data_layout'] == 'NCHW'
        assert X.attrs['kernel_layout'] == 'OIHW'
        assert X.attrs['groups'] == 1
Example #15
    def test_take(self):
        data = relay.var("data", relay.TensorType((-1, 3, 224, 224),
                                                  "float32"))

        indices = relay.var("indices", relay.TensorType([], "int32"))

        net = relay.take(data, indices, axis=1)
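        # taking a scalar index along axis 1 drops that axis:
        # (-1, 3, 224, 224) -> (-1, 224, 224)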

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {"indices": np.array(0, np.int32)})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[1].type[0] == "Constant"
        assert layers[1].data == np.array(0, np.int32)
        assert layers[2].type[0] == "Take"
        assert "relay_id" in layers[2].attrs
        assert layers[2].attrs["axis"] == 1
        assert layers[2].attrs["mode"] == "clip"
        assert layers[2].shapes == [-1, 224, 224]
Example #16
    def test_nn_upsampling(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.nn.upsampling(data, scale_h=3, scale_w=2)
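        # H: 2 * 3 = 6, W: 2 * 2 = 4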

        net = relay.Function([data], net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        params = {}

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Upsampling2D'
        assert 'relay_id' in layers[1].attrs
        assert layers[1].shapes == [-1, 4, 6, 4]
        assert layers[1].attrs['scale_h'] == 3
        assert layers[1].attrs['scale_w'] == 2
        assert layers[1].attrs['data_layout'] == 'NCHW'
        assert layers[1].attrs['method'] == 'nearest_neighbor'
        assert layers[1].attrs['align_corners'] is False
Example #17
    def test_max_pool2d(self):
        var = relay.var("var", relay.TensorType((-1, 2, 4, 4), "float32"))
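        # out = (4 + 2*1 - 2) / 2 + 1 = 3 per spatial dim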
        max_pool = relay.nn.max_pool2d(var,
                                       pool_size=(2, 2),
                                       strides=(2, 2),
                                       padding=(1, 1))

        func = relay.Function([var], max_pool)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 2

        assert layers[0].type[0] == "Input"
        assert "relay_id" in layers[0].attrs

        X = layers[1]
        assert X.type[0] == "Pooling"
        assert X.bottoms == ["var"]
        assert X.shapes == [-1, 2, 3, 3]
        assert "relay_id" in X.attrs
        assert X.attrs["padding"] == [[0, 0], [0, 0], [1, 1], [1, 1]]
        assert X.attrs["insize"] == [4, 4]
        assert X.attrs["outsize"] == [3, 3]
        assert X.attrs["data_layout"] == "NCHW"
        assert X.attrs["strides"] == [2, 2]
        assert X.attrs["kernel_size"] == [2, 2]
        assert X.attrs["pool_type"] == "Max"
Example #19
    def test_conv2d_transpose(self):
        data = relay.var("data", relay.TensorType((-1, 1, 3, 3), "float32"))
        weight = relay.var("weight")

        simple_net = relay.nn.conv2d_transpose(
            data=data,
            weight=weight,
            kernel_size=(2, 2),
            channels=1,
            padding=(0, 0),
            strides=(2, 2),
            data_layout="NCHW",
            kernel_layout="IOHW",
        )
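        # transposed conv output: (in - 1) * stride + kernel = (3 - 1) * 2 + 2 = 6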

        simple_net = relay.Function(relay.analysis.free_vars(simple_net),
                                    simple_net)

        mod, params = testing.create_workload(simple_net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[0].shapes == [-1, 1, 3, 3]

        assert layers[1].type[0] == "Conv2DTranspose"
        assert layers[1].shapes == [-1, 1, 6, 6]
        assert layers[1].sizes == [36]
        assert layers[1].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert layers[1].attrs["strides"] == [2, 2]
        assert layers[1].attrs["dilation"] == [1, 1]
Example #20
    def test_simple_network(self):
        data = relay.var(
            "data",
            relay.TensorType((-1, 3, 224, 224), "float32")
        )
        weight = relay.var("weight")
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")
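        # pipeline: pad -> conv -> batch_norm -> relu -> mean -> squeeze
        #           -> dense -> bias_add -> softmax -> reshape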

        simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(
            data=simple_net,
            weight=weight,
            kernel_size=(3, 3),
            channels=16,
            padding=(0, 0)
        )
        simple_net = relay.nn.batch_norm(
            simple_net,
            bn_gamma,
            bn_beta,
            bn_mmean,
            bn_mvar
        )[0]
        simple_net = relay.nn.relu(simple_net)
        simple_net = relay.op.reduce.mean(simple_net, axis=(2, 3))
        simple_net = relay.op.transform.squeeze(simple_net)

        dense_weight = relay.var("dense_weight")
        dense_bias = relay.var('dense_bias')
        simple_net = relay.nn.dense(simple_net, weight=dense_weight, units=10)
        simple_net = relay.nn.bias_add(simple_net, dense_bias, axis=1)

        simple_net = relay.nn.softmax(simple_net, axis=1)
        simple_net = relay.op.transform.reshape(simple_net, newshape=(-1, 10))

        simple_net = relay.Function(
            relay.analysis.free_vars(simple_net),
            simple_net
        )

        mod, params = testing.create_workload(simple_net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Pad'
        assert layers[2].type[0] == 'Convolution'
        assert layers[3].type[0] == 'BatchNorm'
        assert layers[4].type[0] == 'ReLU'
        assert layers[5].type[0] == 'Mean'
        assert layers[6].type[0] == 'Squeeze'
        assert layers[7].type[0] == 'Dense'
        assert layers[8].type[0] == 'BiasAdd'
        assert layers[9].type[0] == 'Softmax'
        assert layers[10].type[0] == 'Reshape'
Example #21
    def test_nn_adaptive_avg_pool2d_3(self):
        warnings.filterwarnings("ignore")
        data = relay.var("data", relay.TensorType((-1, 6, 6, 4), "float32"))

        net = relay.nn.adaptive_avg_pool2d(data,
                                           output_size=(6, 6),
                                           layout="NHWC")

        net = relay.Function(relay.analysis.free_vars(net), net)

        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[0].shapes.tolist() == [-1, 6, 6, 4]
        assert layers[1].type[0] == "Transpose"
        assert layers[1].shapes.tolist() == [-1, 4, 6, 6]
        assert layers[2].type[0] == "Pooling"
        assert layers[2].shapes.tolist() == [-1, 4, 6, 6]
        assert layers[2].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert layers[2].attrs["insize"] == [6, 6]
        assert layers[2].attrs["outsize"] == [6, 6]
        assert layers[2].attrs["data_layout"] == "NCHW"
        assert layers[2].attrs["strides"] == [1, 1]
        assert layers[2].attrs["kernel_size"] == [1, 1]
        assert layers[2].attrs["pool_type"] == "Avg"
Example #22
    def test_concatenate(self):
        var1 = relay.var("data1", relay.TensorType((-1, 4, 2, 2), "float32"))
        var2 = relay.var("data2", relay.TensorType((-1, 8, 2, 2), "float32"))
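        # concatenating along axis 1: 4 + 8 = 12 channels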
        c = relay.concatenate([var1, var2], axis=1)

        net = relay.Function([var1, var2], c)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 3

        assert layers[0].type[0] == 'Input'
        assert layers[0].shapes == [-1, 4, 2, 2]
        assert 'relay_id' in layers[0].attrs

        assert layers[1].type[0] == 'Input'
        assert layers[1].shapes == [-1, 8, 2, 2]
        assert 'relay_id' in layers[1].attrs

        assert layers[2].type[0] == 'Concat'
        assert layers[2].shapes == [-1, 12, 2, 2]
        assert layers[2].bottoms == ['data1', 'data2']
        assert 'relay_id' in layers[2].attrs
        assert layers[2].attrs['axis'] == 1
Example #23
    def test_arange(self):
        start = relay.expr.const(1.0)
        stop = relay.expr.const(5.0)
        interval = relay.expr.const(1.5)
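        # arange(1.0, 5.0, 1.5) -> [1.0, 2.5, 4.0], i.e. 3 elements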
        a = relay.arange(start, stop, interval)
        net = relay.Function([], a)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert len(layers) == 4

        assert layers[0].type[0] == "Constant"
        assert layers[0].shapes == [1]

        assert layers[1].type[0] == "Constant"
        assert layers[1].shapes == [1]

        assert layers[2].type[0] == "Constant"
        assert layers[2].shapes == [1]

        assert layers[3].type[0] == "AnyOp"
        assert layers[3].shapes == [3]
Example #25
    def test_global_max_pool2d(self):
        var = relay.var("var", relay.TensorType((-1, 2, 5, 5), "float32"))
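        # global pooling covers the full 5x5 extent, leaving a (1, 1) output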
        max_pool = relay.nn.global_max_pool2d(var)

        func = relay.Function([var], max_pool)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 2

        assert layers[0].type[0] == 'Input'
        assert 'relay_id' in layers[0].attrs

        X = layers[1]
        assert X.type[0] == 'Pooling'
        assert X.bottoms == ['var']
        assert X.shapes == [-1, 2, 1, 1]
        assert 'relay_id' in X.attrs
        assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]]
        assert X.attrs['insize'] == [5, 5]
        assert X.attrs['outsize'] == [1, 1]
        assert X.attrs['data_layout'] == 'NCHW'
        assert X.attrs['strides'] == [1, 1]
        assert X.attrs['kernel_size'] == [5, 5]
        assert X.attrs['pool_type'] == 'Max'
Example #26
    def test_yolo_reorg(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.vision.yolo_reorg(data, stride=2)
        net = relay.Function(relay.analysis.free_vars(net), net)
        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'YoloReorg'
Example #27
    def test_mean(self):
        data = relay.var("data", relay.TensorType((-1, 4, 1, 1), "float32"))
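        # reducing axis 1 without keepdims: (-1, 4, 1, 1) -> (-1, 1, 1)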
        m = relay.mean(data, axis=1)
        net = relay.Function([data], m)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()
        assert layers[0].type[0] == "Input"
        assert layers[1].type[0] == "Mean"
        assert layers[1].shapes == [-1, 1, 1]
Example #28
    def test_simple_network(self):
        data = relay.var(
            "data",
            relay.TensorType((-1, 1, 4, 4), "float32")
        )
        weight = relay.var("weight")

        # simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        simple_net = relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=(2, 2),
            channels=2,
            padding=(0, 0)
        )

        simple_net = relay.Function(
            relay.analysis.free_vars(simple_net),
            simple_net
        )

        mod, params = testing.create_workload(simple_net)

        weight = np.reshape(np.array([[[1, 2], [3, 0]], [[1, 1], [0, 1]]],
                                     dtype=np.float32),
                            (2, 1, 2, 2))

        xgraph = xf_relay.from_relay(mod, {'weight': weight})

        layers = xgraph.get_layers()

        inputs = {
            'data': np.reshape(np.array([
                [10, 10, 0, 40],
                [50, 10, 0, 80],
                [30, 50, 10, 0],
                [10, 90, 30, 40]], dtype=np.float32), (1, 1, 4, 4))
        }
        res = run._run_network_cpu(xgraph, inputs)
        # print(res[0])
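        # hand-computed convolution, e.g. first map, top-left:
        # 10*1 + 10*2 + 50*3 + 10*0 = 180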

        expected_output = np.array([[
            [[180., 40., 80.],
             [160., 160., 190.],
             [160., 340., 100.]],

            [[30., 10., 120.],
             [110., 20., 80.],
             [170., 90., 50.]]
        ]])

        np.testing.assert_array_equal(res[0], expected_output)
Example #29
    def test_strided_slice(self):
        c = relay.expr.const(np.ones((2, 3, 4), np.float32))
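        # slicing [0:2, 0:3, 1:4] -> shape (2, 3, 3)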
        m = relay.strided_slice(c, (0, 0, 1), (2, 3, 4), (1, 1, 1))
        net = relay.Function([], m)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[1].type[0] == "StridedSlice"
        assert layers[1].shapes == [2, 3, 3]
Example #30
    def test_ones_like(self):
        c = relay.expr.const(np.ones((1, 6, 4, 4), np.float32))
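        # ones_like has no native XLayer, so it converts to a shape-preserving AnyOp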
        net = relay.ones_like(c)
        net = relay.Function([], net)
        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[1].type[0] == "AnyOp"
        assert layers[1].shapes == [1, 6, 4, 4]