Ejemplo n.º 1
0
def test_fold_batch_norm():
    """conv2d + batch_norm must fold into conv2d + constant bias add under
    SimplifyInference / FoldConstant / FoldScaleAxis (params zero-initialized)."""
    def expected():
        # Post-pass graph: constant (all-zero) weight and bias, no batch_norm.
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.const(np.zeros((16, 3, 3, 3)))
        bias = relay.const(np.zeros((16, 1, 1)))
        conv = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        add = relay.add(conv, bias)
        return relay.Function(relay.analysis.free_vars(add), add)

    # Pipeline under test: simplify inference-time ops, then fold constants.
    remove_bn_pass = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.SimplifyInference(),
            relay.transform.FoldConstant(),
            relay.transform.FoldScaleAxis(),
        ]
    )

    # Input graph: conv2d followed by batch_norm over untyped parameter vars.
    data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
    weight = relay.var("weight")
    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")

    conv = relay.nn.conv2d(
        data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
    )
    bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean, bn_mvar)

    def initializer(_, param):
        # NOTE(review): rebinding `param` does not mutate the array passed in
        # by create_workload (in-place fill would be `param[:] = ...`) --
        # confirm the intended initializer contract.
        param = np.zeros(param.shape)

    mod, params = create_workload(bn_output[0], initializer)
    mod["main"] = bind_params_by_name(mod["main"], params)

    with tvm.transform.PassContext(opt_level=3):
        mod = remove_bn_pass(mod)

    expect = run_infer_type(expected())
    assert tvm.ir.structural_equal(mod["main"], expect)
Ejemplo n.º 2
0
def test_broadcast_to_like():
    """broadcast_to_like: inferred type and values must follow the like-tensor."""
    src_shape, like_shape = (4, 1, 6), (3, 4, 5, 6)
    dtype = "float32"
    x = relay.Var("x", relay.ty.TensorType(src_shape, dtype))
    y = relay.Var("y", relay.ty.TensorType(like_shape, dtype))
    z = relay.broadcast_to_like(x, y)
    assert run_infer_type(z).checked_type == relay.ty.TensorType(like_shape, dtype)

    func = relay.Function([x, y], z)
    x_np = np.random.uniform(size=src_shape).astype(dtype)
    y_np = np.random.uniform(size=like_shape).astype(dtype)
    expected = np.broadcast_to(x_np, like_shape)
    for target, ctx in ctx_list():
        for kind in ("graph", "debug"):
            executor = relay.create_executor(kind, ctx=ctx, target=target)
            result = executor.evaluate(func)(x_np, y_np)
            tvm.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-5)
Ejemplo n.º 3
0
    def verify_affine_grid(num_batch, target_shape):
        """Type-check and numerically validate relay.image.affine_grid."""
        dtype = "float32"
        data_shape = (num_batch, 2, 3)
        data = relay.var("data", relay.ty.TensorType(data_shape, dtype))
        grid = relay.image.affine_grid(data, target_shape)
        expected_ty = relay.ty.TensorType(
            (num_batch, len(target_shape), *target_shape), dtype
        )
        assert run_infer_type(grid).checked_type == expected_ty

        func = relay.Function([data], grid)
        data_np = np.random.uniform(size=data_shape).astype(dtype)
        expected = tvm.topi.testing.affine_grid_python(data_np, target_shape)

        for target, ctx in tvm.testing.enabled_targets():
            for kind in ("graph", "debug"):
                executor = relay.create_executor(kind, ctx=ctx, target=target)
                result = executor.evaluate(func)(data_np)
                tvm.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-5, atol=1e-5)
Ejemplo n.º 4
0
def test_collapse_sum_like():
    """collapse_sum_like reduces to the like-tensor's shape (sum over axis 0 here)."""
    big_shape, small_shape = (3, 4, 5, 6), (4, 5, 6)
    dtype = "float32"
    x = relay.Var("x", relay.ty.TensorType(big_shape, dtype))
    y = relay.Var("y", relay.ty.TensorType(small_shape, dtype))
    z = relay.collapse_sum_like(x, y)
    assert run_infer_type(z).checked_type == relay.ty.TensorType(small_shape, dtype)

    func = relay.Function([x, y], z)
    x_np = np.random.uniform(size=big_shape).astype(dtype)
    y_np = np.random.uniform(size=small_shape).astype(dtype)
    expected = np.sum(x_np, 0)
    for target, dev in tvm.testing.enabled_targets():
        for kind in ("graph", "debug"):
            executor = relay.create_executor(kind, device=dev, target=target)
            result = executor.evaluate(func)(x_np, y_np)
            tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5)
    def verify_reshape(shape, newshape):
        """A chain of dynamic reshapes must fold to one static reshape."""
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(newshape, "float32"))
        # Each reshape's target is the dynamic shape_of a previous tensor.
        r1 = relay.reshape(x, relay.shape_of(y))
        r2 = relay.reshape(r1, relay.shape_of(x))
        r3 = relay.reshape(r2, relay.shape_of(r1))
        r4 = relay.reshape(r3, relay.shape_of(r2))
        func = run_infer_type(relay.Function([x, y], r4))
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())

        body = func2.body
        assert isinstance(body, relay.Call)
        assert body.op == relay.op.get("reshape")
        assert "newshape=" in body.astext()
        assert body.checked_type == relay.ty.TensorType(shape, "float32")

        x_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_np = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
        # The round-trip of reshapes ends back at x's shape, so x itself is the reference.
        verify_func(func2, [x_np, y_np], x_np)
Ejemplo n.º 6
0
    def verify_reshape(shape, newshape, oshape):
        """Static reshape: printed attrs, inferred type, gradient, and values."""
        x = relay.var("x", relay.TensorType(shape, "float32"))
        z = relay.reshape(x, newshape=newshape)
        assert "newshape=" in z.astext()
        assert run_infer_type(z).checked_type == relay.ty.TensorType(oshape, "float32")

        func = relay.Function([x], z)
        check_grad(func)
        x_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        expected = np.reshape(x_np, oshape)
        for target, ctx in ctx_list():
            for kind in ("graph", "debug"):
                executor = relay.create_executor(kind, ctx=ctx, target=target)
                result = executor.evaluate(func)(x_np)
                tvm.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-5)
Ejemplo n.º 7
0
    def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
        """one_hot: inferred type matches _get_oshape; values match the topi reference."""
        indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
        out = relay.one_hot(
            indices, relay.const(on_value), relay.const(off_value), depth, axis, dtype
        )
        expected_ty = relay.ty.TensorType(_get_oshape(indices_shape, depth, axis), dtype)
        assert run_infer_type(out).checked_type == expected_ty

        func = relay.Function([indices], out)
        indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
        expected = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)

        for target, dev in tvm.testing.enabled_targets():
            for kind in ("graph", "debug"):
                executor = relay.create_executor(kind, device=dev, target=target)
                result = executor.evaluate(func)(indices_np)
                tvm.testing.assert_allclose(result.numpy(), expected)
Ejemplo n.º 8
0
    def verify_sparse_to_dense(sparse_indices, sparse_values, default_value,
                               output_shape, xpected):
        """sparse_to_dense: check inferred type and runtime values.

        default_value may be None, in which case the op is built without it.
        `xpected` (sic, kept for caller compatibility) is the dense reference.
        """
        sparse_indices_data = np.array(sparse_indices)
        sparse_values_data = np.array(sparse_values)
        default_value_data = np.array(default_value)

        a = relay.var(
            "a",
            relay.TensorType(sparse_indices_data.shape,
                             str(sparse_indices_data.dtype)))
        b = relay.var(
            "b",
            relay.TensorType(sparse_values_data.shape,
                             str(sparse_values_data.dtype)))
        if default_value is None:
            # Two-input form: no default value variable.
            args = [a, b]
            d = relay.sparse_to_dense(a, output_shape, b)
        else:
            c = relay.var(
                "c",
                relay.TensorType(default_value_data.shape,
                                 str(default_value_data.dtype)))
            args = [a, b, c]
            d = relay.sparse_to_dense(a, output_shape, b, c)

        # Output dtype follows the values tensor, not the indices.
        zz = run_infer_type(d)
        assert zz.checked_type == relay.ty.TensorType(
            output_shape, str(sparse_values_data.dtype))

        func = relay.Function(args, d)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                if default_value is None:
                    op_res = intrp.evaluate(func)(sparse_indices_data,
                                                  sparse_values_data)
                else:
                    op_res = intrp.evaluate(func)(sparse_indices_data,
                                                  sparse_values_data,
                                                  default_value_data)
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            xpected,
                                            rtol=1e-5)
Ejemplo n.º 9
0
 def verify(dshape, begin, end, strides, output, test_ref=True):
     """strided_slice: printed attrs, optional type check, and executor values.

     output: expected result shape, or falsy to skip the type assertion.
     test_ref: when False, stop after the attribute/type checks.
     """
     x = relay.var("x", relay.TensorType(dshape, "float32"))
     z = relay.strided_slice(x, begin=begin, end=end, strides=strides)
     func = relay.Function([x], z)
     func = run_infer_type(func)
     text = func.astext()
     # The slice attributes must survive into the printed IR.
     assert "begin=" in text
     assert "end=" in text
     if output:
         assert func.body.checked_type == relay.ty.TensorType(output, "float32")
     if not test_ref:
         return
     x_data = np.random.uniform(size=dshape).astype("float32")
     ref_res = topi.testing.strided_slice_python(
         x_data, begin, end, strides)
     for target, ctx in ctx_list():
         intrp = relay.create_executor("graph", ctx=ctx, target=target)
         op_res = intrp.evaluate(func)(x_data)
         tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
Ejemplo n.º 10
0
    def _verify(data_shape, mask_value, axis, dtype, itype):
        """sequence_mask: type is unchanged; values match the topi reference."""
        max_length = data_shape[axis]
        nbatch = data_shape[1 - axis]  # the non-sequence axis is the batch axis
        data = relay.var("data", relay.TensorType(data_shape, dtype))
        valid_length = relay.var("valid_length", relay.TensorType((nbatch,), itype))
        out = relay.sequence_mask(data, valid_length, mask_value, axis)
        assert run_infer_type(out).checked_type == relay.ty.TensorType(data_shape, dtype)

        func = relay.Function([data, valid_length], out)
        data_np = np.random.uniform(size=data_shape).astype(dtype)
        lengths_np = np.random.randint(0, max_length, size=nbatch).astype(itype)
        expected = tvm.topi.testing.sequence_mask(data_np, lengths_np, mask_value, axis)

        for target, dev in tvm.testing.enabled_targets():
            for kind in ("graph", "debug"):
                executor = relay.create_executor(kind, device=dev, target=target)
                result = executor.evaluate(func)(data_np, lengths_np)
                tvm.testing.assert_allclose(result.numpy(), expected)
Ejemplo n.º 11
0
def test_dropout(executor_kind):
    """Dropout is the identity at inference; also check attrs and type inference."""
    for dtype in ("float16", "float32"):
        n, t, d = te.size_var("n"), te.size_var("t"), te.size_var("d")
        input_ty = relay.TensorType((n, t, d), dtype)
        x = relay.var("x", input_ty)
        y = relay.nn.dropout(x, rate=0.75)
        assert "rate=" in y.astext()
        assert run_infer_type(y).checked_type == input_ty

    in_np = np.random.random([4, 5, 6]).astype("float32")
    out = relay.nn.dropout(relay.const(in_np), rate=0.5)
    func = relay.Function([], out)
    for target, dev in tvm.testing.enabled_targets():
        executor = relay.create_executor(executor_kind, device=dev, target=target)
        result = executor.evaluate(func)()
        # Inference dropout must pass the input through unchanged.
        tvm.testing.assert_allclose(result.numpy(), in_np, rtol=0.01)
Ejemplo n.º 12
0
def test_batch_matmul():
    """batch_matmul type inference plus value checks across shape/transpose combos."""
    b, m, n, k = (te.size_var(s) for s in ("b", "m", "n", "k"))
    x = relay.var("x", relay.TensorType((b, m, k), "float32"))
    y = relay.var("y", relay.TensorType((b, n, k), "float32"))
    z = relay.nn.batch_matmul(x, y)
    assert run_infer_type(z).checked_type == relay.TensorType((b, m, n), "float32")

    # (x_shape, y_shape, out_shape, trans_x, trans_y)
    cases = [
        ((1, 16, 32), (1, 16, 32), (1, 16, 16), False, True),
        ((5, 16, 32), (5, 16, 32), (5, 16, 16), False, True),
        ((5, 16, 32), (5, 20, 32), (5, 16, 20), False, True),
        ((30, 16, 32), (30, 20, 32), (30, 16, 20), False, True),
        ((1, 32, 16), (1, 16, 32), (1, 16, 16), True, True),
        ((5, 16, 32), (5, 32, 16), (5, 16, 16), False, False),
        ((5, 32, 16), (5, 32, 20), (5, 16, 20), True, False),
    ]
    for x_shape, y_shape, out_shape, tx, ty in cases:
        verify_batch_matmul(x_shape, y_shape, out_shape, trans_x=tx, trans_y=ty)

    # Same tensor on both sides: exercises the shared-input path.
    x_np = np.random.randn(10, 27, 64).astype("float32")
    x = relay.var("x", shape=x_np.shape)
    verify_batch_matmul_with_inputs(x, x, x_np, x_np, (10, 27, 27))
    def verify_broadcast_to(shape, broadcast_shape):
        """Dynamic broadcast_to(shape_of(y)) must fold to a static broadcast_to."""
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(broadcast_shape, "float32"))
        z = relay.broadcast_to(x, shape=relay.shape_of(y))

        func = run_infer_type(relay.Function([x, y], z))
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()),
                             transform.InferType())

        body = func2.body
        assert isinstance(body, relay.Call)
        assert body.op == relay.op.get("broadcast_to")
        assert body.checked_type == relay.ty.TensorType(broadcast_shape, "float32")

        x_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_np = np.random.uniform(low=-1, high=1, size=broadcast_shape).astype("float32")
        verify_func(func2, [x_np, y_np], np.broadcast_to(x_np, y_np.shape))
    def verify_squeeze(shape, axis, oshape):
        """Dynamic squeeze(shape_of(y)) must fold to a static squeeze with an axis attr."""
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(axis, "float32"))
        z = relay.squeeze(x, relay.shape_of(y))
        func = run_infer_type(relay.Function([x, y], z))
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()),
                             transform.InferType())

        body = func2.body
        assert isinstance(body, relay.Call)
        assert body.op == relay.op.get("squeeze")
        assert "axis=" in body.astext()
        assert body.checked_type == relay.ty.TensorType(oshape, "float32")

        x_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_np = np.random.uniform(low=-1, high=1, size=axis).astype("float32")
        verify_func(func2, [x_np, y_np], np.squeeze(x_np, axis))
Ejemplo n.º 15
0
def test_softmax():
    """softmax: printed op name, inferred type, and values vs. the topi reference.

    Bug fix: the float16 skip previously used `return`, which exited the whole
    test before the float32 case ever ran (float16 is first in the list).
    `continue` skips only the poor-accuracy dtype.
    """
    for dtype in ["float16", "float32"]:
        # Softmax accuracy for float16 is poor; skip it but keep testing the rest.
        if dtype == "float16":
            continue
        shape = (10, 4)
        x = relay.var("x", shape=shape, dtype=dtype)
        y = relay.nn.softmax(x, axis=1)
        assert "nn.softmax" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType(shape, dtype)
        func = relay.Function([x], y)
        x_data = np.random.uniform(size=shape).astype(dtype)
        ref_res = tvm.topi.testing.softmax_python(x_data)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                x_data
            )
            np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
    def verify(dshape, begin, end, strides, output, slice_mode="end",
               test_ref=True, dtype="int32"):
        """Dynamic strided_slice (const begin/end/strides) must fold to a
        static strided_slice under DynamicToStatic, matching the numpy reference.

        NOTE(review): `output` and `test_ref` are accepted but never used in
        this body -- confirm against the callers.
        """
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        ndim = len(dshape)
        # Normalize unspecified begin/end to full-extent defaults.
        begin = begin if begin else [0] * ndim
        end = end if end else list(dshape)
        if strides:
            if len(strides) == 1:
                # A single stride broadcasts to every dimension.
                strides = strides * ndim
        else:
            strides = [1] * ndim

        # target numpy result
        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.strided_slice_python(
            x_data, begin, end, strides, slice_mode)
        # NOTE(review): `data` collects the begin/end(/strides) arrays but is
        # never read afterwards -- verify_func below gets only [x_data].
        data = [x_data, np.array(begin), np.array(end)]

        begin = relay.const(begin, dtype=dtype)
        end = relay.const(end, dtype=dtype)

        # After normalization `strides` is a non-empty list (for ndim > 0),
        # so this branch is effectively always taken.
        if strides:
            data.append(np.array(strides))
            strides = relay.const(strides, dtype=dtype)
            z = relay.strided_slice(x,
                                    begin=begin,
                                    end=end,
                                    strides=strides,
                                    slice_mode=slice_mode)
        else:
            z = relay.strided_slice(x,
                                    begin=begin,
                                    end=end,
                                    slice_mode=slice_mode)
        func = relay.Function([x], z)

        func = run_infer_type(func)
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
        assert isinstance(func2.body, relay.Call)
        assert func2.body.op == relay.op.get("strided_slice")
        verify_func(func2, [x_data], ref_res)
Ejemplo n.º 17
0
    def verify_get_valid_counts(dshape, score_threshold, id_index,
                                score_index):
        """get_valid_counts: compare against a hand-written numpy reference.

        The reference compacts boxes whose score exceeds the threshold (and,
        when id_index >= 0, whose id entry is non-negative) to the front of
        each batch row, counting them, and fills trailing slots with -1.
        """
        dtype = "float32"
        batch_size, num_anchor, elem_length = dshape
        np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
        np_out1 = np.zeros(shape=(batch_size, ))  # per-batch valid-box counts
        np_out2 = np.zeros(shape=dshape).astype(dtype)  # compacted boxes
        for i in range(batch_size):
            np_out1[i] = 0
            inter_idx = 0  # next free slot in the compacted output
            for j in range(num_anchor):
                score = np_data[i, j, score_index]
                if score > score_threshold and (id_index < 0 or
                                                np_data[i, j, id_index] >= 0):
                    # Valid box: copy it into the next compacted slot.
                    for k in range(elem_length):
                        np_out2[i, inter_idx, k] = np_data[i, j, k]
                    np_out1[i] += 1
                    inter_idx += 1
                if j >= np_out1[i]:
                    # Slots at or past the current count are padding.
                    for k in range(elem_length):
                        np_out2[i, j, k] = -1.0

        x = relay.var("x", relay.ty.TensorType(dshape, dtype))
        z = relay.vision.get_valid_counts(x, score_threshold, id_index,
                                          score_index)
        assert "score_threshold" in z.astext()
        func = relay.Function([x], z.astuple())
        func = run_infer_type(func)
        for target, ctx in ctx_list():
            intrp = relay.create_executor("debug", ctx=ctx, target=target)
            out = intrp.evaluate(func)(np_data)
            tvm.testing.assert_allclose(out[0].asnumpy(),
                                        np_out1,
                                        rtol=1e-3,
                                        atol=1e-04)
            # get_valid_count for cuda doesn't do data rearrangement
            if target == 'cuda':
                return
            tvm.testing.assert_allclose(out[1].asnumpy(),
                                        np_out2,
                                        rtol=1e-3,
                                        atol=1e-04)
Ejemplo n.º 18
0
    def verify_grid_sample(data_shape, grid_shape):
        """grid_sample (bilinear, NCHW): type inference and numeric agreement."""
        dtype = "float32"
        batch, channel, _, _ = data_shape
        _, _, out_height, out_width = grid_shape
        data = relay.var("data", relay.ty.TensorType(data_shape, dtype))
        grid = relay.var("grid", relay.ty.TensorType(grid_shape, dtype))
        out = relay.image.grid_sample(data, grid, method="bilinear", layout="NCHW")
        expected_ty = relay.TensorType((batch, channel, out_height, out_width), dtype)
        assert run_infer_type(out).checked_type == expected_ty
        func = relay.Function([data, grid], out)

        data_np = np.random.uniform(size=data_shape).astype(dtype)
        grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
        expected = tvm.topi.testing.grid_sample_nchw_python(data_np, grid_np, method="bilinear")

        for target, ctx in tvm.testing.enabled_targets():
            for kind in ("graph", "debug"):
                executor = relay.create_executor(kind, ctx=ctx, target=target)
                result = executor.evaluate(func)(data_np, grid_np)
                tvm.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-5, atol=1e-5)
    def verify_upsampling(data_shape, scale_h_val, scale_w_val, dtype):
        """Dynamic nn.upsampling with const scales must fold to a static op."""
        x = relay.var("x", relay.TensorType(data_shape, dtype))
        z = relay.nn.upsampling(x, relay.const(scale_h_val), relay.const(scale_w_val))

        func = run_infer_type(relay.Function([x], z))
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()),
                             transform.InferType())

        body = func2.body
        assert isinstance(body, relay.Call)
        assert body.op == relay.op.get("nn.upsampling")

        x_np = np.random.uniform(size=data_shape).astype(dtype)
        expected = tvm.topi.testing.resize2d_python(
            x_np, (scale_h_val, scale_w_val), "NCHW", "nearest_neighbor", "asymmetric"
        )
        verify_func(func2, [x_np], expected)
Ejemplo n.º 20
0
    def verify_batch_to_space_nd(dshape, block_shape, crops):
        """batch_to_space_nd: attrs in printed IR, inferred type, and values."""
        x_np = np.random.uniform(size=dshape).astype("float32")
        crop_begins, crop_ends = map(list, zip(*crops))
        expected = tvm.topi.testing.batch_to_space_nd_python(
            x_np, block_shape, crop_begins, crop_ends
        )

        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.nn.batch_to_space_nd(x, block_shape, crops)
        text = z.astext()
        assert "block_shape=" in text
        assert "crops=" in text
        assert run_infer_type(z).checked_type == relay.TensorType(expected.shape, "float32")
        func = relay.Function([x], z)

        for target, ctx in tvm.testing.enabled_targets():
            for kind in ("graph", "debug"):
                executor = relay.create_executor(kind, ctx=ctx, target=target)
                result = executor.evaluate(func)(x_np)
                tvm.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-4)
Ejemplo n.º 21
0
    def verify_yolo_reorg(shape, stride):
        """yolo_reorg: stride attr in printed IR, inferred type, and values."""
        x_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        expected = topi.testing.reorg_python(x_np, stride)

        x = relay.var("x", relay.TensorType(shape, "float32"))
        z = relay.vision.yolo_reorg(x, stride=stride)
        zz = run_infer_type(z)
        assert "stride=" in z.astext()
        assert zz.checked_type == relay.ty.TensorType(expected.shape, "float32")

        func = relay.Function([x], z)

        for target, ctx in ctx_list():
            for kind in ("graph", "debug"):
                executor = relay.create_executor(kind, ctx=ctx, target=target)
                result = executor.evaluate(func)(x_np)
                tvm.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-5)
Ejemplo n.º 22
0
def test_where():
    """where(cond, x, y): type inference and agreement with np.where."""
    shape, dtype = (3, 4), "float32"
    cond = relay.var("cond", relay.TensorType(shape, dtype))
    x = relay.var("x", relay.TensorType(shape, dtype))
    y = relay.var("y", relay.TensorType(shape, dtype))
    z = relay.where(cond, x, y)
    assert run_infer_type(z).checked_type == relay.TensorType(shape, dtype)

    func = relay.Function([cond, x, y], z)
    cond_np = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    x_np = np.random.uniform(size=shape).astype(dtype)
    y_np = np.random.uniform(size=shape).astype(dtype)
    expected = np.where(cond_np, x_np, y_np)
    for target, ctx in tvm.testing.enabled_targets():
        for kind in ("graph", "debug"):
            executor = relay.create_executor(kind, ctx=ctx, target=target)
            result = executor.evaluate(func)(cond_np, x_np, y_np)
            tvm.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-5)
Ejemplo n.º 23
0
def verify_batch_matmul_with_inputs(
    x, y, x_np, y_np, out_shape, dtype="float32", trans_x=False, trans_y=True
):
    """Run batch_matmul on the given relay inputs and compare with the topi reference."""
    prod = relay.nn.batch_matmul(x, y, transpose_a=trans_x, transpose_b=trans_y)
    assert run_infer_type(prod).checked_type == relay.ty.TensorType(out_shape, dtype)

    input_vars = relay.analysis.free_vars(prod)
    func = relay.Function(input_vars, prod)
    expected = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=trans_x, trans_y=trans_y)

    for target, dev in tvm.testing.enabled_targets():
        for kind in ("graph", "debug"):
            evaluator = relay.create_executor(kind, device=dev, target=target).evaluate(func)
            # When x and y are the same var, free_vars yields a single input.
            if len(input_vars) == 2:
                result = evaluator(x_np, y_np)
            else:
                result = evaluator(x_np)
            tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5, atol=1e-5)
Ejemplo n.º 24
0
def test_fold_dropout():
    """FoldConstant over a constant graph containing dropout must leave a graph
    structurally equal to the (infer-typed) original."""

    def before():
        # Fully-constant graph so FoldConstant has something to fire on.
        data = relay.const(np.arange(10).astype(np.float32))
        dropout = relay.nn.dropout(data)
        add = dropout + relay.const(1.0)
        return relay.Function(relay.analysis.free_vars(add), add)

    passes = tvm.transform.Sequential(
        [relay.transform.InferType(), relay.transform.FoldConstant()]
    )

    before_mod = tvm.IRModule.from_expr(before())
    with tvm.transform.PassContext(opt_level=3):
        after_mod = passes(before_mod)

    tvm.ir.assert_structural_equal(run_infer_type(before_mod["main"]), after_mod["main"])
Ejemplo n.º 25
0
def test_sequential_with_scoping():
    """A scoped Sequential (FoldConstant + EliminateCommonSubexpr +
    AlterOpLayout) must fold the constant subgraph and dedupe the two
    identical adds."""
    shape = (1, 2, 3)
    c_data = np.array(shape).astype("float32")  # constant values are the shape itself
    tp = relay.TensorType(shape, "float32")

    def before():
        # (c + c) * 2 is fully constant; z and z1 are identical subexpressions.
        c = relay.const(c_data)
        x = relay.var("x", tp)
        y = relay.add(c, c)
        y = relay.multiply(y, relay.const(2, "float32"))
        y = relay.add(x, y)
        z = relay.add(y, c)
        z1 = relay.add(y, c)
        z2 = relay.add(z, z1)
        return relay.Function([x], z2)

    def expected():
        # Constant subgraph folded to a single literal; the duplicated add
        # replaced by reusing z (common-subexpression elimination).
        x = relay.var("x", tp)
        c_folded = (c_data + c_data) * 2
        y = relay.add(x, relay.const(c_folded))
        z = relay.add(y, relay.const(c_data))
        z1 = relay.add(z, z)
        return relay.Function([x], z1)

    seq = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.FoldConstant(),
            relay.transform.EliminateCommonSubexpr(),
            relay.transform.AlterOpLayout(),
        ]
    )

    # AlterOpLayout requires a concrete target in scope.
    mod = tvm.IRModule({"main": before()})
    with tvm.transform.PassContext(opt_level=3):
        with tvm.target.Target("llvm"):
            mod = seq(mod)

    zz = mod["main"]
    zexpected = run_infer_type(expected())
    assert tvm.ir.structural_equal(zz, zexpected)
Ejemplo n.º 26
0
    def test_unary_op(self, target, dev, relay_op, ref_func, dtype):
        """Check a unary relay op: printed IR, inferred type, and (when
        `ref_func` is given) numerical agreement on random data."""
        target = tvm.target.Target(target)
        if (
            dtype == "float16"
            and target.kind.name == "cuda"
            and not have_fp16(tvm.cuda(0).compute_version)
        ):
            pytest.xfail("No float16 support on local cuda device")
        elif (
            dtype == "float16"
            and target.kind.name == "cuda"
            and not target.attrs.get("supports_float16", False)
        ):
            # NOTE(review): the condition tests `cuda` but the message says
            # vulkan -- one of the two looks wrong; confirm the intended target.
            pytest.xfail("No float16 support on vulkan target")

        if target.kind.name == "vulkan" and relay_op in [
            tvm.relay.erf,
            tvm.relay.tan,
            tvm.relay.atan,
        ]:
            pytest.xfail(f"Vulkan runtime doesn't yet support {relay_op}")

        shape = (10, 4)
        dtype = dtype  # no-op rebind, kept as-is
        tp = relay.TensorType(shape)
        x = relay.var("x", tp, dtype=dtype)
        y = relay_op(x)
        # test printer
        assert ("{}(%x)".format(y.op.name)) in y.astext()
        # test type inference
        yy = run_infer_type(y)
        assert yy.checked_type == tp

        if ref_func is not None:
            data = np.random.rand(*shape).astype(dtype)
            ref_res = ref_func(data)
            func = relay.Function([x], y)
            # Use the graph executor and an explicit Function so the op is not
            # constant-folded away before execution.
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
            np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
Ejemplo n.º 27
0
def get_funcs(
    data_shape,
    data_dtype,
    kernel_shape,
    kernel_dtype,
    input_zero_point,
    kernel_zero_point,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
):
    """Build the reference and QNN convolution functions over shared
    data/kernel variables and return them as a (ref_func, qnn_func) pair.
    The reference function is returned already type-inferred."""
    data = relay.var("data", shape=data_shape, dtype=data_dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
    # Both builders take the same argument list.
    shared_args = (
        data,
        kernel,
        input_zero_point,
        kernel_zero_point,
        kernel_size,
        padding,
        strides,
        dilation,
        data_layout,
        kernel_layout,
        out_dtype,
    )
    ref_func = run_infer_type(get_ref_func(*shared_args))
    qnn_func = get_qnn_func(*shared_args)
    return (ref_func, qnn_func)
Ejemplo n.º 28
0
def _verify_infiniteness_ops(relay_op, ref_op):
    """Check an isinf/isnan-style op against its numpy reference across dtypes.

    relay_op: the relay operator under test; ref_op: the numpy reference.

    Fixes: the dtype list contained 'float16' twice (redundant duplicate run),
    and `np.infty` -- a deprecated alias removed in NumPy 2.0 -- is replaced
    with `np.inf`.
    """
    for dtype in ['float32', 'float16', 'int32', 'int16']:
        shape = (2, 8, 8)
        x = relay.var("x", relay.TensorType(shape, dtype))
        y = relay_op(x)
        yy = run_infer_type(y)
        # Infiniteness checks always produce a boolean tensor of the same shape.
        assert yy.checked_type == relay.TensorType(shape, "bool")

        data = np.random.uniform(size=shape).astype(dtype)
        if dtype.startswith('float'):
            # Seed half of the elements with inf, then half with nan, so both
            # special values are present (the nan pass may overwrite inf slots).
            data.ravel()[np.random.choice(data.size,
                                          int(data.size * 0.5),
                                          replace=False)] = np.inf
            data.ravel()[np.random.choice(data.size,
                                          int(data.size * 0.5),
                                          replace=False)] = np.nan

        intrp = create_executor()
        op_res = intrp.evaluate(y, {x: data})
        ref_res = ref_op(data)
        np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
Ejemplo n.º 29
0
 def test_infer_type(batch, in_channel, size, out_channel, deformable_groups, groups):
     """deformable_conv2d type inference: the output, offset and kernel types
     must all be inferred from the conv attributes alone (offset and kernel
     vars are declared without explicit types)."""
     data_shape = (batch, in_channel, size, size)
     data = relay.var("data", shape=data_shape)
     offset = relay.var("offset")  # type left for inference
     kernel = relay.var("kernel")  # type left for inference
     kernel_size = (3, 3)
     y = relay.nn.deformable_conv2d(data, offset, kernel,
         strides=(1, 1),
         padding=(1, 1),
         dilation=(1, 1),
         kernel_size=kernel_size,
         deformable_groups=deformable_groups,
         groups=groups,
         channels=out_channel)
     # Expected inferred shapes, derived from the conv attributes above.
     weight_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])
     out_shape = (batch, out_channel, size, size)
     offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, out_shape[2], out_shape[3])
     yy = run_infer_type(y)
     assert yy.checked_type == relay.TensorType(out_shape)
     assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type
     assert yy.args[2].checked_type == relay.TensorType(weight_shape)
Ejemplo n.º 30
0
def test_broadcast_to_like(executor_kind):
    """broadcast_to_like: inferred type and runtime values follow the like-tensor."""
    src_shape, like_shape = (4, 1, 6), (3, 4, 5, 6)
    dtype = "float32"
    x = relay.Var("x", relay.ty.TensorType(src_shape, dtype))
    y = relay.Var("y", relay.ty.TensorType(like_shape, dtype))
    z = relay.broadcast_to_like(x, y)

    assert run_infer_type(z).checked_type == relay.ty.TensorType(like_shape, dtype)

    func = relay.Function([x, y], z)
    x_np = np.random.uniform(size=src_shape).astype(dtype)
    y_np = np.random.uniform(size=like_shape).astype(dtype)
    expected = np.broadcast_to(x_np, like_shape)

    for target, dev in tvm.testing.enabled_targets():
        executor = relay.create_executor(executor_kind, device=dev, target=target)
        result = executor.evaluate(func)(x_np, y_np)
        tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5)