Example #1
    def check_binary_op(opfunc, ref):
        n = tvm.var("n")
        t1 = relay.TensorType((5, n, 5))
        t2 = relay.TensorType((n, 1))
        x = relay.var("x", t1)
        y = relay.var("y", t2)
        z = opfunc(x, y)
        # test printer
        assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
        assert relay.ir_pass.infer_type(z).checked_type == t1

        if ref is not None:
            t1 = relay.TensorType((5, 10, 5))
            t2 = relay.TensorType((5, 10, 5))
            x = relay.var("x", t1)
            y = relay.var("y", t2)
            z = opfunc(x, y)
            x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
            y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)

            for target, ctx in ctx_list():
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data, y_data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
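
A hedged driver sketch (not in the excerpt): inside the enclosing test, the helper above is presumably invoked with matching relay/NumPy operator pairs, e.g.:

    # Hypothetical calls; each pair exercises the printer, the type checker,
    # and the graph-executor path above.
    check_binary_op(relay.add, np.add)
    check_binary_op(relay.subtract, np.subtract)
    check_binary_op(relay.multiply, np.multiply)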
Example #2
def test_too_many_args():
    x = relay.var('x', shape=(10, 10))
    f = relay.Function([x], x)
    y = relay.var('y', shape=(10, 10))
    check_type_err(
        f(x, y),
        "the function is provided too many arguments expected 1, found 2;")
Example #3
 def before(dim):
     X = relay.var("X", shape=(1, dim))
     W = relay.var("W", shape=(3 * dim, dim))
     matmul = relay.nn.dense(X, W)
     splitted = relay.split(matmul, indices_or_sections=3, axis=1)
     out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
     return relay.Function([X, W], out)
Example #4
def test_cmp_type():
    for op, ref in ((relay.greater, np.greater),
                    (relay.greater_equal, np.greater_equal),
                    (relay.less, np.less),
                    (relay.less_equal, np.less_equal),
                    (relay.equal, np.equal),
                    (relay.not_equal, np.not_equal)):
        x = relay.var("x", relay.TensorType((10, 4), "float32"))
        y = relay.var("y", relay.TensorType((5, 10, 1), "float32"))
        z = op(x, y)
        z.astext()
        zz = relay.ir_pass.infer_type(z)
        assert zz.checked_type == relay.TensorType((5, 10, 4), "bool")

        if ref is not None:
            x_shape = (10, 4)
            y_shape = (5, 10, 1)
            t1 = relay.TensorType(x_shape)
            t2 = relay.TensorType(y_shape)
            x = relay.var("x", t1)
            y = relay.var("y", t2)
            z = op(x, y)
            x_data = np.random.rand(*x_shape).astype(t1.dtype)
            y_data = np.random.rand(*y_shape).astype(t2.dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)

            for target, ctx in ctx_list():
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data, y_data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
Example #5
File: layers.py Project: bddppq/tvm
def dense_add_bias(data, weight=None, bias=None, units=None, **kwargs):
    """Wrapper of dense which automatically creates weights if not given.

    Parameters
    ----------
    data : relay.Expr
        The input expression.

    weight : relay.Expr, optional
        The weight of the dense layer.

    bias : relay.Expr, optional
        The bias.

    units : int, optional
        The number of hidden units.

    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """
    name = kwargs.pop("name")
    if not weight:
        weight = relay.var(name + "_weight")
    if not bias:
        bias = relay.var(name + "_bias")
    data = relay.nn.dense(data, weight, units, **kwargs)
    data = relay.nn.bias_add(data, bias, axis=-1)
    return data
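
A hedged usage sketch (hypothetical names and shapes): note that a `name` kwarg is required, since the wrapper pops it unconditionally.

data = relay.var("data", shape=(1, 16))
out = dense_add_bias(data, units=10, name="fc1")  # creates fc1_weight / fc1_bias
func = relay.Function(relay.ir_pass.free_vars(out), out)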
Example #6
def test_nms():
    num_anchors = 60

    overlap_threshold = 0.5
    force_suppress = True
    nms_topk = 10

    n = tvm.var("n")
    x0 = relay.var("x0", relay.ty.TensorType((n, num_anchors, 6), "float32"))
    x1 = relay.var("x1", relay.ty.TensorType((n,), "int"))

    z = relay.vision.nms(x0, x1, overlap_threshold, force_suppress, nms_topk)

    assert "overlap_threshold" in z.astext()
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.ty.TensorType(
        (n, num_anchors, 6), "float32")

    n = tvm.var("n")
    x0 = relay.var("x0", relay.ty.TensorType((n, num_anchors, 6), "float32"))
    x1 = relay.var("x1", relay.ty.TensorType((n,), "int"))

    z = relay.vision.nms(x0, x1)

    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.ty.TensorType(
        (n, num_anchors, 6), "float32")
Example #7
def test_mul_param():
    x = relay.var('x', shape=(10, 10))
    y = relay.var('y', shape=(1, 10))
    func = relay.Function([x, y], relay.multiply(x, y))
    x_data = np.random.rand(10, 10).astype('float32')
    y_data = np.random.rand(1, 10).astype('float32')
    check_eval(func, [x_data, y_data], x_data * y_data)
Example #8
def test_depthwise_conv2d():
    batch_size = 1
    dshape = (batch_size, 64, 56, 56)
    weight_conv = relay.var("weight_depthwiseconv", shape=(64, 1, 3, 3))
    data1 = relay.var("data1", shape=dshape)
    data2 = relay.var("data2", shape=dshape)
    depthwise_conv2d_1 = relay.nn.conv2d(
        data1,
        weight_conv,
        kernel_size=(3, 3),
        padding=(1, 1),
        groups=64)
    depthwise_conv2d_2 = relay.nn.conv2d(
        data2,
        weight_conv,
        kernel_size=(3, 3),
        padding=(1, 1),
        groups=64)
    add = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
    func = relay.Function([data1, data2, weight_conv],
                          relay.Tuple(tvm.convert([depthwise_conv2d_1,
                                                   depthwise_conv2d_2,
                                                   add])))
    func = relay.ir_pass.infer_type(func)
    compute_count = relay.ir_pass.get_total_mac_number(func)
    assert compute_count == 2 * np.prod(dshape) * 3*3
Example #9
def test_conv():
    batch_size = 1
    input_channel = 3
    h = 224
    w = 224
    output_channel = 64
    kh = 7
    kw = 7
    h_padding = 1
    w_padding = 1
    oh = h + h_padding * 2 - kh + 1
    ow = w + w_padding * 2 - kw + 1
    dshape = (batch_size, input_channel, h, w)
    weight = relay.var("weight", shape=(output_channel, input_channel, kh, kw))
    data = relay.var("data", shape=dshape)
    conv2d = relay.nn.conv2d(
        data,
        weight,
        channels=output_channel,
        kernel_size=(kh, kw),
        padding=(1, 1))
    func = relay.Function([data, weight],
                          relay.Tuple(tvm.convert([conv2d])))
    func = relay.ir_pass.infer_type(func)
    compute_count = relay.ir_pass.get_total_mac_number(func)
    expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw
    assert compute_count == expect_count
Example #10
def test_annotate_all():
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    x = relay.var("x", shape=(3,))
    y = relay.var("y", shape=(3,))
    z = relay.var("z", shape=(3,))

    def annotated():
        add = relay.add(x, y)
        _add = relay.annotation.on_device(add, ctx2)
        sub = relay.subtract(add, z)
        _sub = relay.annotation.on_device(sub, ctx2)

        func = relay.Function([x, y, z],
                              relay.Tuple(tvm.convert([_add, _sub,
                                                       sub])))
        func = relay.ir_pass.infer_type(func)
        func = relay.ir_pass.rewrite_annotated_ops(func,
                                                   ctx1.device_type)
        func = relay.ir_pass.infer_type(func)
        return relay.Function(relay.ir_pass.free_vars(func.body[2]),
                              func.body[2])

    def expected():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        return func

    annotated_func = relay.ir_pass.infer_type(annotated())
    expected_func = relay.ir_pass.infer_type(expected())
    assert relay.ir_pass.alpha_equal(annotated_func, expected_func)
Example #11
def test_annotate_none():
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    x = relay.var("x", shape=(3,))
    y = relay.var("y", shape=(3,))
    z = relay.var("z", shape=(3,))

    def annotated():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        func = relay.ir_pass.infer_type(func)
        func = relay.ir_pass.rewrite_annotated_ops(func,
                                                   ctx1.device_type)
        return func

    def expected():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        return func

    annotated_func = relay.ir_pass.infer_type(annotated())
    expected_func = relay.ir_pass.infer_type(expected())
    assert relay.ir_pass.alpha_equal(annotated_func, expected_func)
Example #12
def test_broadcast_add():
    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = 'float32'
    x_nd = rand(dtype, *shape1)
    y_nd = rand(dtype, *shape2)
    x_np = x_nd.asnumpy()
    y_np = y_nd.asnumpy()
    expected_forward = x_np + y_np
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)
    x = relay.var("x", t1)
    y = relay.var("y", t2)
    func = relay.Function([x, y], x + y)
    full_func = relay.ir_pass.infer_type(gradient(func))
    assert full_func.checked_type == relay.FuncType([t1, t2],
                                                    relay.TupleType([relay.TensorType(expected_forward.shape, dtype),
                                                                     relay.TupleType([t1, t2])]))
    ex = create_executor()
    forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.asnumpy(),
                                np.ones_like(expected_forward).sum(axis=2, keepdims=True))
    tvm.testing.assert_allclose(grad_y.asnumpy(),
                                np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
Example #13
def test_recursion():
    """
    Program:
       def @f(%n: int32, %data: float32) -> float32 {
          if (%n == 0) {
              %data
          } else {
              @f(%n - 1, log(%data))
          }
       }
    """
    sb = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    ti32 = relay.scalar_type("int32")
    tf32 = relay.scalar_type("float32")
    n = relay.var("n", ti32)
    data = relay.var("data", tf32)

    with sb.if_scope(relay.equal(n, relay.const(0, ti32))):
        sb.ret(data)
    with sb.else_scope():
        sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data)))
    mod = relay.Module()
    mod[f] = relay.Function([n, data], sb.get())
    assert "@f(%1, %2) /* ty=float32 */" in mod.astext()
    assert mod[f].checked_type == relay.FuncType([ti32, tf32], tf32)
Example #14
def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"):
    x = relay.var("data", relay.TensorType(data, dtype))
    if alpha:
        y = relay.var("alpha", relay.TensorType(alpha, dtype))
    else:
        y = relay.var("alpha", relay.IncompleteType())
    z = relay.nn.prelu(x, y, axis=axis)
    zz = relay.ir_pass.infer_type(z)
    if axis != 1:
        assert "axis" in z.astext()
    assert zz.checked_type == relay.ty.TensorType(output, dtype)
    if not alpha:
        axis = axis if axis else 1
        alpha_shape = (data[axis],)
        assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")

    if all(isinstance(v, tvm.expr.Var) for v in data) or not alpha:
        return

    func = relay.Function([x, y], z)
    x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
    a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)

    if axis == 1:
        ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data
    else:
        ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, a_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data, a_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
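
A hedged usage sketch (hypothetical shapes, chosen to match the reshape(3, 1, 1) and reshape(1, 1, 3) branches above):

verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))   # channel-first, axis=1
verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))   # channel-last, axis=3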
Example #15
def test_tuple():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    z = relay.var("z", t)
    tup = relay.Var("tup")
    func = relay.Function([x, y, z], relay.Let(tup, relay.Tuple([x, y, z]),
                                               relay.TupleGetItem(tup, 0) +
                                               relay.TupleGetItem(tup, 1) -
                                               relay.TupleGetItem(tup, 2)))
    back_func = relay.ir_pass.infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])]))
    x_nd = rand(dtype, *shape)
    y_nd = rand(dtype, *shape)
    z_nd = rand(dtype, *shape)
    x_np = x_nd.asnumpy()
    y_np = y_nd.asnumpy()
    z_np = z_nd.asnumpy()
    expected_forward = x_np + y_np - z_np
    ex = create_executor()
    forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd, z_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy()))
    tvm.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy()))
    tvm.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy()))
Example #16
 def verify_take(dshape, indices_shape, oshape, axis=None):
     x = relay.var("x", relay.TensorType(dshape, "float32"))
     indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
     y = relay.take(x, indices, axis=axis)
     y.astext()
     yy = relay.ir_pass.infer_type(y)
     assert yy.checked_type == relay.TensorType(oshape, "float32")
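
A hedged usage sketch (hypothetical shapes): with axis=0 take gathers rows; with axis=None it gathers from the flattened input, so the output shape equals the indices shape.

verify_take((4,), (1,), (1,), 0)
verify_take((2, 3), (3, 4), (3, 4))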
Example #17
def test_infer_type_leaky_relu():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.leaky_relu(x, alpha=0.1)
    "alpha=0.1" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.leaky_relu(x, alpha=0.1)
    assert "alpha=0.1" in z.astext()
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = np.where(x_data > 0, x_data, x_data * 0.1)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example #18
def test_plan_memory():
    # it is sufficient to cycle through two memories.

    x = relay.var("x", shape=(10,))
    y = relay.var("y", shape=(1,))
    y2 = relay.exp(y)
    z = relay.add(x, y2)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    func = relay.ir_pass.infer_type(func)
    func = relay.ir_pass.fuse_ops(func, opt_level=0)
    func = relay.ir_pass.infer_type(func)
    smap = relay.backend._backend.GraphPlanMemory(func)
    storage_ids = set()
    device_types = set()
    for k, v in smap.items():
        assert len(v) == 2
        for x in v[0]:
            storage_ids.add(x.value)
        for x in v[1]:
            device_types.add(x.value)

    # The current rule requires each var to have a unique storage id,
    # and because we don't do in-place updates we need another
    # two alternating temporary spaces.
    assert len(storage_ids) == 4
    assert len(device_types) == 1
Example #19
def get_net(batch_size, random_len=100, oshape=(3, 64, 64), ngf=128, code=None, dtype="float32"):
    """get net of dcgan generator"""
    assert oshape[-1] == 64, "Only support 64x64 image"
    assert oshape[-2] == 64, "Only support 64x64 image"

    code = relay.var("data", dtype=dtype, shape=(batch_size, random_len)) if code is None else code
    dense_weight = relay.var("dense_weight")
    dense = relay.nn.dense(code, weight=dense_weight, units=4*4*ngf*8)
    relu = relay.nn.relu(dense)
    # 4 x 4
    reshape = relay.reshape(relu, newshape=(-1, ngf * 8, 4, 4))
    # 8 x 8
    dc8 = deconv2d_bn_relu(
        reshape, ishape=(ngf * 8, 4, 4), oshape=(ngf * 4, 8, 8), kshape=(4, 4), prefix="g2")
    # 16x16
    dc16 = deconv2d_bn_relu(
        dc8, ishape=(ngf * 4, 8, 8), oshape=(ngf * 2, 16, 16), kshape=(4, 4), prefix="g3")
    # 32x32
    dc32 = deconv2d_bn_relu(
        dc16, ishape=(ngf * 2, 16, 16), oshape=(ngf, 32, 32), kshape=(4, 4), prefix="g4")
    # 64x64
    dc64 = deconv2d(
        dc32, ishape=(ngf, 32, 32), oshape=oshape[-3:], kshape=(4, 4), name="g5_deconv")
    tanh = relay.tanh(dc64)

    args = relay.ir_pass.free_vars(tanh)
    return relay.Function(args, tanh)
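
A hedged driver sketch, assuming the surrounding DCGAN module (which defines deconv2d and deconv2d_bn_relu) is importable:

net = get_net(batch_size=4)             # hypothetical batch size
net = relay.ir_pass.infer_type(net)     # type-check the generator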
Example #20
    def test_run(batch, in_channel, size, out_channel, deformable_groups, groups):
        kernel_size = (3, 3)
        data_shape = (batch, in_channel, size, size)
        offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, size, size)
        kernel_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])
        dtype = 'float32'
        data = relay.var("data", shape=data_shape, dtype=dtype)
        offset = relay.var("offset")
        kernel = relay.var("kernel")
        y = relay.nn.deformable_conv2d(data, offset, kernel,
            strides=(1, 1),
            padding=(1, 1),
            dilation=(1, 1),
            kernel_size=kernel_size,
            deformable_groups=deformable_groups,
            groups=groups,
            channels=out_channel)
        func = relay.Function([data, offset, kernel], y)
        data = np.random.uniform(size=data_shape).astype(dtype)
        offset = np.random.uniform(size=offset_shape).astype(dtype)
        kernel = np.random.uniform(size=kernel_shape).astype(dtype)
        ref_res = topi.testing.deformable_conv2d_nchw_python(
            data, offset, kernel, stride=(1, 1), padding=(1, 1),
            dilation=(1, 1), deformable_groups=deformable_groups, groups=groups)

        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp1 = relay.create_executor(kind, ctx=ctx, target=target)
                op_res1 = intrp1.evaluate(func)(data, offset, kernel)
                tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
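
A hedged driver sketch: hypothetical parameter sets for (batch, in_channel, size, out_channel, deformable_groups, groups).

    test_run(1, 4, 16, 4, 1, 1)
    test_run(2, 4, 16, 4, 4, 1)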
Example #21
    def check_binary_op(opfunc, ref):
        # TODO(@jroesch): this piece of code improperly uses type variables.
        n = tvm.var("n")
        s1 = (5, n, 5)
        s2 = (n, 1)
        t1 = relay.TensorType(s1)
        t2 = relay.TensorType(s2)
        x = relay.var("x", t1)
        y = relay.var("y", t2)
        z = opfunc(x, y)
        # test printer
        assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
        assert relay.ir_pass.infer_type(z).checked_type == t1

        if ref is not None:
            t1 = relay.TensorType((5, 10, 5))
            t2 = relay.TensorType((5, 10, 5))
            x = relay.var("x", t1)
            y = relay.var("y", t2)
            z = opfunc(x, y)
            x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
            y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)

            for target, ctx in ctx_list():
                # use the graph executor by default for testing; we need to
                # create the function explicitly to avoid constant folding.
                intrp = relay.create_executor("graph", ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(x_data, y_data)
                np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
Example #22
    def test_threshold():
        num_anchors = 5
        num_classes = 5
        n = tvm.var("n")
        cls_prob = relay.var(
            "cls_prob",
            relay.ty.TensorType((n, num_anchors, num_classes), "float32"))
        loc_pred = relay.var(
            "loc_pred", relay.ty.TensorType((n, num_anchors * 4), "float32"))
        anchors = relay.var(
            "anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
        threshold = 0.02
        variances = (0.2, 0.2, 0.3, 0.3)

        ret = relay.vision.multibox_transform_loc(
            cls_prob=cls_prob,
            loc_pred=loc_pred,
            anchor=anchors,
            threshold=threshold,
            variances=variances)
        ret = relay.ir_pass.infer_type(ret.astuple())
        ref_type = relay.ty.TupleType(
            tvm.convert([
                relay.ty.TensorType((n, num_anchors, 6), "float32"),
                relay.ty.TensorType((n, ), "int")
            ]))
        assert ret.checked_type == ref_type
Example #23
def _test_upsampling(layout, method):
    n, c, h, w = tvm.var("n"), 16, 32, 32
    scale = 2
    dtype = "float32"
    def get_shape():
        if layout == "NCHW":
            return (c, h, w), (c, h*scale, w*scale)
        else:
            return (h, w, c), (h*scale, w*scale, c)
    ishape, oshape = get_shape()
    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
    dshape = (1,) + ishape
    x = relay.var("x", shape=dshape)
    y = relay.nn.upsampling(x, scale=scale, layout=layout, method=method)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    if method == "NEAREST_NEIGHBOR":
        ref = topi.testing.upsampling_python(data, scale, layout)
    else:
        ref = topi.testing.bilinear_resize_python(data, (h*scale, w*scale), layout)
    for target, ctx in ctx_list():
        executor = relay.create_executor("graph", ctx=ctx, target=target)
        out = executor.evaluate(func)(data)
        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
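
A hedged usage sketch covering both layouts and both interpolation methods named above:

_test_upsampling("NCHW", "NEAREST_NEIGHBOR")
_test_upsampling("NHWC", "BILINEAR")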
Example #24
def test_lrn():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
    assert "alpha=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    size = 5
    axis = 1
    bias = 0.5
    alpha = .00001
    beta = 0.75
    z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example #25
def test_l2_normalize():
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
    assert "axis=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w))

    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    eps = 0.001
    axis = 1
    z = relay.nn.l2_normalize(x, eps=eps, axis=[axis])
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example #26
def test_conv2d_transpose_infer_type():
    # symbolic in batch dimension
    n, c, h, w = tvm.var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.IncompleteType())
    y = relay.nn.conv2d_transpose(x, w,
                                  kernel_size=(3, 3),
                                  padding=(1, 1),
                                  channels=15)
    assert "channels=15" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 10, 12), "float32")
    assert yy.args[1].checked_type == relay.TensorType(
        (10, 15, 3, 3), "float32")

    # infer by shape of w, mixed precision
    n, c, h, w = tvm.var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
    y = relay.nn.conv2d_transpose(x, w,
                                  output_padding=(1, 1),
                                  channels=11,
                                  data_layout="NHWC")
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (n, 15, 15, 11), "float32")
Example #27
def test_flatten_infer_type():
    d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
    x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")

    x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 24), "float32")

    x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")

    shape = (1, 5, 10, 10)
    o_shape = (1, 500)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.batch_flatten(x)
    yy = relay.ir_pass.infer_type(z)
    assert yy.checked_type == relay.TensorType(o_shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = x_data.flatten().reshape(o_shape)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
        op_res2 = intrp2.evaluate(func)(x_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
Example #28
    def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
                        padding=(1, 1),
                        fref=None,
                        groups=1,
                        dilation=(1, 1),
                        except_targets=None,
                        **attrs):
        if except_targets is None:
            except_targets = []

        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", dtype=dtype)
        y = relay.nn.conv2d(x, w,
                            padding=padding,
                            dilation=dilation,
                            groups=groups,
                            **attrs)
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
        if fref is None:
            ref_res = topi.testing.conv2d_nchw_python(
                data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
                groups=groups)
        else:
            ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))


        for target, ctx in ctx_list():
            if target in except_targets:
                continue
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data, kernel)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
Example #29
def test_binary_int_broadcast():
    for op, ref in [(relay.right_shift, np.right_shift),
                    (relay.left_shift, np.left_shift),
                    (relay.mod, np.mod),
                    (relay.maximum, np.maximum),
                    (relay.minimum, np.minimum)]:
        x = relay.var("x", relay.TensorType((10, 4), "int32"))
        y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
        z = op(x, y)
        zz = relay.ir_pass.infer_type(z)
        assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")

    if ref is not None:
        x_shape = (10, 4)
        y_shape = (5, 10, 1)
        t1 = relay.TensorType(x_shape, 'int32')
        t2 = relay.TensorType(y_shape, 'int32')
        x_data = np.random.rand(*x_shape).astype(t1.dtype)
        y_data = np.random.rand(*y_shape).astype(t2.dtype)
        func = relay.Function([x, y], z)
        ref_res = ref(x_data, y_data)

        for target, ctx in ctx_list():
            intrp = relay.create_executor("graph", ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x_data, y_data)
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
Example #30
    def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):
        data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
        rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
        z = relay.vision.roi_pool(data, rois, pooled_size=(pooled_size, pooled_size),
                                   spatial_scale=spatial_scale, layout="NCHW")
        zz = relay.ir_pass.infer_type(z)

        batch, channel, in_size, _ = data_shape
        num_roi = rois_shape[0]
        assert zz.checked_type == relay.ty.TensorType(
                (num_roi, channel, pooled_size, pooled_size), "float32")

        func = relay.Function([data, rois], z)
        func = relay.ir_pass.infer_type(func)
        np_data = np.random.uniform(size=data_shape).astype("float32")
        np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size
        np_rois[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype('float32')
        ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size,
                                                     spatial_scale=spatial_scale)
        for target, ctx in ctx_list():
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(np_data, np_rois)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4)
            intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
            op_res2 = intrp2.evaluate(func)(np_data, np_rois)
            tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4)
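
A hedged usage sketch (hypothetical shapes; each ROI row is a batch index plus four coordinates):

    verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0)
    verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=14, spatial_scale=0.5)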
Example #31
def test_softmax():
    shape = (10, 10)
    x = relay.var("x", shape=shape)
    y = relay.nn.softmax(x, axis=1)
    func = relay.Function([x], y)
    _construct_model(func)
Example #32
def test_multibox_prior():
    def get_ref_result(dshape,
                       sizes=(1.0, ),
                       ratios=(1.0, ),
                       steps=(-1.0, -1.0),
                       offsets=(0.5, 0.5),
                       clip=True):
        in_height = dshape[2]
        in_width = dshape[3]
        num_sizes = len(sizes)
        num_ratios = len(ratios)
        size_ratio_concat = sizes + ratios
        steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
        steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
        offset_h = offsets[0]
        offset_w = offsets[1]

        oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)
        dtype = "float32"
        np_out = np.zeros(oshape).astype(dtype)

        for i in range(in_height):
            center_h = (i + offset_h) * steps_h
            for j in range(in_width):
                center_w = (j + offset_w) * steps_w
                for k in range(num_sizes + num_ratios - 1):
                    w = size_ratio_concat[k] * in_height / in_width / 2.0 if k < num_sizes else \
                        size_ratio_concat[0] * in_height / in_width * math.sqrt(size_ratio_concat[k + 1]) / 2.0
                    h = size_ratio_concat[k] / 2.0 if k < num_sizes else \
                        size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0
                    count = i * in_width * (num_sizes + num_ratios - 1) + j * (
                        num_sizes + num_ratios - 1) + k
                    np_out[0][count][0] = center_w - w
                    np_out[0][count][1] = center_h - h
                    np_out[0][count][2] = center_w + w
                    np_out[0][count][3] = center_h + h
        if clip:
            np_out = np.clip(np_out, 0, 1)

        return np_out

    def verify_multibox_prior(x,
                              dshape,
                              ref_res,
                              sizes=(1.0, ),
                              ratios=(1.0, ),
                              steps=(-1.0, -1.0),
                              offsets=(0.5, 0.5),
                              clip=True,
                              check_size=False,
                              check_type_only=False):

        z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)
        zz = relay.ir_pass.infer_type(z)
        if check_size:
            assert "sizes=" in z.astext()
        assert zz.checked_type == relay.TensorType(
            (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4),
            "float32")

        if check_type_only:
            return

        data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        func = relay.Function([x], z)
        func = relay.ir_pass.infer_type(func)
        for target, ctx in ctx_list():
            intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
            op_res1 = intrp1.evaluate(func)(data)
            tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
            intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
            op_res2 = intrp2.evaluate(func)(data)
            tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)

    sizes = (0.3, 1.5, 0.7)
    ratios = (1.3, 2.4)
    steps = (2.0, 1.5)
    offsets = (0.2, 0.3)
    dshape = (1, 3, 56, 56)
    ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets)
    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x,
                          dshape,
                          ref_res,
                          sizes,
                          ratios,
                          steps,
                          offsets,
                          check_size=True)
    y = relay.var("y", relay.TensorType((tvm.var("n"), 3, 56, 56), "float32"))
    verify_multibox_prior(y,
                          dshape,
                          ref_res,
                          sizes,
                          ratios,
                          steps,
                          offsets,
                          check_size=True,
                          check_type_only=True)

    dshape = (1, 24, 32, 32)
    ref_res = get_ref_result(dshape, clip=False)
    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x, dshape, ref_res, clip=False)
    y = relay.var("y", relay.TensorType((tvm.var("n"), 24, 32, 32), "float32"))
    verify_multibox_prior(y, dshape, ref_res, clip=False, check_type_only=True)
Example #33
 def verify_yolo_reorg(shape, stride, out_shape):
     x = relay.var("x", relay.TensorType(shape, "float32"))
     z = relay.vision.yolo_reorg(x, stride=stride)
     zz = relay.ir_pass.infer_type(z)
     assert "stride=" in z.astext()
     assert zz.checked_type == relay.ty.TensorType(out_shape, "float32")
Example #34
def test_match():
    # pair each match keyword with whether it specifies a complete match or not
    match_keywords = [("match", True), ("match?", False)]
    for (match_keyword, is_complete) in match_keywords:
        mod = tvm.IRModule()

        list_var = relay.GlobalTypeVar("List")
        typ_var = relay.TypeVar("A")
        cons_constructor = relay.Constructor(
            "Cons", [typ_var, list_var(typ_var)], list_var)
        nil_constructor = relay.Constructor("Nil", [], list_var)
        list_def = relay.TypeData(
            list_var,
            [typ_var],
            [cons_constructor, nil_constructor])
        mod[list_var] = list_def

        length_var = relay.GlobalVar("length")
        typ_var = relay.TypeVar("A")
        input_type = list_var(typ_var)
        input_var = relay.Var("xs", input_type)
        rest_var = relay.Var("rest")
        cons_case = relay.Let(
            relay.var("", type_annotation=None),
            UNIT,
            relay.add(relay.const(1), relay.Call(length_var, [rest_var])))
        body = relay.Match(input_var,
            [relay.Clause(
                relay.PatternConstructor(
                    cons_constructor,
                    [relay.PatternWildcard(), relay.PatternVar(rest_var)]),
                cons_case),
            relay.Clause(
                relay.PatternConstructor(nil_constructor, []),
                relay.const(0))],
            complete=is_complete
        )
        length_func = relay.Function(
            [input_var],
            body,
            int32,
            [typ_var]
        )
        mod[length_var] = length_func

        assert_parse_module_as(
            """
            %s

            def @length[A](%%xs: List[A]) -> int32 {
              %s (%%xs) {
                Cons(_, %%rest : List[A]) => {
                  ();
                  1 + @length(%%rest)
                },
                Nil => 0,
              }
            }
            """ % (LIST_DEFN, match_keyword),
            mod
        )
Example #35
def quantized_conv2d(data,
                     kernel_dtype,
                     name,
                     input_channels,
                     kernel_size,
                     output_channels,
                     strides=(1, 1),
                     padding=(0, 0),
                     weight=None,
                     add_bias=False,
                     input_scale=8.0,
                     kernel_scale=8.0,
                     input_zero_point=0.0,
                     kernel_zero_point=0.0,
                     data_layout='NCHW',
                     kernel_layout='OIHW',
                     **kwargs):
    """Wrapper of qnn.conv2d
    Parameters
    ----------
    data : relay.Expr
        The input expression.

    weight : relay.Expr
        The weight to conv2d.

    name : str
        The name of this convolution.

    input_channels : int
        The number of input channels.

    output_channels : int
        The number of output channels.

    input_scale : float
        The scale of input.

    kernel_scale : float
        The scale of kernel.

    input_zero_point : float
        The zero point of input.

    kernel_zero_point : float
        The zero point of kernel.

    kwargs : dict
        Additional arguments.

    Returns
    -------
    result : relay.Expr
        The result.
    """

    input_zero_point = relay.const(input_zero_point, 'int32')
    kernel_zero_point = relay.const(kernel_zero_point, 'int32')

    if isinstance(input_scale, float):
        input_scale = relay.const(input_scale, 'float32')
    else:
        input_scale = relay.const(input_scale.astype('float32'), 'float32')

    if isinstance(kernel_scale, float):
        kernel_scale = relay.const(kernel_scale, 'float32')
    else:
        kernel_scale = relay.const(kernel_scale.astype('float32'), 'float32')

    if kernel_layout == "OIHW":
        kernel_shape = (output_channels, input_channels, kernel_size[0],
                        kernel_size[1])
    elif kernel_layout == "HWIO":
        kernel_shape = (kernel_size[0], kernel_size[1], input_channels,
                        output_channels)
    elif kernel_layout == "HWOI":
        kernel_shape = (kernel_size[0], kernel_size[1], output_channels,
                        input_channels)
    elif kernel_layout == "OHWI":
        kernel_shape = (output_channels, kernel_size[0], kernel_size[1],
                        input_channels)
    else:
        raise RuntimeError(
            "Unsupported kernel layout {}".format(kernel_layout))

    if weight is None:
        weight = relay.var(name + "_weight",
                           shape=kernel_shape,
                           dtype=kernel_dtype)

    conv2d = relay.qnn.op.conv2d(data,
                                 weight,
                                 input_zero_point,
                                 kernel_zero_point,
                                 input_scale,
                                 kernel_scale,
                                 kernel_size=kernel_size,
                                 channels=output_channels,
                                 data_layout=data_layout,
                                 kernel_layout=kernel_layout,
                                 strides=strides,
                                 padding=padding,
                                 **kwargs)

    if add_bias:
        if data_layout == 'NCHW':
            bias_shape = (1, output_channels, 1, 1)
        elif data_layout == 'NHWC':
            bias_shape = (1, 1, 1, output_channels)
        elif data_layout == 'HWCN':
            bias_shape = (1, 1, output_channels, 1)
        elif data_layout == 'HWNC':
            bias_shape = (1, 1, 1, output_channels)
        else:
            raise RuntimeError(
                "Unsupported conv2d layout {}".format(data_layout))

        bias = relay.var(name + "_bias", shape=bias_shape, dtype="int32")
        return relay.add(conv2d, bias)
    else:
        return conv2d
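
A hedged usage sketch (hypothetical names and shapes): an int8 3x3 convolution block built through the wrapper above.

data = relay.var("data", shape=(1, 32, 56, 56), dtype="int8")
out = quantized_conv2d(data, kernel_dtype="int8", name="conv1",
                       input_channels=32, kernel_size=(3, 3),
                       output_channels=64, padding=(1, 1), add_bias=True)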
Example #36
def quantized_dense(data,
                    name,
                    input_zero_point,
                    kernel_zero_point,
                    input_scale,
                    kernel_scale,
                    units,
                    kernel_shape,
                    kernel_dtype,
                    add_bias=False,
                    out_dtype="int32"):
    """Qnn Dense operator.
    Applies a quantized linear transformation

    .. math::

        Y = X * W

    Parameters
    ----------
    data : tvm.relay.Expr
        The quantized input data to the operator.
    name : str
        The name prefix for the created weight and bias variables.
    input_zero_point: tvm.relay.Expr
        The input zero point.
    kernel_zero_point: tvm.relay.Expr
        The kernel zero point.
    input_scale: tvm.relay.Expr
        The scale for the input tensor.
    kernel_scale: tvm.relay.Expr
        The scale for the weight tensor. The scale for the weight tensor is
        stored for access to this during relay. This information is not
        needed in the pass pipeline after qnn.conv2d is lowered to the
        sequence of steps as in nn.conv2d. See also input_scale in Requantize.
    units : int
        Number of hidden units of the dense transformation.
    out_dtype : str, optional
        Specifies the output data type for mixed-precision dense; can be int32 or int16.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """

    input_zero_point = relay.const(input_zero_point, 'int32')
    kernel_zero_point = relay.const(kernel_zero_point, 'int32')
    if isinstance(input_scale, float):
        input_scale = relay.const(input_scale, 'float32')
    else:
        input_scale = relay.const(input_scale.astype('float32'), 'float32')

    if isinstance(kernel_scale, float):
        kernel_scale = relay.const(kernel_scale, 'float32')
    else:
        kernel_scale = relay.const(kernel_scale.astype('float32'), 'float32')

    weight = relay.var(name + "_weight",
                       shape=kernel_shape,
                       dtype=kernel_dtype)

    dense = relay.qnn.op.dense(data, weight, input_zero_point,
                               kernel_zero_point, input_scale, kernel_scale,
                               units, out_dtype)
    if add_bias:
        bias = relay.var(name + "_bias", dtype="int32")
        return relay.nn.bias_add(dense, bias, axis=-1)
    else:
        return dense
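
A hedged usage sketch (hypothetical names and shapes): a 10-unit int8 dense layer built through the wrapper above.

data = relay.var("data", shape=(1, 64), dtype="int8")
out = quantized_dense(data, name="fc1",
                      input_zero_point=0, kernel_zero_point=0,
                      input_scale=8.0, kernel_scale=8.0,
                      units=10, kernel_shape=(10, 64),
                      kernel_dtype="int8", add_bias=True)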
Example #37
def test_global_avg_pool2d():
    shape = (10, 10, 10, 10)
    x = relay.var("x", shape=shape)
    y = relay.nn.global_avg_pool2d(x)
    func = relay.Function([x], y)
    _construct_model(func)
Example #38
def test_batch_flatten():
    shape = (10, 10, 10)
    x = relay.var("x", shape=shape)
    y = relay.nn.batch_flatten(x)
    func = relay.Function([x], y)
    _construct_model(func)
Example #39
def test_multiply():
    shape = (10, 10)
    x = relay.var("x", shape=shape)
    y = x * x
    func = relay.Function([x], y)
    _construct_model(func)
Example #40
def test_relu():
    shape = (10, 10)
    x = relay.var("x", shape=shape)
    y = relay.nn.relu(x)
    func = relay.Function([x], y)
    _construct_model(func)
Example #41
 def get_graph(op, x_shape=(1, 2, 3, 4), axis=(2, 3), keepdims=False):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = op(x, axis=axis, keepdims=keepdims)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
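
A hedged driver sketch, assuming the surrounding test harness provides a run_and_verify_func() helper that compiles and compares the returned graph:

run_and_verify_func(get_graph(relay.sum, axis=(2, 3), keepdims=True))
run_and_verify_func(get_graph(relay.mean))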
Example #42
def test_clip():
    shape = (10, 10)
    x = relay.var("x", shape=shape)
    y = relay.clip(x, a_min=0.0, a_max=1.0)
    func = relay.Function([x], y)
    _construct_model(func)
Example #43
 def get_graph(x_shape=(1, 8, 3, 3)):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.clip(x, a_min=-0.2, a_max=0.4)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #44
 def get_graph(op, x_shape=(1, 3, 32, 32), out_size=(1, 1)):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = op(x, out_size)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #45
 def get_graph(x_shape, pad_width):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.nn.pad(x, pad_width=pad_width)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #46
 def get_graph(x_shape=(1, 8, 3, 3)):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.nn.leaky_relu(x, alpha=0.1)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #47
 def get_graph(x_shape, order):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.transpose(x, order)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #48
 def get_graph(x_shape, axis):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.nn.softmax(x, axis=axis)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #49
 def get_graph(x_shape=(1, 3), axis=1, num_newaxis=1):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.expand_dims(x, axis, num_newaxis)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #50
 def get_graph(x_shape=(1, 16)):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     beta = relay.const(1, dtype="float32")
     out = relay.multiply(x, beta)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #51
 def get_graph(x_shape=(1, 16), channels=16):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     bias = relay.var("bias", shape=(channels,), dtype="float32")
     out = relay.nn.bias_add(x, bias)
     f = relay.Function([x, bias], out)
     return f, {"x": x_shape, "bias": (channels,)}, ["bias"]
Example #52
 def get_graph(x_shape, new_shape):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.reshape(x, new_shape)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #53
    def expected(dshape):
        # segment 0
        x = relay.var("p0", shape=dshape)
        y = relay.add(x, relay.const(1, "float32"))
        f0 = relay.Function([x], y)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        # segment 1
        x = relay.var("p0", shape=dshape)
        w = relay.var("p1")
        y = relay.nn.conv2d(x,
                            w,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            channels=16)
        y1 = relay.add(relay.const(1, "float32"), y)
        y = relay.add(y, y1)
        f1 = relay.Function([x, w], y)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        # segment 2
        x = relay.var("p0", shape=dshape)
        w = relay.var("p1")
        z2 = relay.nn.conv2d(x,
                             w,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             channels=16)
        f2 = relay.Function([x, w], z2)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        # segment 3
        x = relay.var("p0", shape=dshape)
        w = relay.var("p1")
        offset = relay.var("p2", shape=dshape)
        z3 = relay.nn.conv2d(x,
                             w,
                             kernel_size=(1, 1),
                             padding=(0, 0),
                             channels=16)
        z3 = relay.add(z3, offset)
        f3 = relay.Function([x, w, offset], z3)
        f3 = f3.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        # compose
        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        y = relay.Call(f1, [y, relay.var("w1")])
        z2 = relay.Call(f2, [y, relay.var("w3")])
        z3 = relay.Call(f3, [y, relay.var("w2"), z2])
        z = z3
        return relay.Function(relay.analysis.free_vars(z), z)
Example #54
 def get_graph(x_shape=(1, 3, 4, 6)):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.nn.batch_flatten(x)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
Example #55
 def before(branch_len, num_diamond):
     x = relay.var("x", shape=(10, 20))
     out = x
     for _ in range(num_diamond):
         out = create_diamond(out, branch_len)
     return relay.Function([x], out)
Example #56
 def before():
     x = relay.var("x", shape=(16, channel_size))
     softmax = relay.nn.softmax(x)
     out = relay.cast(softmax, "float16")
     return relay.Function([x], out)
Example #57
 def before():
     shape = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
     x = relay.var("x", shape=shape)
     concat = relay.concatenate([x, x], axis=-1)
     out = relay.op.take(concat, indices=relay.const([0], dtype="int64"))
     return relay.Function(relay.analysis.free_vars(out), out)
Example #58
 def create_diamond_func(inp):
     inp_var = relay.var("p", shape=(10, 20))
     d = create_diamond(inp_var, branch_len)
     f = relay.Function([inp_var], d)
     f = f.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
     return relay.Call(f, [inp])
Example #59
 def before(n):
     x = relay.var("x", shape=(10, 20))
     y = x
     for i in range(n):
         y = relay.exp(y)
     return relay.Function([x], y)
Example #60
 def before():
     x = relay.var("x", shape=(), dtype="int32")
     less = relay.less(x, relay.const(10, dtype="int32"))
     z = relay.min(less)
     return relay.Function([x], z)