def create_diamond(x, branch_len):
    x1 = x
    x2 = x
    for _ in range(branch_len):
        x1 = relay.exp(x1)
        x2 = relay.exp(x2)
    return relay.add(x1, x2)
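create_diamond builds two parallel chains of exp ops that re-join in a single add. A minimal sketch of driving fusion over such a diamond, assuming a TVM build with Relay (the shape and branch length are illustrative):

import tvm
from tvm import relay

x = relay.var("x", shape=(10, 20))
func = relay.Function([x], create_diamond(x, branch_len=4))
mod = relay.transform.FuseOps(fuse_opt_level=2)(tvm.IRModule.from_expr(func))
print(mod["main"])  # both branches end up inside one fused primitive function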
Example #2
def expected():
    x = relay.var("p", shape=(10, 20))
    y = x
    for i in range(max_fused_ops):
        y = relay.exp(y)
    f1 = relay.Function([x], y)
    x = relay.var("x", shape=(10, 20))
    z = relay.Call(f1, [x])
    xx = relay.var("pp", shape=(10, 20))
    yy = xx
    for i in range(n - max_fused_ops):
        yy = relay.exp(yy)
    f2 = relay.Function([xx], yy)
    zz = relay.Call(f2, [z])
    return relay.Function([x], zz)
Example #3
    def build_export_vm(device):
        """relay build & export graph"""
        x = relay.var("x", shape=(10, 5))
        y = relay.var("y", shape=(1, 5))
        z = relay.add(x, y)
        z = relay.exp(z)
        func = relay.Function([x, y], z)
        x_data = np.random.rand(10, 5).astype("float32")
        y_data = np.random.rand(1, 5).astype("float32")

        pt_device = torch.device(device)
        if pt_device.type == "cuda":
            target = "cuda"
            ctx = tvm.cuda(pt_device.index)
        else:
            target = "llvm"
            ctx = tvm.cpu(0)
        exe = relay.vm.compile(tvm.IRModule.from_expr(func),
                               target=target,
                               params={})
        code, lib = exe.save()
        export_dir = tempfile.mkdtemp("tvm_export")
        # export to tempdir
        lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
        with open(os.path.join(export_dir, TVM_ASSETS[1]), "wb") as fout:
            fout.write(code)
        vm = tvm.runtime.vm.VirtualMachine(exe, ctx)
        res = vm.run(x_data, y_data)
        ref_res = np.exp(y_data + x_data)
        tvm.testing.assert_allclose(res.numpy(), ref_res, atol=1e-5, rtol=1e-5)
        return export_dir
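The exported assets can be reloaded into a fresh VM later. A minimal sketch (load_vm is a hypothetical helper; TVM_ASSETS is the same two-element name list used above):

def load_vm(export_dir, ctx):
    # Load the compiled kernels and the serialized VM bytecode back in.
    lib = tvm.runtime.load_module(os.path.join(export_dir, TVM_ASSETS[0]))
    with open(os.path.join(export_dir, TVM_ASSETS[1]), "rb") as fin:
        code = fin.read()
    exe = tvm.runtime.vm.Executable.load_exec(code, lib)
    return tvm.runtime.vm.VirtualMachine(exe, ctx)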
Example #4
def expected():
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    add = x0 + y0
    # Function that uses C compiler
    func = relay.Function([x0, y0], add)
    func = func.set_attribute("Primitive", tvm.expr.IntImm("int32", 1))
    func = func.set_attribute("Compiler",
                              tvm.expr.StringImm("ccompiler"))
    func = func.set_attribute("ExternalSymbol",
                              tvm.expr.StringImm("ccompiler_0"))
    add_call = relay.Call(func, [x, y])
    # Function that uses default compiler. Ops are fused in this function.
    p0 = relay.var("p0", shape=(8, 8))
    log = relay.log(p0)
    exp = relay.exp(p0)
    concat = relay.concatenate([log, exp], axis=0)
    fused_func = relay.Function([p0], concat)
    fused_func = fused_func.set_attribute("Primitive",
                                          tvm.expr.IntImm("int32", 1))
    fused_call = relay.Call(fused_func, [add_call])
    main = relay.Function([x, y], fused_call)
    mod = relay.Module()
    mod["main"] = main
    return mod
Example #5
def test_concatenate():
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = relay.var("x", shape=(n, t, d))
    y = relay.var("y", shape=(n, t, d))
    z = relay.concatenate((x, y), axis=-1)
    assert "axis=" in z.astext()
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    x = relay.exp(x)
    z = relay.concatenate((x, y), axis=2)
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    z = relay.concatenate((x, y), axis=1)
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t + t, 100))

    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(10, 5))
    z = relay.concatenate((x, y), axis=1)

    # Check result.
    func = relay.Function([x, y], z)
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(10, 5).astype('float32')
    ref_res = np.concatenate((x_data, y_data), axis=1)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, y_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)
        op_res2 = intrp2.evaluate(func)(x_data, y_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)
Example #6
def expected():
    mod = tvm.IRModule()
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    add = x0 + y0
    # Function that uses C compiler
    func = relay.Function([x0, y0], add)
    func = set_func_attr(func, "ccompiler", "ccompiler_0")
    glb_0 = relay.GlobalVar("ccompiler_0")
    mod[glb_0] = func
    add_call = relay.Call(glb_0, [x, y])
    # Function that uses default compiler. Ops are fused in this function.
    p0 = relay.var("p0", shape=(8, 8))
    log = relay.log(p0)
    exp = relay.exp(p0)
    concat = relay.concatenate([log, exp], axis=0)
    fused_func = relay.Function([p0], concat)
    fused_func = fused_func.with_attr("Primitive",
                                      tvm.tir.IntImm("int32", 1))
    fused_call = relay.Call(fused_func, [add_call])
    main = relay.Function([x, y], fused_call)
    mod["main"] = main
    return mod
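set_func_attr is not shown here; in the TVM test suite it is usually defined along these lines (a sketch, the exact attribute set varies between versions):

def set_func_attr(func, compiler, ext_symbol):
    func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    func = func.with_attr("Compiler", compiler)
    func = func.with_attr("global_symbol", ext_symbol)
    return func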
Example #7
def before(dim):
    X = relay.var("X", shape=(1, dim))
    W = relay.var("W", shape=(3 * dim, dim))
    matmul = relay.nn.dense(X, W)
    splitted = relay.split(matmul, indices_or_sections=3, axis=1)
    out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
    return relay.Function([X, W], out)
Example #8
def get_inner_func_3():
    x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
    x = relay.abs(x)
    x = relay.nn.relu(x)
    x = relay.exp(x)
    x = _create_primitive_function(x)
    return x
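_create_primitive_function is also a helper from the surrounding test file; one plausible definition wraps the expression in a Relay function flagged as Primitive (a sketch, not the original code):

def _create_primitive_function(expr):
    func = relay.Function(relay.analysis.free_vars(expr), expr)
    return func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))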
Example #9
def before(dim):
    X = relay.var("X", shape=(1, dim))
    W = relay.var("W", shape=(3 * dim, dim))
    matmul = relay.nn.dense(X, W)
    splitted = relay.split(matmul, indices_or_sections=3, axis=1)
    out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
    return relay.Function([X, W], out)
Example #10
def before():
    x = relay.var("x", shape=(10, 20))
    y = relay.add(x, relay.const(1, "float32"))
    z = relay.exp(y)
    w = relay.squeeze(z)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], w)
    return mod
Example #11
def expected():
    x = relay.var("p", shape=(10, 20))
    y = x
    for i in range(max_fused_ops):
        y = relay.exp(y)
    f1 = relay.Function([x], y)
    f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    x = relay.var("x", shape=(10, 20))
    z = relay.Call(f1, [x])
    xx = relay.var("pp", shape=(10, 20))
    yy = xx
    for i in range(n - max_fused_ops):
        yy = relay.exp(yy)
    f2 = relay.Function([xx], yy)
    f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    zz = relay.Call(f2, [z])
    return relay.Function([x], zz)
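This expected() pairs with a before() that is just a chain of n exp calls, split in two because the fuser stops at max_fused_ops. A sketch of the other half of the test, assuming n and max_fused_ops are defined by the enclosing test (the config key is the one current TVM registers for FuseOps):

def before(n):
    x = relay.var("x", shape=(10, 20))
    y = x
    for _ in range(n):
        y = relay.exp(y)
    return relay.Function([x], y)

with tvm.transform.PassContext(config={"relay.FuseOps.max_depth": max_fused_ops}):
    mod = relay.transform.FuseOps()(tvm.IRModule.from_expr(before(n)))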
Example #12
def expected():
    x = relay.var("p", shape=(10, 20))
    y = relay.add(x, relay.const(1, "float32"))
    z = relay.exp(y)
    f1 = relay.Function([x], z)
    x = relay.var("x", shape=(10, 20))
    y = relay.Call(f1, [x])
    return relay.Function([x], y)
Example #13
def expected():
    x = relay.var("p", shape=(10, 20))
    y = relay.add(x, relay.const(1, "float32"))
    z = relay.exp(y)
    f1 = relay.Function([x], z)
    x = relay.var("x", shape=(10, 20))
    y = relay.Call(f1, [x])
    return relay.Function([x], y)
Example #14
def test_concatenate():
    for dtype in ["float16", "float32"]:
        n, t, d = te.size_var("n"), te.size_var("t"), 100
        x = relay.var("x", shape=(n, t, d))
        y = relay.var("y", shape=(n, t, d))
        z = relay.concatenate((x, y), axis=-1)
        assert "axis=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t, 200))

        x = relay.exp(x)
        z = relay.concatenate((x, y), axis=2)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t, 200))

        z = relay.concatenate((x, y), axis=1)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t + t, 100))

        # Check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError).
        try:
            x = relay.var("p1", shape=(2, 5))
            y = relay.var("p2", shape=(2, 3))
            c = relay.concatenate([x, y], axis=0)
            func = relay.Function([x, y], c)
            zz = run_infer_type(func)
        except tvm._ffi.base.TVMError:
            pass
        else:
            assert False

        x = relay.var("x", shape=(10, 5), dtype=dtype)
        y = relay.var("y", shape=(10, 5), dtype=dtype)
        t = relay.var("z", shape=(), dtype=dtype)
        z = relay.concatenate((x, y), axis=1)
        z = relay.add(z, t)
        # Check result.
        func = relay.Function([x, y, t], z)
        x_data = np.random.rand(10, 5).astype(dtype)
        y_data = np.random.rand(10, 5).astype(dtype)
        t_data = np.random.uniform(size=()).astype(dtype)
        ref_res = np.concatenate((x_data, y_data), axis=1) + t_data

        for target, dev in tvm.testing.enabled_targets():
            if (
                dtype == "float16"
                and target == "cuda"
                and not have_fp16(tvm.cuda(0).compute_version)
            ):
                continue
            op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                x_data, y_data, t_data
            )
            tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=0.01)
            op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(
                x_data, y_data, t_data
            )
            tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=0.01)
Example #15
def test1():
    x = relay.Var('x')
    y = relay.exp(x)

    print('y', type(y), y)
    print('y.__dir__()', y.__dir__())
    print('y.op.__dir__()', y.op.__dir__())
    lib.test_call_node(y.handle)
    print('done')
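lib here is some natively loaded test library, not part of TVM; a hedged sketch of how it might be obtained (the file name and exported symbol are hypothetical):

import ctypes
# Hypothetical shared object that exposes test_call_node(void*).
lib = ctypes.CDLL("./libtest_call_node.so")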
Example #16
    def get_func():
        add = relay.add(x, y)
        sqrt = relay.sqrt(add)
        log = relay.log(add)
        subtract = relay.subtract(sqrt, log)
        exp = relay.exp(subtract)

        func = relay.Function([x, y], exp)
        return func
Example #17
    def get_func():
        add = relay.add(x, y)
        sqrt = relay.sqrt(add)
        log = relay.log(add)
        subtract = relay.subtract(sqrt, log)
        exp = relay.exp(subtract)

        func = relay.Function([x, y], exp)
        return func
Example #18
def test_fuse_simple():
    """Simple testcase."""
    x = relay.var("x", shape=(10, 20))
    y = relay.add(x, x)
    z = relay.exp(y)
    z = relay.ir_pass.infer_type(z)
    zz = relay.ir_pass.fuse_ops(z)
    zz = relay.ir_pass.fuse_ops(zz)
    zz = relay.ir_pass.infer_type(zz)
    zz.astext()
Example #19
def safe_exp(w):
    slope = relay.const(np.exp(1, dtype=np.float32))
    lin_bool = w > slope
    lin_region = relay.cast(lin_bool, "float32")

    lin_out = slope * w
    exp_out = relay.exp(relay.where(lin_bool, relay.zeros_like(w), w))

    out = lin_region * lin_out + (relay.const(1.) - lin_region) * exp_out
    return out
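A NumPy reference may make the two regions clearer: for w above the threshold e the output is linear (e * w), below it the output is exp(w), with the where-masking keeping exp from being evaluated on the large inputs. A sketch (safe_exp_ref is a name introduced here):

import numpy as np

def safe_exp_ref(w):
    slope = np.float32(np.e)
    lin = w > slope
    # Linear region: slope * w; exponential region: exp(w).
    return np.where(lin, slope * w, np.exp(np.where(lin, 0.0, w)))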
Example #20
def expected():
    x = relay.var("p", shape=(10, 20))
    y = relay.add(x, relay.const(1, "float32"))
    z = relay.exp(y)
    w = relay.squeeze(z)
    f1 = relay.Function([x], w)
    f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    x = relay.var("x", shape=(10, 20))
    y = relay.Call(f1, [x])
    return relay.Function([x], y)
Example #21
        def expected():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            copy_sub_exp = relay.device_copy(subtract, dev_ctx, cpu_ctx)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example #22
        def expected():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            copy_sub_exp = relay.device_copy(subtract, dev_dev, cpu_dev)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example #23
def expected():
    x = relay.var("p", shape=(10, 20))
    y = relay.add(x, relay.const(1, "float32"))
    z = relay.exp(y)
    w = relay.squeeze(z)
    f1 = relay.Function([x], w)
    x = relay.var("x", shape=(10, 20))
    y = relay.Call(f1, [x])
    mod = relay.module.Module()
    mod["main"] = relay.Function([x], y)
    return mod
Example #24
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_dev)

            func = relay.Function([x, y], _exp)
            func = run_opt_pass(func, transform.RewriteAnnotatedOps(dev_dev.device_type))
            return func
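run_opt_pass is the usual TVM test helper: wrap the expression in an IRModule, run the pass, and return the updated main. Roughly (a sketch consistent with TVM's test utilities):

def run_opt_pass(expr, opt_pass):
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(mod)
    return mod["main"]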
Example #25
def expected():
    x = relay.var("p", shape=(10, 20))
    y = relay.add(x, relay.const(1, "float32"))
    z = relay.exp(y)
    w = relay.squeeze(z)
    f1 = relay.Function([x], w)
    f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    x = relay.var("x", shape=(10, 20))
    y = relay.Call(f1, [x])
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y)
    return mod
Example #26
        def expected():
            add = relay.add(x, y)
            copy_add_sqrt = relay.device_copy(add, cpu_ctx, dev_ctx)
            sqrt = relay.sqrt(copy_add_sqrt)
            log = relay.log(add)
            copy_sqrt_subtract = relay.device_copy(sqrt, dev_ctx, cpu_ctx)
            subtract = relay.subtract(copy_sqrt_subtract, log)
            copy_sub_exp = relay.device_copy(subtract, cpu_ctx, dev_ctx)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
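These expected() graphs are what RewriteAnnotatedOps should produce from their annotated() counterparts: a device_copy is inserted on every edge whose endpoints were annotated onto different devices. The typical assertion is structural equality after type inference, along these lines (a sketch using the run_opt_pass helper above):

annotated_func = annotated()
expected_func = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(annotated_func, expected_func)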
Example #27
def test_plan_memory():
    # it is sufficient to cycle through two memories.

    x = relay.var("x", shape=(10, ))
    y = relay.var("x", shape=(1, ))
    y2 = relay.exp(y)
    z = relay.add(x, y2)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    func = relay.ir_pass.infer_type(func)
    func = relay.ir_pass.fuse_ops(func, opt_level=0)
    func = relay.ir_pass.infer_type(func)
    smap = relay.backend._backend.GraphPlanMemory(func)
    storage_ids = set()
    for k, v in smap.items():
        for x in v:
            storage_ids.add(x.value)

    # Current rule requires vars have unique storage id
    # because we don't do inplace, we will need another
    # two alternating temporary space.
    assert len(storage_ids) == 4
Example #28
def test_plan_memory():
    # it is sufficient to cycle through two memories.

    x = relay.var("x", shape=(10,))
    y = relay.var("x", shape=(1,))
    y2 = relay.exp(y)
    z = relay.add(x, y2)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    func = relay.ir_pass.infer_type(func)
    func = relay.ir_pass.fuse_ops(func, opt_level=0)
    func = relay.ir_pass.infer_type(func)
    smap = relay.backend._backend.GraphPlanMemory(func)
    storage_ids = set()
    device_types = set()
    for k, v in smap.items():
        assert len(v) == 2
        for x in v[0]:
            storage_ids.add(x.value)
        for x in v[1]:
            device_types.add(x.value)

    # Current rule requires vars have unique storage id
    # because we don't do inplace, we will need another
    # two alternating temporary space.
    assert len(storage_ids) == 4
    assert len(device_types) == 1
Example #29
def test_plan_memory():
    # it is sufficient to cycle through two memories.

    x = relay.var("x", shape=(10, ))
    y = relay.var("x", shape=(1, ))
    y2 = relay.exp(y)
    z = relay.add(x, y2)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.FuseOps(0)(mod)
    func = mod["main"]
    smap = relay.backend._backend.GraphPlanMemory(func)
    storage_ids = set()
    device_types = set()
    for k, v in smap.items():
        assert len(v) == 2
        for x in v[0]:
            storage_ids.add(x.value)
        for x in v[1]:
            device_types.add(x.value)

    # Current rule requires vars have unique storage id
    # because we don't do inplace, we will need another
    # two alternating temporary space.
    assert len(storage_ids) == 4
    assert len(device_types) == 1
Example #30
def test_extern_ccompiler_default_ops():
    def expected():
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([x0, y0], add)
        func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func = func.with_attr("Compiler", tvm.tir.StringImm("ccompiler"))
        func = func.with_attr("ExternalSymbol",
                              tvm.tir.StringImm("ccompiler_0"))
        glb_0 = relay.GlobalVar("ccompiler_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [x, y])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(8, 8))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.with_attr("Primitive",
                                          tvm.tir.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        main = relay.Function([x, y], fused_call)
        mod["main"] = main
        return mod

    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    add = x + y
    log = relay.log(add)
    exp = relay.exp(add)
    concat = relay.concatenate([log, exp], axis=0)
    f = relay.Function([x, y], concat)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = WhiteListAnnotator(["add", "subtract", "multiply"], "ccompiler")(mod)
    mod = transform.PartitionGraph()(mod)

    fused_mod = transform.FuseOps(2)(mod)
    expected_mod = expected()
    assert relay.alpha_equal(fused_mod, expected_mod)

    x_data = np.random.rand(8, 8).astype('float32')
    y_data = np.random.rand(8, 8).astype('float32')
    np_add = x_data + y_data
    res = np.concatenate([np.log(np_add), np.exp(np_add)])
    check_result(mod, {"x": x_data, "y": y_data}, (16, 8), res)
Example #31
    def expected(dshape):
        x = relay.var("p0", shape=dshape)
        y = relay.add(x, relay.const(1, "float32"))
        f1 = relay.Function([x], y)

        x = relay.var("p01", shape=dshape)
        y = relay.exp(x)
        f2 = relay.Function([x], y)

        x = relay.var("x", shape=dshape)
        y = relay.Call(f1, [x])
        z = relay.Call(f2, [y])
        return relay.Function([x], z)
Example #32
def test_exp():
    x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
    y = relay.exp(x)
    func = relay.Function([x], y)
    mod = tvm.IRModule.from_expr(func)

    fast_mod = FastMath()(mod)
    assert "fast_exp" in fast_mod.astext()

    # Check that FastMath option works for relay.build.
    with relay.build_config(opt_level=3, required_pass=['FastMath']):
        fast_mod = relay.optimize(mod, target='llvm', params=None)
    assert "fast_exp" in fast_mod[0].astext()
Example #33
    def expected(dshape):
        x = relay.var("p0", shape=dshape)
        y = relay.add(x, relay.const(1, "float32"))
        f1 = relay.Function([x], y)

        x = relay.var("p01", shape=dshape)
        y = relay.exp(x)
        f2 = relay.Function([x], y)

        x = relay.var("x", shape=dshape)
        y = relay.Call(f1, [x])
        z = relay.Call(f2, [y])
        return relay.Function([x], z)
Example #34
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y], _exp)
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            return func
Example #35
def test_concatenate():
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = relay.var("x", shape=(n, t, d))
    y = relay.var("y", shape=(n, t, d))
    z = relay.concatenate((x, y), axis=-1)
    assert "axis=" in z.astext()
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    x = relay.exp(x)
    z = relay.concatenate((x, y), axis=2)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    z = relay.concatenate((x, y), axis=1)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t + t, 100))

    # Check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError).
    try:
        x = relay.var('p1', shape=(2, 5))
        y = relay.var('p2', shape=(2, 3))
        c = relay.concatenate([x, y], axis=0)
        func = relay.Function([x, y], c)
        zz = run_infer_type(func)
    except tvm._ffi.base.TVMError:
        pass
    else:
        assert False

    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(10, 5))
    t = relay.var("z", shape=())
    z = relay.concatenate((x, y), axis=1)
    z = relay.add(z, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(10, 5).astype('float32')
    t_data = np.random.uniform(size=()).astype('float32')
    ref_res = np.concatenate((x_data, y_data), axis=1) + t_data

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)
        op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)
Example #36
    def expected(dshape):
        x = relay.var("p0", shape=dshape)
        y = relay.add(x, relay.const(1, "float32"))
        f1 = relay.Function([x], y)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        x = relay.var("p01", shape=dshape)
        y = relay.exp(x)
        f2 = relay.Function([x], y)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))

        x = relay.var("x", shape=dshape)
        y = relay.Call(f1, [x])
        z = relay.Call(f2, [y])
        return relay.Function([x], z)
Example #37
    def expected(dim):
        p0 = relay.var("p0", shape=(1, dim))
        p1 = relay.var("p1", shape=(3 * dim, dim))
        matmul = relay.nn.dense(p0, p1)
        f0 = relay.Function([p0, p1], matmul)

        p01 = relay.var("p01", shape=(1, 3 * dim))
        splitted = relay.split(p01, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        f1 = relay.Function([p01], out)

        X = relay.var("X", shape=(1, dim))
        W = relay.var("W", shape=(3 * dim, dim))
        y = relay.Call(f0, [X, W])
        z = relay.Call(f1, [y])
        return relay.Function([X, W], z)
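This expected(dim) is the fused form of the before(dim) gate pattern from Examples #7 and #9: the dense lands in one primitive function, the split plus elementwise gate math in another. A typical check might look like this (a sketch; in the real test the expected functions also carry Primitive attributes):

dim = 32
fused = run_opt_pass(before(dim), transform.FuseOps(fuse_opt_level=2))
ref = run_opt_pass(expected(dim), transform.InferType())
assert tvm.ir.structural_equal(fused, ref)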
Example #38
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y],
                                  relay.Tuple(tvm.convert([_exp, exp])))
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            func = relay.ir_pass.infer_type(func)
            return relay.Function(relay.ir_pass.free_vars(func.body[1]),
                                  func.body[1])
Example #39
def test_with_params():
    x = relay.var('x', shape=(10, 5))
    y = relay.var('y', shape=(1, 5))
    z = relay.add(x, y)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(1, 5).astype('float32')
    params = {"y": y_data}
    graph, lib, params = relay.build(func, "llvm", params=params)
    mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
    mod.set_input(**params)
    mod.set_input(x=x_data)
    mod.run()
    res = mod.get_output(0).asnumpy()
    ref_res = np.exp(y_data + x_data)
    tvm.testing.assert_allclose(res, ref_res)
Example #40
def test_concatenate():
    n, t, d = tvm.var("n"), tvm.var("t"), 100
    x = relay.var("x", shape=(n, t, d))
    y = relay.var("y", shape=(n, t, d))
    z = relay.concatenate((x, y), axis=-1)
    assert "axis=" in z.astext()
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    x = relay.exp(x)
    z = relay.concatenate((x, y), axis=2)
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t, 200))

    z = relay.concatenate((x, y), axis=1)
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.TensorType((n, t + t, 100))

    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(10, 5))
    t = relay.var("z", shape=())
    z = relay.concatenate((x, y), axis=1)
    z = relay.add(z, t)
    # Check result.
    func = relay.Function([x, y, t], z)
    x_data = np.random.rand(10, 5).astype('float32')
    y_data = np.random.rand(10, 5).astype('float32')
    t_data = np.random.uniform(size=()).astype('float32')
    ref_res = np.concatenate((x_data, y_data), axis=1) + t_data

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)
        op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)
        tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)
Example #41
def before():
    x = relay.var("x", shape=(10, 20))
    y = relay.add(x, relay.const(1, "float32"))
    z = relay.exp(y)
    w = relay.squeeze(z)
    return relay.Function([x], w)
Example #42
def before(dshape):
    x = relay.var("x", shape=dshape)
    y = relay.add(x, relay.const(1, "float32"))
    y = relay.annotation.stop_fusion(y)
    z = relay.exp(y)
    return relay.Function([x], z)