Example #1
def test_vulkan_pushconstants():
    # Three 32-bit push constants: any_dim, stride, stride
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.sqrt(x))
    x_np = np.random.uniform(size=(10, )).astype(dtype)
    res_np = np.sqrt(x_np)

    check_mod(mod, x_np, res_np)

    # One 64-bit and one 32-bit constant
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.argsort(x))
    x_np = np.random.randint(0, high=10, size=(10, )).astype(dtype)
    res_np = np.argsort(x_np)

    check_mod(mod, x_np, res_np)

    # One 64-bit and one 32-bit constant
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.cumsum(x))
    x_np = np.random.randint(0, high=10, size=(10, )).astype(dtype)
    res_np = np.cumsum(x_np)

    check_mod(mod, x_np, res_np)
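The test above assumes the standard imports and a check_mod helper; Example #3 below shows one defined inline against the older tvm.context API. A minimal sketch of the assumed context using the newer API (the Vulkan target and the VM executor are assumptions):

import numpy as np
import tvm
import tvm.testing
from tvm import relay

def check_mod(mod, x_np, res_np):
    # run the module on the Vulkan device and compare with the numpy reference
    dev = tvm.device("vulkan", 0)
    ex = relay.create_executor("vm", mod=mod, device=dev, target="vulkan")
    tvm.testing.assert_allclose(ex.evaluate()(x_np).numpy(), res_np, atol=1e-5)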
Example #2
def test_get_direct_ancestor():
    data = relay.var("data")
    w0 = relay.var("w0")
    out1 = relay.nn.conv2d(data, w0)
    out2 = relay.add(out1, data * relay.expr.const(5.0))
    out3 = out2 + relay.expr.const(2.5)
    w1 = relay.var("w1")
    out = relay.nn.conv2d(out3, w1)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net = bind_inputs(net, {
        "data": (1, 16, 224, 224),
        "w0": (16, 16, 1, 1),
        "w1": (16, 16, 1, 1)
    })
    target_ops = [relay.op.get("nn.conv2d")]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list)
    visited_dict = {}
    input_names = ["data"]
    out = get_direct_ancestor(node_list, visited_dict, target_ops, 5,
                              input_names)
    assert out == [0], "Output mismatch: expecting [0] but got %s." % str(out)

    # non-regression test
    out = relay.add(relay.log(data), relay.sqrt(data))
    net = relay.Function(relay.analysis.free_vars(out), out)
    net = bind_inputs(net, {"data": (1, 16, 224, 224)})
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list)
    out = get_direct_ancestor(node_list, visited_dict, target_ops, 3,
                              input_names)
    assert out == [0], "Output mismatch: expecting [0] but got %s." % str(out)
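This test drives TVM's graph-tuner utilities; a sketch of the imports it assumes (bind_inputs, expr2graph, and get_direct_ancestor live under tvm.autotvm.graph_tuner.utils, though exact paths and signatures vary across releases):

from tvm import relay
from tvm.autotvm.graph_tuner.utils import bind_inputs, expr2graph, get_direct_ancestor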
Example #3
def test_pushconstants():
    if not tvm.testing.device_enabled("vulkan"):
        return

    def check_mod(mod, x_np, res_np):
        target = "vulkan"
        ctx = tvm.context(target, 0)
        ex = relay.create_executor("vm", mod=mod, ctx=ctx, target=target)
        res = ex.evaluate()(x_np).asnumpy()
        tvm.testing.assert_allclose(res, res_np, atol=1e-5)

    # Three 32-bit push constants: any_dim, stride, stride
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.sqrt(x))
    x_np = np.random.uniform(size=(10, )).astype(dtype)
    res_np = np.sqrt(x_np)

    check_mod(mod, x_np, res_np)

    # One 64-bit and one 32-bit constant
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.argsort(x))
    x_np = np.random.randint(0, high=10, size=(10, )).astype(dtype)
    res_np = np.argsort(x_np)

    check_mod(mod, x_np, res_np)
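tvm.context, the ctx= keyword, and asnumpy() are pre-0.8 spellings; later releases renamed them to tvm.device, device=, and numpy(). A sketch of the same helper against the newer API (assuming such a release):

def check_mod(mod, x_np, res_np):
    target = "vulkan"
    dev = tvm.device(target, 0)
    ex = relay.create_executor("vm", mod=mod, device=dev, target=target)
    tvm.testing.assert_allclose(ex.evaluate()(x_np).numpy(), res_np, atol=1e-5)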
Example #4
    def get_func():
        add = relay.add(x, y)
        sqrt = relay.sqrt(add)
        log = relay.log(add)
        subtract = relay.subtract(sqrt, log)
        exp = relay.exp(subtract)

        func = relay.Function([x, y], exp)
        return func
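get_func closes over free relay vars x and y; a minimal sketch of that context, with hypothetical shapes:

from tvm import relay

x = relay.var("x", shape=(10, 10), dtype="float32")
y = relay.var("y", shape=(10, 10), dtype="float32")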
Example #6
def test_sqrt(target, dev):
    # Three 32-bit push constants: any_dim, stride, stride
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(), ), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.sqrt(x))
    x_np = np.random.uniform(size=(10, )).astype(dtype)
    res_np = np.sqrt(x_np)

    check_mod(target, dev, mod, x_np, res_np)
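check_mod here also receives the target and device, presumably from a pytest fixture; a sketch modeled on the helper in Example #3 (the exact signature is an assumption):

def check_mod(target, dev, mod, x_np, res_np):
    # evaluate with the VM executor and compare against the numpy reference
    ex = relay.create_executor("vm", mod=mod, device=dev, target=target)
    tvm.testing.assert_allclose(ex.evaluate()(x_np).numpy(), res_np, atol=1e-5)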
Example #7
        def expected():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            copy_sub_exp = relay.device_copy(subtract, dev_dev, cpu_dev)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
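x, y, cpu_dev, and dev_dev come from the enclosing test; a plausible setup (choosing CUDA as the non-CPU device is an assumption):

x = relay.var("x", shape=(3, 4), dtype="float32")
y = relay.var("y", shape=(3, 4), dtype="float32")
cpu_dev = tvm.cpu(0)
dev_dev = tvm.cuda(0)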
Example #8
        def expected():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            copy_sub_exp = relay.device_copy(subtract, dev_ctx, cpu_ctx)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example #9
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_dev)

            func = relay.Function([x, y], _exp)
            func = run_opt_pass(func, transform.RewriteAnnotatedOps(dev_dev.device_type))
            return func
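run_opt_pass (used in Examples #9 and #15) is a recurring helper in TVM's test suite; a sketch of its usual definition (details differ slightly between test files):

import tvm
from tvm.relay import transform  # provides RewriteAnnotatedOps used above

def run_opt_pass(expr, passes):
    # wrap the expression in a fresh module, run the passes, and unwrap the result
    passes = passes if isinstance(passes, list) else [passes]
    mod = tvm.IRModule.from_expr(expr)
    mod = tvm.transform.Sequential(passes)(mod)
    return mod["main"]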
Example #10
 def simple_bn(x, gamma, beta, moving_mean, moving_var,
               axis=1, epsilon=1e-5, shape=None):
     # expect = (x - moving_mean) / sqrt(moving_var + eps) * gamma + beta
     scale = rly.multiply(rly.const(1, 'float32') /
             rly.sqrt(moving_var + rly.const(epsilon, 'float32')), gamma)
     shift = rly.add(
         rly.multiply(rly.negative(moving_mean), scale), beta)
     num_newaxis = len(shape) - (axis + 1)
     if num_newaxis:
         scale = rly.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
         shift = rly.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
     return x * scale + shift
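rly is an import alias, and shape describes the input tensor so the broadcast axes can be expanded; a sketch of the assumed context, with hypothetical shapes:

from tvm import relay as rly

x = rly.var("x", shape=(1, 16, 8, 8), dtype="float32")
gamma, beta, mean, var = (rly.var(n, shape=(16,)) for n in
                          ("gamma", "beta", "moving_mean", "moving_var"))
out = simple_bn(x, gamma, beta, mean, var, shape=(1, 16, 8, 8))

Example #11 below is the same helper, with the constant dtype taken from a dtype variable in the enclosing scope.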
Example #11
 def simple_bn(x, gamma, beta, moving_mean, moving_var,
               axis=1, epsilon=1e-5, shape=None):
     # expect = (x - moving_mean) / sqrt(moving_var + eps) * gamma + beta
     scale = rly.multiply(rly.const(1, dtype) /
             rly.sqrt(moving_var + rly.const(epsilon, dtype)), gamma)
     shift = rly.add(
         rly.multiply(rly.negative(moving_mean), scale), beta)
     num_newaxis = len(shape) - (axis + 1)
     if num_newaxis:
         scale = rly.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
         shift = rly.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
     return x * scale + shift
Example #12
        def expected():
            add = relay.add(x, y)
            copy_add_sqrt = relay.device_copy(add, cpu_ctx, dev_ctx)
            sqrt = relay.sqrt(copy_add_sqrt)
            log = relay.log(add)
            copy_sqrt_subtract = relay.device_copy(sqrt, dev_ctx, cpu_ctx)
            subtract = relay.subtract(copy_sqrt_subtract, log)
            copy_sub_exp = relay.device_copy(subtract, cpu_ctx, dev_ctx)
            exp = relay.exp(copy_sub_exp)

            func = relay.Function([x, y], exp)
            return func
Example #13
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y], _exp)
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            return func
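relay.ir_pass and tvm.context are pre-0.6 and pre-0.8 TVM spellings; later releases moved these passes to relay.transform (compare Example #15). The ctx variables are assumed to come from the enclosing test, roughly:

cpu_ctx = tvm.cpu(0)
dev_ctx = tvm.context("cuda", 0)  # renamed tvm.device in later releases

The same variables are assumed by Examples #8, #12, and #17 to #19.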
Example #14
    def test_sqrt(self):
        data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32"))

        net = relay.sqrt(data)
        net = relay.Function(relay.analysis.free_vars(net), net)
        mod, params = testing.create_workload(net)

        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Sqrt'
        assert 'relay_id' in layers[1].attrs
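This snippet is from PyXIR's test suite rather than TVM's; it assumes TVM's relay testing helpers plus PyXIR's Relay frontend bound as xf_relay (the PyXIR import path is project-specific and not shown here):

from tvm import relay
from tvm.relay import testing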
Example #15
        def annotated():
            add = relay.add(x, y)
            _add = relay.annotation.on_device(add, dev_dev)
            sqrt = relay.sqrt(_add)
            _sqrt = relay.annotation.on_device(sqrt, dev_dev)
            log = relay.log(_add)
            _log = relay.annotation.on_device(log, dev_dev)
            subtract = relay.subtract(_sqrt, _log)
            _subtract = relay.annotation.on_device(subtract, dev_dev)
            exp = relay.exp(_subtract)
            _exp = relay.annotation.on_device(exp, dev_dev)

            func = relay.Function([x, y], _exp)
            func = run_opt_pass(func, transform.RewriteAnnotatedOps(cpu_dev.device_type))
            return func
Example #16
def _get_pooling_model(shape, dtype, typef, sizes, strides, dilation, padding,
                       ceil_mode, count_include_pad, var_names):
    """Return a model and any parameters it may have."""
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    out = relay.var(next(var_names), shape=shape, dtype=dtype)

    if typef == "nn.max_pool2d":
        out = relay.nn.max_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            dilation=dilation,
            padding=padding,
            ceil_mode=ceil_mode,
            layout="NHWC",
        )
    elif typef == "nn.avg_pool2d":
        if dtype == "uint8":
            out = relay.cast(out, "int32")
        out = relay.nn.avg_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            dilation=dilation,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            layout="NHWC",
        )
        if dtype == "uint8":
            out = relay.cast(out, "uint8")
    elif typef == "nn.l2_pool2d":
        out = relay.power(out, relay.const(2.0))
        out = relay.nn.avg_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            layout="NHWC",
        )
        out = relay.sqrt(out)
    else:
        raise ValueError("Function not supported")

    return out
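A hypothetical call, to make the expected arguments concrete (var_names is an iterator of fresh variable names; all values here are made up):

out = _get_pooling_model(
    shape=(1, 28, 28, 8),
    dtype="float32",
    typef="nn.max_pool2d",
    sizes=(2, 2),
    strides=(2, 2),
    dilation=(1, 1),
    padding=(0, 0),
    ceil_mode=False,
    count_include_pad=True,
    var_names=iter(["a", "b", "c"]),
)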
Example #17
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y],
                                  relay.Tuple(tvm.convert([_exp, exp])))
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            func = relay.ir_pass.infer_type(func)
            return relay.Function(relay.ir_pass.free_vars(func.body[1]),
                                  func.body[1])
Example #18
        def annotated():
            add = relay.add(x, y)
            sqrt = relay.sqrt(add)
            log = relay.log(add)
            subtract = relay.subtract(sqrt, log)
            exp = relay.exp(subtract)
            _exp = relay.annotation.on_device(exp, cpu_ctx)

            func = relay.Function([x, y], relay.Tuple(tvm.convert([_exp,
                                                                   exp])))
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       dev_ctx.device_type)
            func = relay.ir_pass.infer_type(func)
            return relay.Function(relay.ir_pass.free_vars(func.body[1]),
                                  func.body[1])
Example #19
        def annotated():
            add = relay.add(x, y)
            _add = relay.annotation.on_device(add, dev_ctx)
            sqrt = relay.sqrt(_add)
            _sqrt = relay.annotation.on_device(sqrt, dev_ctx)
            log = relay.log(_add)
            _log = relay.annotation.on_device(log, dev_ctx)
            subtract = relay.subtract(_sqrt, _log)
            _subtract = relay.annotation.on_device(subtract, dev_ctx)
            exp = relay.exp(_subtract)
            _exp = relay.annotation.on_device(exp, dev_ctx)

            func = relay.Function([x, y], _exp)
            func = relay.ir_pass.infer_type(func)
            func = relay.ir_pass.rewrite_annotated_ops(func,
                                                       cpu_ctx.device_type)
            return func
Example #20
def test_reshape_nop():
    # test that reshape can be turned into nop
    x = relay.var("x", shape=(10, 4))
    xx = relay.abs(x)
    y = relay.expand_dims(xx, axis=1)
    t0 = relay.reshape(y, (1, 40))
    t1 = relay.abs(y)

    z0 = relay.reshape(t0, (2, 20))
    z1 = relay.sqrt(t1)
    z2 = relay.reshape(t1, (1, 40))

    func = relay.Function([x], relay.Tuple([z0, z1, z2]))
    x_data = np.random.rand(10, 4).astype("float32")
    graph = relay.build(tvm.IRModule.from_expr(func), "llvm")
    graph_json_str = graph.get_graph_json()

    graph_json = json.loads(graph_json_str)

    # the reshapes must share storage with their inputs
    storage_ids = graph_json["attrs"]["storage_id"][1]
    assert tuple(storage_ids) == (0, 1, 1, 2, 3, 2)
    assert graph_json["nodes"][2]["attrs"]["func_name"] == "__nop"
    assert graph_json["nodes"][5]["attrs"]["func_name"] == "__nop"

    gmod = graph_executor.GraphModule(graph["default"](tvm.cpu(0)))

    gmod.set_input(x=x_data)
    gmod.run()
    z0_np = x_data.reshape(2, 20)
    z1_np = np.sqrt(np.abs(x_data.reshape(10, 1, 4)))
    z2_np = np.abs(x_data).reshape(1, 40)
    tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np)
    tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np)
    tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np)
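Imports assumed by this test:

import json
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.contrib import graph_executor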
Example #21
 def expected():
     x = relay.var("x", shape=(1, 56, 56, 64))
     weight = relay.var("weight", shape=(3, 3, 64, 16))
     bias = relay.var("bias", shape=(1, 1, 1, 16))
     x = relay.layout_transform(x, src_layout="NHWC", dst_layout="NCHW")
     x = relay.layout_transform(x, src_layout="NCHW", dst_layout="NCHW16c")
     weight = relay.layout_transform(weight, src_layout="HWIO", dst_layout="OIHW")
     y = relay.nn.conv2d(
         x, weight, channels=16, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
     )
     bias = relay.layout_transform(bias, src_layout="NHWC", dst_layout="NCHW")
     bias = relay.layout_transform(bias, src_layout="NCHW", dst_layout="NCHW16c")
     add = relay.add(y, bias)
     y = relay.layout_transform(add, src_layout="NCHW16c", dst_layout="NCHW")
     y = relay.layout_transform(y, src_layout="NCHW", dst_layout="NHWC")
     mean = relay.mean(y, axis=3, exclude=True)
     var = relay.variance(y, axis=3, exclude=True)
     denom = relay.const(1.0) / relay.sqrt(var + relay.const(1e-05))
     gamma = relay.var("gamma", shape=(16,))
     denom = denom * gamma
     denom_expand1 = relay.expand_dims(denom, axis=1, num_newaxis=2)
     denom_expand2 = relay.expand_dims(denom_expand1, axis=0)
     denom_nchwc16 = relay.layout_transform(
         denom_expand2, src_layout="NCHW", dst_layout="NCHW16c"
     )
     out = add * denom_nchwc16
     beta = relay.var("beta", shape=(16,))
     numerator = (-mean) * denom + beta
     numerator_expand1 = relay.expand_dims(numerator, axis=1, num_newaxis=2)
     numerator_expand2 = relay.expand_dims(numerator_expand1, axis=0)
     numerator_nchwc16 = relay.layout_transform(
         numerator_expand2, src_layout="NCHW", dst_layout="NCHW16c"
     )
     out = out + numerator_nchwc16
     out = relay.layout_transform(out, src_layout="NCHW16c", dst_layout="NCHW")
     y = relay.layout_transform(out, src_layout="NCHW", dst_layout="NHWC")
     y = relay.Function(analysis.free_vars(y), y)
     return y
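analysis here is relay's analysis module; the snippet assumes imports along the lines of:

from tvm import relay
from tvm.relay import analysis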
Example #22
def sqrt(expr):
    # take the absolute value first so the square root is defined for negative inputs
    return relay.sqrt(relay.abs(expr))
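A quick usage sketch (values are hypothetical): because of the abs, the wrapper stays well-defined for negative inputs:

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(3,), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], sqrt(x)))
res = relay.create_executor(mod=mod).evaluate()(
    np.array([-4.0, 0.0, 9.0], dtype="float32"))
# res.numpy() is [2., 0., 3.]: the negative input is folded through abs first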