Example No. 1
    def validate(shape, value, dtype):
        def before_left(x, elem_op, full):
            return elem_op(full, x)

        def after_left(x, elem_op, value):
            return elem_op(relay.const(value, dtype), x)

        def before_right(x, elem_op, full):
            return elem_op(x, full)

        def after_right(x, elem_op, value):
            return elem_op(x, relay.const(value, dtype))

        x = relay.var("x", shape=shape, dtype=dtype)
        elem_ops = [relay.add, relay.multiply, relay.subtract, relay.divide]
        full_ops = []
        if value == 0:
            full_ops.append(relay.zeros(shape, dtype))
            full_ops.append(relay.zeros_like(x))
        if value == 1:
            full_ops.append(relay.ones(shape, dtype))
            full_ops.append(relay.ones_like(x))
        else:
            full_ops.append(relay.full(relay.const(value, dtype), shape))
            full_ops.append(relay.full_like(x, relay.const(value, dtype)))
        for op in elem_ops:
            for full in full_ops:
                z = before_left(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(after_left(x, op, value),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

                z = before_right(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(after_right(x, op, value),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

        # Test the case in which x is broadcast to full's shape.
        # SimplifyExpr is expected to leave these expressions unchanged, so the
        # expected result below is just the type-inferred "before" expression.
        full_ops = []
        if value == 0:
            full_ops.append(relay.zeros(shape * 2, dtype))
        if value == 1:
            full_ops.append(relay.ones(shape * 2, dtype))
        else:
            full_ops.append(relay.full(relay.const(value, dtype), shape * 2))
        for op in elem_ops:
            for full in full_ops:
                z = before_left(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(before_left(x, op, full),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

                z = before_right(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(before_right(x, op, full),
                                     transform.InferType())
                assert tvm.ir.structural_equal(zz, after)
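    # The enclosing test function is not shown in this snippet. A plausible driver
    # for the validate helper above might look like the following; the shape,
    # value, and dtype choices are illustrative, not taken from the original test.
    for shape in [[10], [10, 10]]:
        for value in [0, 1, 2]:
            for dtype in ["float32", "int32"]:
                validate(shape, value, dtype)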
Example No. 2
def test_full_infer_type():
    # default settings: match input dtype
    x = relay.var("x", relay.TensorType((), "int8"))
    y = relay.full(x, ())
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((), "int8")

    # change the shape and dtype
    x = relay.var("x", relay.TensorType((), "float32"))
    y = relay.full(x, (1, 2), "int8")
    assert "shape=" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((1, 2), "int8")
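# run_infer_type is a shared test utility rather than public API. A minimal
# sketch of it, assuming the usual TVM test-helper pattern, is:
import tvm
from tvm import relay


def run_infer_type(expr):
    mod = tvm.IRModule.from_expr(expr)
    mod = relay.transform.InferType()(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body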
Example No. 3
def test_full():
    # default settings: match input dtype
    x = relay.var("x", relay.TensorType((), "int8"))
    y = relay.full(x, ())
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((), "int8")

    # change the shape and dtype
    x = relay.var("x", relay.TensorType((), "float32"))
    y = relay.full(x, (1, 2), "int8")
    "shape=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType((1, 2), "int8")
Example No. 5
    def test_arange_full_and_reshape(self):
        start = relay.expr.const(0.0)
        stop = relay.expr.const(10.0)
        step = relay.expr.const(1.0)

        fill_val = relay.expr.const(1.0)
        fill_shape = [10, 1]
        dtype = "float32"

        left = relay.arange(start, stop, step, dtype)
        left = relay.reshape(left, [-1, 1])
        left = relay.reshape(left, [1, -1])

        right = relay.full(fill_val, fill_shape, dtype)
        right = relay.reshape(right, [1, -1])

        net = relay.multiply(left, right)

        mod = tvm.IRModule.from_expr(net)
        params = {}
        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert len(layers) == 10
        assert layers[0].type[0] == "Constant"
        assert layers[3].type[0] == "AnyOp"
        assert layers[7].type[0] == "AnyOp"
        assert layers[5].shapes == [1, 10]
        assert layers[8].shapes == [1, 10]
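        # For reference, the same shapes computed with plain numpy; this is an
        # added illustration, not part of the original test.
        import numpy as np

        left_ref = np.arange(0.0, 10.0, 1.0, dtype="float32").reshape(-1, 1).reshape(1, -1)
        right_ref = np.full([10, 1], 1.0, dtype="float32").reshape(1, -1)
        assert (left_ref * right_ref).shape == (1, 10)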
Example No. 6
    def verify_full(fill_value, src_shape, dtype):
        x = relay.var("x", relay.scalar_type(dtype))
        rank = len(src_shape)
        dyn_src_shape = relay.var("dyn_src_shape", relay.ty.TensorType((rank,), "int64"))
        z = relay.full(x, dyn_src_shape, dtype)
        func = relay.Function([x, dyn_src_shape], z)
        ref_res = np.full(src_shape, fill_value).astype(dtype)

        verify_func(func, [np.array(fill_value).astype(dtype), np.array(src_shape).astype('int64')], ref_res)
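    # verify_func is defined elsewhere in the test file. A minimal sketch of such a
    # helper, mirroring the executor pattern used in the other examples (API names
    # vary across TVM versions), could look like:
    def verify_func(func, data, ref_res, rtol=1e-5):
        for target, ctx in tvm.testing.enabled_targets():
            for kind in ["vm", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(*data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol)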
Example No. 7
def test_concretize_full_like():
    dtype = "int32"
    shape_like = relay.var("shape_like", shape=(3, 4, 5), dtype=dtype)
    fill_value = relay.var("fill", relay.TensorType((), "float32"))
    expr = relay.full_like(shape_like, fill_value)

    expected = run_infer_type(relay.full(fill_value, (3, 4, 5), dtype))
    actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
    assert tvm.ir.structural_equal(actual, expected)
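# run_opt_pass, like run_infer_type, is a shared test utility. A minimal sketch,
# assuming the usual TVM test-helper pattern, is:
import tvm
from tvm import relay


def run_opt_pass(expr, passes):
    passes = passes if isinstance(passes, list) else [passes]
    mod = tvm.IRModule.from_expr(expr)
    seq = tvm.transform.Sequential(passes)
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body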
Example No. 8
def verify_full(fill_value, src_shape, dtype):
    x = relay.var("x", relay.scalar_type(dtype))
    z = relay.full(x, src_shape, dtype)
    func = relay.Function([x], z)
    ref_res = np.full(src_shape, fill_value)
    for target, ctx in ctx_list():
        for kind in ["graph", "debug"]:
            intrp = relay.create_executor(kind, ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(np.array(fill_value, dtype))
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
Example No. 9
def verify_full(fill_value, src_shape, dtype):
    x = relay.var("x", relay.scalar_type(dtype))
    z = relay.full(x, src_shape, dtype)
    func = relay.Function([x], z)
    ref_res = np.full(src_shape, fill_value)
    for target, ctx in tvm.testing.enabled_targets():
        for kind in ["graph", "debug"]:
            intrp = relay.create_executor(kind, ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(np.array(fill_value, dtype))
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
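# Plausible invocations for the helper above; the fill values and shapes are
# illustrative, not taken from the original tests.
verify_full(4, (1, 3, 4, 4), "int32")
verify_full(4.0, (1, 4), "float32")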
Example No. 10
def test_compile_full():
    # Shape calculations can happen in int64. This test checks that the full
    # operator can handle shapes that are not int32.
    shape = (tvm.tir.IntImm('int32', 1), tvm.tir.IntImm('int64', 16),
             tvm.tir.IntImm('int64', 16), tvm.tir.IntImm('int32', 64))
    output = relay.full(relay.const(0, 'int32'), shape=shape, dtype='int32')
    f = relay.Function([], output)
    mod = tvm.IRModule.from_expr(f)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    relay.build(mod, 'llvm')
Example No. 12
def test_full():
    # default settings: match input dtype
    ib = relay.ir_builder.IRBuilder()
    x = ib.param("x", relay.TensorType((), "int8"))
    with ib.function(x) as func:
        ib.ret(relay.full(x, ()))
    ib.ret(func)
    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.TensorType((), "int8")

    # change the shape and dtype
    ib = relay.ir_builder.IRBuilder()
    x = ib.param("x", relay.TensorType((), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.full(x, (1, 2), "int8"))
    ib.ret(func)
    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.TensorType((1, 2), "int8")
Example No. 13
    def verify_full(fill_value, fill_shape, dtype):
        x = relay.var("x", relay.scalar_type(dtype))
        y = relay.var("y", relay.TensorType(fill_shape, 'int64'))
        z = relay.full(x, relay.shape_of(y), dtype)

        func = run_infer_type(relay.Function([x, y], z))
        func2 = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()
        )

        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("full")

        ref_res = np.full(fill_shape, fill_value).astype(dtype)
        y_data = np.random.uniform(low=-1, high=1, size=fill_shape).astype('int64')
        verify_func(func2, [fill_value, y_data], ref_res)
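    # Plausible invocations for the helper above; fill values and shapes are
    # illustrative, not taken from the original test.
    verify_full(4, (1, 2, 3, 4), "int32")
    verify_full(4.0, (1, 2, 8, 10), "float32")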
Example No. 14
    def test_full(self):
        fill_val = relay.expr.const(1.0)
        fill_shape = [10, 1]

        net = relay.full(fill_val, fill_shape, "float32")
        net = relay.reshape(net, [1, -1])
        mod = tvm.IRModule.from_expr(net)
        params = {}
        xgraph = xf_relay.from_relay(mod, params)
        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Constant"
        assert layers[0].shapes == [1]
        assert layers[1].type[0] == "AnyOp"
        assert layers[1].shapes == [10, 1]
        assert layers[2].type[0] == "Reshape"
        assert layers[2].shapes == [1, 10]
Example No. 15
def before():
    # c_shape is provided by the enclosing test; see the sketch below.
    dtype = "float32"
    return relay.full(relay.const(1.0, dtype), c_shape, dtype=dtype)
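# Only the "before" expression is included in this snippet; c_shape and the rest
# of the test are not shown. A sketch of plausible surrounding context follows;
# the shape value and the pass being exercised are assumptions, not taken from
# the original file.
c_shape = (8, 9, 10)


def expected():
    # Assume no rewrite here: the expected expression is identical to the input.
    return before()


zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
assert tvm.ir.structural_equal(zz, zexpected)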