Example #1
def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);
    """
    f = relay.Var("f")
    n = relay.Var("n", e.int32)
    data = relay.Var("data", e.float32)
    funcbody = relay.If(equal(n, relay.const(0)),
                        data,
                        relay.Call(f, [subtract(n, relay.const(1)),
                                       log(data)]))
    value = relay.Function([n, data], funcbody, e.float32, [])
    orig = relay.Let(f, value, relay.Call(f, [relay.const(2), relay.const(10000.0)]))
    dced = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination())
    orig = transform.OptimizeOnExpr(orig, transform.InferType())
    assert graph_equal(dced, orig)
    dced = transform.OptimizeOnExpr(relay.Let(f, value, e.three),
                                    transform.DeadCodeElimination())
    assert alpha_equal(dced, e.three)
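
These examples are drawn from TVM's Relay pass tests. The dead-code-elimination snippets refer to a shared fixture `e` (providing `e.int32`, `e.float32`, `e.one`, `e.two`, `e.three`, and the variables `e.a` through `e.e`) plus a few operator helpers that are not shown in this listing. The block below is a minimal sketch of that environment, assuming the usual `tvm.relay` imports; exact module paths (and the `alpha_equal`/`graph_equal` comparators, which later TVM releases replace with `tvm.ir.structural_equal`) vary by version, so treat the names as an approximation of the test fixture rather than its authoritative definition.

import tvm
from tvm import relay
from tvm.relay import Function, Var
from tvm.relay.op import add, subtract, equal, log
from tvm.relay.analysis import free_vars, alpha_equal, graph_equal

# Minimal stand-in for the shared `e` fixture used by the DCE examples.
class env:
    def __init__(self):
        self.int32 = relay.TensorType([], "int32")      # scalar i32
        self.float32 = relay.TensorType([], "float32")  # scalar f32
        tt = relay.TensorType([1, 2, 3], "float32")
        # typed variables referenced as e.a ... e.e and e.x, e.y, e.z
        self.a, self.b, self.c, self.d, self.e = [Var(n, tt) for n in "abcde"]
        self.x, self.y, self.z = [Var(n, tt) for n in "xyz"]
        self.one = relay.const(1.0)
        self.two = relay.const(2.0)
        self.three = relay.const(3.0)

e = env()
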
Example #2
def test_tuple_get_item():
    tt = relay.TupleType([e.float32, e.float32])
    t = relay.Var('t', tt)
    a = relay.Var('a')
    g = relay.TupleGetItem(t, 0)
    dced = transform.OptimizeOnExpr(g, transform.DeadCodeElimination())
    assert alpha_equal(Function(free_vars(dced), dced), Function(free_vars(g), g))
    orig = relay.TupleGetItem(relay.Let(a, e.one, t), 0)
    dced = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination())
    assert alpha_equal(Function(free_vars(dced), dced), Function(free_vars(g), g))
Example #3
def test_round_trip():
    x = relay.Var('x')
    y = relay.Var('y')
    z = relay.Var('z')
    body = relay.Let(z, op.add(y, y), op.add(z, z))
    body = relay.Let(y, op.add(x, x), body)
    f = relay.Function([], relay.Let(x, relay.const(1), body))
    g = transform.OptimizeOnExpr(f, transform.ToGraphNormalForm())
    h = transform.OptimizeOnExpr(g, transform.ToANormalForm())
    assert Feature.fLet in detect_feature(f)
    assert not Feature.fLet in detect_feature(g)
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
    check_eval(h, [], 8.0)
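
The normal-form examples evaluate expressions numerically through a `check_eval` helper that is not reproduced in this listing. A plausible sketch, built on `relay.create_executor` with the Relay interpreter on CPU, is shown below; the A-normal-form examples later in this list use a two-argument variant, `check_eval(expr, expected)`, which simply drops the argument list. Details such as the `rtol` default are assumptions, not the test suite's exact definition.

import numpy as np
import tvm
from tvm import relay

def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
    # Run `expr` through the Relay interpreter and compare the numeric result.
    intrp = relay.create_executor(mod=mod, ctx=tvm.cpu(0), target="llvm")
    result = intrp.evaluate(expr)(*args)
    np.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol)
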
Example #4
def test_tuple():
    t = TypeVar("t")
    x = Var("x", t)
    body = TupleGetItem(relay.Tuple([relay.const(4.0), x]), 1)
    f = Function([x], body, None, [t])
    expected = relay.Function([x], x, None, [t])
    expected = transform.OptimizeOnExpr(expected, transform.InferType())
    assert alpha_equal(dcpe(f), expected)
Example #5
def test_let():
    x = relay.Var("x")
    y = relay.Var("y")
    d = relay.const(4.0, 'float32')
    body = relay.Let(y, x, x + y)
    body = relay.Let(x, d, body)
    check_eval(body, 8)
    opt_body = transform.OptimizeOnExpr(body, transform.ToANormalForm())
    check_eval(opt_body, 8)
Example #6
def test_if():
    cond = relay.const(True)
    x = relay.If(cond, relay.const(2), relay.const(3))
    anf = transform.OptimizeOnExpr(
        x, [transform.ToANormalForm(),
            transform.InferType()])
    a = relay.Var('a', relay.IncompleteType())
    b = relay.Var('b', relay.IncompleteType())
    c = relay.Var('c', relay.IncompleteType())
    d = relay.Var('d', relay.IncompleteType())
    true_branch = relay.Let(a, relay.const(2), a)
    false_branch = relay.Let(b, relay.const(3), b)
    expected_output = relay.If(c, true_branch, false_branch)
    expected_output = relay.Let(d, expected_output, d)
    expected_output = relay.Let(c, cond, expected_output)
    expected_output = transform.OptimizeOnExpr(expected_output,
                                               transform.InferType())
    assert alpha_equal(anf, expected_output)
Example #7
def test_function():
    t = relay.TensorType((), 'float32')
    x = relay.Var("x", t)
    f = relay.Function([x], x + x)
    d = relay.const(4.0, 'float32')
    anf_f = transform.OptimizeOnExpr(f, transform.ToANormalForm())
    assert isinstance(anf_f, relay.Function)
    check_eval(f(d), 8)
    check_eval(anf_f(d), 8)
Example #8
def test_empty_ad():
    shape = (10, 10)
    dtype = "float32"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    f = Function([d], d)
    g = dcpe(f, grad=True)
    expected = Function([d], Tuple([d, Tuple([op.ones_like(d)])]))
    expected = transform.OptimizeOnExpr(expected, transform.InferType())
    assert alpha_equal(g, expected)
Example #9
def test_explicit_bound():
    x = relay.const(1)
    y = op.add(x, x)
    z = op.add(y, y)
    f = relay.Function([], op.add(z, z))
    assert not Feature.fLet in detect_feature(f)
    anf = transform.OptimizeOnExpr(f, transform.ToANormalForm())
    assert Feature.fLet in detect_feature(anf)
    check_eval(f(), 8.0)
    check_eval(anf(), 8.0)
Example #10
def test_ref():
    t = relay.TensorType([], "float32")
    d = relay.Var("d", t)
    r = relay.Var("r", relay.RefType(t))
    x = relay.Var("x")
    body = relay.RefRead(r)
    body = Let(x, RefWrite(r, RefRead(r) * RefRead(r)), body)
    body = Let(r, RefCreate(d), body)
    square = Function([d], body)
    expected = transform.OptimizeOnExpr(Function([d], d * d),
                                        transform.InferType())
    assert alpha_equal(dcpe(square), expected)
Example #11
def test_ref():
    i = relay.Var('i')
    iv = relay.Var('iv')
    u = relay.Var('u')
    uv = relay.Var('uv')
    body = relay.add(iv, uv)
    body = relay.Let(uv, relay.RefRead(i), body)
    body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
    body = relay.Let(iv, relay.RefRead(i), body)
    body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
    check_eval(body, 3)
    opt_body = transform.OptimizeOnExpr(body, transform.ToANormalForm())
    check_eval(opt_body, 3)
Example #12
def dcpe(expr, mod=None, grad=False):
    passes = [
        transform.PartialEvaluate(),
        transform.DeadCodeElimination(inline_once=True)
    ]
    if grad:
        expr = gradient(expr)
    if mod:
        assert isinstance(expr, Function)
        mod[mod.entry_func] = expr
        seq = transform.Sequential(passes)
        mod = seq(mod)
        return mod[mod.entry_func]
    return transform.OptimizeOnExpr(expr, passes)
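
`transform.OptimizeOnExpr`, used throughout these examples, applies a single pass or a list of module-level Relay passes to a bare expression; in more recent TVM test code the same job is done by a local `run_opt_pass` helper. The sketch below shows the usual shape of such a helper (lift the expression into a module, run a `Sequential` of the passes, unwrap the result). The exact name and module API (`relay.Module` vs. `tvm.IRModule`, `mod["main"]` vs. `mod.entry_func`) depend on the TVM version, so treat this as an assumption rather than the definitive implementation.

from tvm import relay
from tvm.relay import transform

def run_opt_pass(expr, passes):
    # Hypothetical stand-in for transform.OptimizeOnExpr: lift the expression
    # into a module, run the passes, then pull the rewritten expression back out.
    passes = passes if isinstance(passes, list) else [passes]
    mod = relay.Module.from_expr(expr)
    mod = transform.Sequential(passes)(mod)
    entry = mod["main"]
    # If the input was already a Function, return it whole; otherwise return its body.
    return entry if isinstance(expr, relay.Function) else entry.body
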
Example #13
def test_order():
    z = relay.const(3)
    y = relay.const(2)
    x = relay.const(1)
    val = x + y * z
    check_eval(val, 7.0)
    anf = transform.OptimizeOnExpr(
        val, [transform.ToANormalForm(),
              transform.InferType()])
    a = relay.Var('a', relay.IncompleteType())
    b = relay.Var('b', relay.IncompleteType())
    c = relay.Var('c', relay.IncompleteType())
    d = relay.Var('d', relay.IncompleteType())
    e = relay.Var('e', relay.IncompleteType())
    expected_output = e
    expected_output = relay.Let(e, a + d, expected_output)
    expected_output = relay.Let(d, b * c, expected_output)
    expected_output = relay.Let(c, z, expected_output)
    expected_output = relay.Let(b, y, expected_output)
    expected_output = relay.Let(a, x, expected_output)
    expected_output = transform.OptimizeOnExpr(expected_output,
                                               transform.InferType())
    assert alpha_equal(anf, expected_output)
Example #14
def test_ad():
    shape = (10, 10)
    dtype = "float32"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    f = Function([d], d * d)
    g = dcpe(f, grad=True)
    m = d * d
    x = relay.Var("x")
    o = op.ones_like(x)
    x1 = relay.Var("x1")
    grad = op.zeros_like(d) + op.collapse_sum_like(
        x1 * d, d) + op.collapse_sum_like(x1 * d, d)
    body = Tuple([x, Tuple([grad])])
    body = relay.Let(x1, o, body)
    expected = Function([d], relay.Let(x, m, body))
    expected = transform.OptimizeOnExpr(expected, transform.InferType())
    assert alpha_equal(g, expected)
Example #15
def test_op_let():
    dced = transform.OptimizeOnExpr(add(relay.Let(e.a, e.one, e.three), e.two),
                                    transform.DeadCodeElimination())
    assert alpha_equal(dced, add(e.three, e.two))
Example #16
def test_let():
    orig = relay.Let(e.x, e.y, e.z)
    orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination())
    assert alpha_equal(Function(free_vars(orig), orig), Function([e.z], e.z))
Example #17
def tipe(expr):
    return transform.OptimizeOnExpr(expr, [
        transform.InferType(),
        transform.PartialEvaluate(),
        transform.InferType()
    ])
Example #18
def test_used_let():
    orig = relay.Let(e.c, e.one, e.c + e.c)
    orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination())
    expected = relay.Let(e.c, e.one, e.c + e.c)
    assert alpha_equal(Function([e.c], orig), Function([e.c], expected))
Example #19
def test_inline():
    orig = relay.Let(e.a, e.b, relay.Let(e.c, e.d, e.c))
    orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination())
    assert alpha_equal(Function(free_vars(orig), orig), Function([e.d], e.d))
Example #20
def test_chain_unused_let():
    orig = relay.Let(e.a, e.b, relay.Let(e.c, e.d, e.e))
    orig = transform.OptimizeOnExpr(orig, transform.DeadCodeElimination())
    assert alpha_equal(Function(free_vars(orig), orig), Function([e.e], e.e))