Example #1
def test_checkpoint_alpha_equal():
    xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
    f = relay.Function(xs, relay.annotation.checkpoint(
        relay.multiply(relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3]))
    ))
    df = transform.gradient(run_infer_type(f))

    # run PE and DCE
    with tvm.transform.PassContext(opt_level=3):
        passes = [transform.PartialEvaluate(),
                  transform.DeadCodeElimination(inline_once=True)]
        mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
        df = mod["main"]

    df_parsed = relay.parser.fromtext(
        """
        v0.0.4
        fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
            %z: Tensor[(1), float32], %w: Tensor[(1), float32])
            ->  (Tensor[(1), float32],
                (Tensor[(1), float32], Tensor[(1), float32],
                 Tensor[(1), float32], Tensor[(1), float32])) {
            %0 = add(%x, %y);
            %1 = add(%z, %w);
            let %x1: Tensor[(1), float32] = multiply(%0, %1);
            let %x2: Tensor[(1), float32] = ones_like(%x1);
            let %x3: Tensor[(1), float32] = add(%x, %y);
            let %x4: Tensor[(1), float32] = add(%z, %w);
            %2 = zeros_like(%x3);
            %3 = multiply(%x2, %x4);
            %4 = collapse_sum_like(%3, %x3);
            let %x5: Tensor[(1), float32] = add(%2, %4);
            %5 = zeros_like(%x4);
            %6 = multiply(%x2, %x3);
            %7 = collapse_sum_like(%6, %x4);
            let %x6: Tensor[(1), float32] = add(%5, %7);
            %8 = zeros_like(%x);
            %9 = collapse_sum_like(%x5, %x);
            %10 = add(%8, %9);
            %11 = zeros_like(%y);
            %12 = collapse_sum_like(%x5, %y);
            %13 = add(%11, %12);
            %14 = zeros_like(%z);
            %15 = collapse_sum_like(%x6, %z);
            %16 = add(%14, %15);
            %17 = zeros_like(%w);
            %18 = collapse_sum_like(%x6, %w);
            %19 = add(%17, %18);
            %20 = (%10, %13, %16, %19);
            (%x1, %20)
        }
        """
    )

    tvm.ir.assert_structural_equal(df, df_parsed)
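
These snippets are excerpted from TVM's Relay gradient tests and share a common harness rather than declaring it inline. A minimal preamble that covers most of them, assuming a TVM build where the test utilities live in their usual modules (paths have moved between releases, so verify against your version; a few snippets additionally need extras such as free_vars, detect_feature, or topi):

import numpy as np

import tvm
import tvm.testing
from tvm import relay
from tvm.relay import op, transform
from tvm.relay.testing import create_executor, rand, run_infer_type, run_opt_pass
from tvm.relay.transform import gradient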
Example #2
def test_zeros_ones_grad_const_ints():
    # when shape is static (i.e. not an input), there is no gradient at all
    static_ty = relay.TensorType([2, 3, 4], dtype="float32")
    expected_ty = relay.TupleType([static_ty, relay.TupleType([])])

    for op in [relay.zeros, relay.ones]:
        fwd_func = relay.Function([],
                                  op(static_ty.concrete_shape,
                                     static_ty.dtype))
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
        tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty)
Example #3
def test_zeros_ones_grad_const_expr():
    # when shape is static (i.e. not an input), there is no gradient at all
    shape_const = relay.const(np.array([2, 3, 4]),
                              dtype="int32") * relay.const(1, dtype="int32")
    static_ty = relay.TensorType([2, 3, 4], dtype="float32")
    dyn_ty = relay.TensorType(
        [relay.Any(), relay.Any(), relay.Any()], dtype="float32")
    expected_ty_static = relay.TupleType([static_ty, relay.TupleType([])])
    expected_ty_dyn = relay.TupleType([dyn_ty, relay.TupleType([])])

    for op in [relay.zeros, relay.ones]:
        # with DynamicToStatic, the shape should be concretized
        fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
        fwd_func = run_opt_pass(fwd_func, relay.transform.DynamicToStatic())
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
        tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_static)

        fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
        tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_dyn)
Example #4
def test_concat():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    rt = relay.TensorType((10, 20), dtype)
    x = relay.var("x", t)
    y = op.concatenate([x, x], axis=1)
    func = relay.Function([x], y)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    tvm.ir.assert_structural_equal(back_func.checked_type, relay.FuncType([t], relay.TupleType([rt, relay.TupleType([t])])))
Example #5
def test_square_second_order():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x * x)
    back_func = run_infer_type(gradient(func))
    y = relay.var("y", t)
    back_func_adjusted = relay.Function(
        [y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0))
    back_func_adjusted = run_infer_type(back_func_adjusted)
    back_back_func = run_infer_type(gradient(back_func_adjusted))
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    ex = create_executor()
    forward, (grad_x, ) = ex.evaluate(back_back_func)(x_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
    tvm.testing.assert_allclose(grad_x.asnumpy(),
                                2 * np.ones_like(grad_x.asnumpy()))
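
For reference, the values this test asserts follow directly from f(x) = x * x: differentiating once gives 2 * x (returned as the "forward" output of the doubly differentiated function), and differentiating again gives the constant 2. The same values in a plain NumPy sketch:

import numpy as np

x = np.random.rand(10, 10).astype("float32")
first_grad = 2 * x                 # d(x*x)/dx, the "forward" value checked above
second_grad = 2 * np.ones_like(x)  # d(2*x)/dx, the gradient checked above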
Example #6
def test_if():
    x = relay.var("x", shape=(1, 16, 64, 64))
    y = relay.var("y", shape=(1, 16, 64, 64))
    cond = relay.var("cond", shape=(), dtype='uint1')
    net = relay.If(cond, x, y)
    net = relay.log(net)
    func = relay.Function(free_vars(net), net)
    net = run_infer_type(func)
    net = gradient(net, mode='higher_order')
    net = run_infer_type(net)
Example #7
def test_checkpoint_alpha_equal_tuple():
    xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
    f = relay.Function(
        xs,
        relay.annotation.checkpoint(
            relay.Tuple([relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3])])
        ),
    )
    df = transform.gradient(run_infer_type(f))

    # run PE and DCE
    with tvm.transform.PassContext(opt_level=3):
        # See comment in test_checkpoint_alpha_equal above.
        # TODO(mbs): Revisit once DCE supports dead reference writes.
        passes = [
            transform.PartialEvaluate(),
            transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
        ]
        mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
        df = mod["main"]

    df_parsed = tvm.parser.parse_expr(
        """
        #[version = "0.0.5"]
        fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
            %z: Tensor[(1), float32], %w: Tensor[(1), float32])
            -> ((Tensor[(1), float32], Tensor[(1), float32]),
                (Tensor[(1), float32], Tensor[(1), float32],
                 Tensor[(1), float32], Tensor[(1), float32])) {
        let %x1: Tensor[(1), float32] = add(%x, %y) /* ty=Tensor[(1), float32] */;
        let %x2: Tensor[(1), float32] = add(%z, %w) /* ty=Tensor[(1), float32] */;
        let %x3: Tensor[(1), float32] = zeros_like(%x2) /* ty=Tensor[(1), float32] */;
        let %x4: Tensor[(1), float32] = ones_like(%x1) /* ty=Tensor[(1), float32] */;
        %0 = (%x1, %x2);
        %1 = zeros_like(%x) /* ty=Tensor[(1), float32] */;
        %2 = collapse_sum_like(%x4, %x) /* ty=Tensor[(1), float32] */;
        %3 = add(%1, %2) /* ty=Tensor[(1), float32] */;
        %4 = zeros_like(%y) /* ty=Tensor[(1), float32] */;
        %5 = collapse_sum_like(%x4, %y) /* ty=Tensor[(1), float32] */;
        %6 = add(%4, %5) /* ty=Tensor[(1), float32] */;
        %7 = zeros_like(%z) /* ty=Tensor[(1), float32] */;
        %8 = collapse_sum_like(%x3, %z) /* ty=Tensor[(1), float32] */;
        %9 = add(%7, %8) /* ty=Tensor[(1), float32] */;
        %10 = zeros_like(%w) /* ty=Tensor[(1), float32] */;
        %11 = collapse_sum_like(%x3, %w) /* ty=Tensor[(1), float32] */;
        %12 = add(%10, %11) /* ty=Tensor[(1), float32] */;
        %13 = (%3, %6, %9, %12);
        (%0, %13)
        }
        """
    )

    tvm.ir.assert_structural_equal(df, df_parsed)
Example #8
def dcpe(expr, mod=None, grad=False):
    passes = [transform.PartialEvaluate(),
              transform.DeadCodeElimination(inline_once=True)]
    if grad:
        expr = gradient(run_infer_type(expr))
    if mod:
        assert isinstance(expr, Function)
        mod["main"] = expr
        seq = transform.Sequential(passes)
        mod = seq(mod)
        return mod["main"]
    return run_opt_pass(expr, passes)
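
A hypothetical call sketch for the dcpe helper above; the function being simplified is illustrative, not from the original test file:

x = relay.var("x", relay.TensorType((10, 10), "float32"))
func = relay.Function([x], x * x)
# Differentiate, then clean up with PartialEvaluate + DeadCodeElimination.
simplified = dcpe(func, mod=tvm.IRModule(), grad=True)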
Example #9
def test_checkpoint_alpha_equal_tuple():
    xs = [
        relay.var("x{}".format(i), relay.TensorType((1, ), "float32"))
        for i in range(4)
    ]
    f = relay.Function(
        xs,
        relay.annotation.checkpoint(
            relay.Tuple([relay.add(xs[0], xs[1]),
                         relay.add(xs[2], xs[3])])))
    df = transform.gradient(run_infer_type(f))

    # run PE and DCE
    with transform.PassContext(opt_level=3):
        passes = [
            transform.PartialEvaluate(),
            transform.DeadCodeElimination(inline_once=True)
        ]
        mod = transform.Sequential(passes)(tvm.IRModule.from_expr(df))
        df = mod["main"]

    df_parsed = relay.parser.fromtext("""
        v0.0.4
        fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
            %z: Tensor[(1), float32], %w: Tensor[(1), float32])
            -> ((Tensor[(1), float32], Tensor[(1), float32]),
                (Tensor[(1), float32], Tensor[(1), float32],
                 Tensor[(1), float32], Tensor[(1), float32])) {
        let %x1: Tensor[(1), float32] = add(%x, %y) /* ty=Tensor[(1), float32] */;
        let %x2: Tensor[(1), float32] = add(%z, %w) /* ty=Tensor[(1), float32] */;
        let %x3: Tensor[(1), float32] = zeros_like(%x2) /* ty=Tensor[(1), float32] */;
        let %x4: Tensor[(1), float32] = ones_like(%x1) /* ty=Tensor[(1), float32] */;
        %0 = (%x1, %x2);
        %1 = zeros_like(%x) /* ty=Tensor[(1), float32] */;
        %2 = collapse_sum_like(%x4, %x) /* ty=Tensor[(1), float32] */;
        %3 = add(%1, %2) /* ty=Tensor[(1), float32] */;
        %4 = zeros_like(%y) /* ty=Tensor[(1), float32] */;
        %5 = collapse_sum_like(%x4, %y) /* ty=Tensor[(1), float32] */;
        %6 = add(%4, %5) /* ty=Tensor[(1), float32] */;
        %7 = zeros_like(%z) /* ty=Tensor[(1), float32] */;
        %8 = collapse_sum_like(%x3, %z) /* ty=Tensor[(1), float32] */;
        %9 = add(%7, %8) /* ty=Tensor[(1), float32] */;
        %10 = zeros_like(%w) /* ty=Tensor[(1), float32] */;
        %11 = collapse_sum_like(%x3, %w) /* ty=Tensor[(1), float32] */;
        %12 = add(%10, %11) /* ty=Tensor[(1), float32] */;
        %13 = (%3, %6, %9, %12);
        (%0, %13)
        }
        """)

    relay.analysis.assert_alpha_equal(df, df_parsed)
Example #10
def test_add():
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x + x)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])]))
    x = rand(dtype, *shape)
    forward, (grad, ) = create_executor().evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.numpy(), 2 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 2 * np.ones_like(x.numpy()))
Example #11
def test_sub():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x - x)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor()
    x = rand(dtype, *shape)
    forward, (grad,) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.asnumpy(), np.zeros_like(x.asnumpy()))
    tvm.testing.assert_allclose(grad.asnumpy(), np.zeros_like(x.asnumpy()))
Example #12
def test_zeros_ones_grad_dynamic():
    rank = np.random.randint(low=1, high=5, dtype="int32")
    dyn_shape = np.random.randint(low=1, high=4, size=(rank,), dtype="int32")
    shape_data = relay.var("shape_data", shape=(rank,), dtype="int32")

    for op, op_ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
        fwd_func = relay.Function([shape_data], op(shape_data, dtype="float32"))
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))

        for target, ctx in tvm.testing.enabled_targets():
            intrp = relay.create_executor(ctx=ctx, target=target)
            res, (grad,) = intrp.evaluate(bwd_func)(dyn_shape)
            tvm.testing.assert_allclose(res.asnumpy(), op_ref(dyn_shape, dtype="float32"))
            tvm.testing.assert_allclose(grad.asnumpy(), np.zeros((rank,), dtype="int32"))
Example #13
def test_ad():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x + x)
    mod = relay.Module.from_expr(gradient(func))
    mod = relay.transform.InferType()(mod)
    back_func = mod["main"]
    feats = detect_feature(back_func)
    assert feats == set([
        Feature.fVar, Feature.fTuple, Feature.fTupleGetItem, Feature.fFunction,
        Feature.fOp, Feature.fCall, Feature.fLet, Feature.fRefCreate,
        Feature.fRefRead, Feature.fRefWrite
    ])
Example #14
def test_grad_tuple():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = x + x
    func = relay.Function([x], relay.Tuple([y + y, y]))
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([relay.TupleType([t, t]), relay.TupleType([t])]))
    ex = create_executor()
    x = rand(dtype, *shape)
    (forward_four, forward_two), (grad,) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward_four.asnumpy(), 4 * x.asnumpy())
    tvm.testing.assert_allclose(forward_two.asnumpy(), 2 * x.asnumpy())
    tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
Example #15
def test_clip():
    ref = (lambda x: np.where(x > 10.0, np.zeros_like(x),
                     np.where(x < 1.0, np.zeros_like(x), np.ones_like(x))))
    x = relay.var("x", relay.TensorType((10, 4), "float32"))
    y = tvm.relay.clip(x, 1.0, 10.0)

    data = np.random.rand(10, 4).astype("float32") * 11.0
    ref_grad = ref(data)
    fwd_func = relay.Function([x], y)
    fwd_func = run_infer_type(fwd_func)
    bwd_func = run_infer_type(gradient(fwd_func))

    for target, ctx in ctx_list():
        intrp = relay.create_executor(ctx=ctx, target=target)
        op_res, (op_grad, ) = intrp.evaluate(bwd_func)(data)
        np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01)
Example #16
def _test_tuple(mode):
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    z = relay.var("z", t)
    if mode == "higher_order":
        tup = relay.Var("tup")
        func = relay.Function(
            [x, y, z],
            relay.Let(
                tup,
                relay.Tuple([x, y, z]),
                relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1) -
                relay.TupleGetItem(tup, 2),
            ),
        )
    else:
        # first order does not do let.
        tup = relay.Tuple([x, y, z])
        func = relay.Function(
            [x, y, z],
            relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1) -
            relay.TupleGetItem(tup, 2),
        )
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func, mode=mode))
    assert back_func.checked_type == relay.FuncType(
        [t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])]))
    x_nd = rand(dtype, *shape)
    y_nd = rand(dtype, *shape)
    z_nd = rand(dtype, *shape)
    x_np = x_nd.asnumpy()
    y_np = y_nd.asnumpy()
    z_np = z_nd.asnumpy()
    expected_forward = x_np + y_np - z_np
    ex = create_executor()
    forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd,
                                                               z_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
    tvm.testing.assert_allclose(grad_x.asnumpy(),
                                np.ones_like(grad_x.asnumpy()))
    tvm.testing.assert_allclose(grad_y.asnumpy(),
                                np.ones_like(grad_y.asnumpy()))
    tvm.testing.assert_allclose(grad_z.asnumpy(),
                                -1 * np.ones_like(grad_z.asnumpy()))
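
The mode-parametrized helpers on this page, such as _test_tuple above, are presumably driven by thin per-mode wrappers; a sketch, with the wrapper names being assumptions:

def test_tuple_first_order():
    _test_tuple("first_order")


def test_tuple_higher_order():
    _test_tuple("higher_order")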
Example #17
def verify_avg_pool2d_grad(
    x_shape,
    pool_size,
    strides,
    padding,
    ceil_mode,
    count_include_pad,
    executor_kind,
    dtype="float32",
):

    for shape_dtype in ["int32", "int64"]:
        x = relay.var("x",
                      shape=[tvm.tir.IntImm(shape_dtype, x) for x in x_shape],
                      dtype=dtype)
        y = tvm.relay.nn.avg_pool2d(
            x,
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
        )

        fwd_func = relay.Function([x], y)
        fwd_func = run_infer_type(fwd_func)
        bwd_func = run_infer_type(gradient(fwd_func))

        data = np.random.rand(*x_shape).astype(dtype)
        ph, pw = padding
        y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
        out_grad = np.ones(shape=y_shape)
        ref_grad = tvm.topi.testing.pool_grad_nchw(
            data,
            out_grad,
            pool_size=pool_size,
            strides=strides,
            padding=[ph, pw, ph, pw],
            pool_type="avg",
            ceil_mode=ceil_mode,
        )

        for target, dev in tvm.testing.enabled_targets():
            op_res, (op_grad, ) = relay.create_executor(
                executor_kind, device=dev,
                target=target).evaluate(bwd_func)(data)
            np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
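
A hypothetical invocation of the helper above; the shape and pooling parameters are illustrative, and "debug" is one of the executor kinds relay.create_executor accepts:

verify_avg_pool2d_grad(
    (1, 4, 16, 16),
    pool_size=(2, 2),
    strides=(2, 2),
    padding=(0, 0),
    ceil_mode=False,
    count_include_pad=True,
    executor_kind="debug",
)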
Example #18
    def check_single_op(opfunc, ref):
        shape = (10, 4)
        dtype = 'float32'
        tp = relay.TensorType(shape, dtype)
        x = relay.var("x", tp)
        y = opfunc(x)

        if ref is not None:
            data = np.random.rand(*shape).astype(dtype)
            ref_grad = ref(data)
            fwd_func = relay.Function([x], y)
            bwd_func = run_infer_type(gradient(fwd_func))

            for target, ctx in ctx_list():
                intrp = relay.create_executor(ctx=ctx, target=target)
                op_res, (op_grad, ) = intrp.evaluate(bwd_func)(data)
                np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01)
Example #19
    def test_binary_op(self, target, dev, relay_op, ref_func, shape, dtype):
        t = relay.TensorType(shape, dtype=dtype)
        x = relay.var("x", t)
        y = relay.var("y", t)
        z = relay_op(x, y)

        x_data = np.random.rand(*shape).astype(t.dtype)
        y_data = np.random.rand(*shape).astype(t.dtype)
        ref_grad0, ref_grad1 = ref_func(x_data, y_data)
        fwd_func = relay.Function([x, y], z)
        fwd_func = run_infer_type(fwd_func)
        bwd_func = run_infer_type(gradient(fwd_func))

        op_res, (op_grad0, op_grad1) = relay.create_executor(
            device=dev, target=target).evaluate(bwd_func)(x_data, y_data)
        np.testing.assert_allclose(op_grad0.numpy(), ref_grad0, rtol=0.01)
        np.testing.assert_allclose(op_grad1.numpy(), ref_grad1, rtol=0.01)
Example #20
def test_temp_add():
    scope = relay.ScopeBuilder()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = scope.let("y", x + x)
    scope.ret(y + y)
    func = relay.Function([x], scope.get())
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    ex = create_executor()
    x = rand(dtype, *shape)
    forward, (grad,) = ex.evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
    tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
Example #21
def test_partial_eval():
    """Test transformation following reverse mode ad and PartialEval"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    func = relay.Function([], relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    back_func = mod["main"]

    transform.PartialEvaluate()(mod)
Example #22
    def check_binary_op(opfunc, ref):
        s = (5, 10, 5)
        t = relay.TensorType((5, 10, 5))
        x = relay.var("x", t)
        y = relay.var("y", t)
        z = opfunc(x, y)

        x_data = np.random.rand(*s).astype(t.dtype)
        y_data = np.random.rand(*s).astype(t.dtype)
        ref_grad0, ref_grad1 = ref(x_data, y_data)
        fwd_func = relay.Function([x, y], z)
        bwd_func = run_infer_type(gradient(fwd_func))

        for target, ctx in ctx_list():
            intrp = relay.create_executor(ctx=ctx, target=target)
            op_res, (op_grad0, op_grad1) = intrp.evaluate(bwd_func)(x_data, y_data)
            np.testing.assert_allclose(op_grad0.asnumpy(), ref_grad0, rtol=0.01)
            np.testing.assert_allclose(op_grad1.asnumpy(), ref_grad1, rtol=0.01)
Example #23
def test_clip():
    for dtype in ("float32", "float64"):
        ref = lambda x: np.where(
            x > 10.0, np.zeros_like(x),
            np.where(x < 1.0, np.zeros_like(x), np.ones_like(x)))
        x = relay.var("x", relay.TensorType((10, 4), dtype))
        y = tvm.relay.clip(x, 1.0, 10.0)

        data = np.random.rand(10, 4).astype(dtype) * 11.0
        ref_grad = ref(data)
        fwd_func = relay.Function([x], y)
        fwd_func = run_infer_type(fwd_func)
        bwd_func = run_infer_type(gradient(fwd_func))

        for target, dev in tvm.testing.enabled_targets():
            op_res, (op_grad, ) = relay.create_executor(
                device=dev, target=target).evaluate(bwd_func)(data)
            np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
Example #24
def test_after_partial_eval():
    """Test transformation following reverse mode ad and PartialEval"""
    mod = tvm.IRModule()

    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)

    x = relay.var("x", t)
    y = relay.var("y", t)

    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)

    mod["main"] = back_func
    back_func = mod["main"]

    seq = tvm.transform.Sequential(
        [
            transform.PartialEvaluate(),
            transform.InferType(),
            transform.LazyGradientInit(),
            transform.InferType(),
            transform.DeadCodeElimination(),
            transform.InferType(),
        ]
    )

    mod = seq(mod)

    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )

    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    (forward), (grad_x, grad_y,) = create_executor(mod=mod).evaluate(
        back_func
    )(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
Example #25
def verify_global_avg_pool2d_grad(x_shape):
    x = relay.var("x", relay.TensorType(x_shape, "float32"))
    y = tvm.relay.nn.global_avg_pool2d(x)

    fwd_func = relay.Function([x], y)
    fwd_func = run_infer_type(fwd_func)
    bwd_func = run_infer_type(gradient(fwd_func))

    data = np.random.rand(*x_shape).astype("float32")
    y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
    out_grad = np.ones(shape=y_shape)
    ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=(x_shape[2], x_shape[3]),
                                            strides=(1, 1), padding=[0, 0, 0, 0], pool_type='avg',
                                            ceil_mode=False)

    for target, ctx in ctx_list():
        intrp = relay.create_executor(ctx=ctx, target=target)
        op_res, (op_grad, ) = intrp.evaluate(bwd_func)(data)
        np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01)
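
A hypothetical invocation of this helper, with an illustrative NCHW input shape:

verify_global_avg_pool2d_grad((4, 1, 16, 16))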
Example #26
def test_ref():
    shape = (10, 10)
    dtype = 'float32'
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    r = relay.Var("r")
    u = relay.Var("u")
    body = relay.RefRead(r)
    body = relay.Let(u, relay.RefWrite(r, relay.RefRead(r) + relay.RefRead(r)), body)
    body = relay.Let(r, relay.RefCreate(x), body)
    func = relay.Function([x], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    ex = create_executor()
    forward, (grad_x,) = ex.evaluate(back_func)(x_nd)
    tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
    tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
Example #27
    def check_single_op(opfunc, ref, dtype):
        shape = (10, 4)
        tp = relay.TensorType(shape, dtype)
        x = relay.var("x", tp)
        g = relay.var("g", tp)
        y = opfunc(x) * g

        if ref is not None:
            data = np.random.rand(*shape).astype(dtype)
            grad_in = np.random.rand(*shape).astype(dtype)
            ref_grad = ref(data, grad_in)
            fwd_func = relay.Function([x, g], y)
            fwd_func = run_infer_type(fwd_func)
            bwd_func = run_infer_type(gradient(fwd_func))

            for target, ctx in tvm.testing.enabled_targets():
                intrp = relay.create_executor(ctx=ctx, target=target)
                op_res, (op_grad, _) = intrp.evaluate(bwd_func)(data, grad_in)
                np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01)
Example #28
def test_grad_tuple():
    scope = relay.ScopeBuilder()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = scope.let("y", x + x)
    scope.ret(relay.Tuple([y + y, y]))
    func = relay.Function([x], scope.get())
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([relay.TupleType([t, t]),
                              relay.TupleType([t])]))
    x = rand(dtype, *shape)
    (forward_four,
     forward_two), (grad, ) = create_executor().evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward_four.numpy(), 4 * x.numpy())
    tvm.testing.assert_allclose(forward_two.numpy(), 2 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
Example #29
    def check_binary_op(opfunc, ref, dtype):
        s = (5, 10, 5)
        t = relay.TensorType((5, 10, 5), dtype=dtype)
        x = relay.var("x", t)
        y = relay.var("y", t)
        z = opfunc(x, y)

        x_data = np.random.rand(*s).astype(t.dtype)
        y_data = np.random.rand(*s).astype(t.dtype)
        ref_grad0, ref_grad1 = ref(x_data, y_data)
        fwd_func = relay.Function([x, y], z)
        fwd_func = run_infer_type(fwd_func)
        bwd_func = run_infer_type(gradient(fwd_func))

        for target, dev in tvm.testing.enabled_targets():
            op_res, (op_grad0, op_grad1) = relay.create_executor(
                device=dev, target=target
            ).evaluate(bwd_func)(x_data, y_data)
            np.testing.assert_allclose(op_grad0.numpy(), ref_grad0, rtol=0.01)
            np.testing.assert_allclose(op_grad1.numpy(), ref_grad1, rtol=0.01)
Example #30
def _test_tuple_argument(mode):
    shape = (2, 3)
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    fields = 3
    tuple_type = relay.TupleType([tensor_type] * fields)
    tup = relay.var("tup", type_annotation=tuple_type)
    body = relay.TupleGetItem(tup, 0)
    for i in range(1, fields):
        body = relay.add(body, relay.TupleGetItem(tup, i))
    func = relay.Function([tup], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func, mode=mode))
    xs = [rand(dtype, *shape) for _ in range(fields)]
    xs_np = np.array([x.numpy() for x in xs])
    expected_forward = np.sum(xs_np, axis=0)
    forward, grad = create_executor().evaluate(back_func)(tuple(xs))
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    for field in grad[0]:
        tvm.testing.assert_allclose(field.numpy(), np.ones_like(field.numpy()))