# Common imports assumed by the examples on this page (standard TVM Relay test setup;
# helpers such as run_infer_type and run_opt_pass live in tvm.relay.testing).
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type, run_opt_pass


def test_dyn_broadcast_to():
    dtype = 'uint8'
    rank = 3
    shape_type = 'int64'
    dyn_shape = relay.Var("shape", relay.ty.TensorType((rank, ), shape_type))
    x_shape = (1, )
    x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
    z = relay.broadcast_to(x, dyn_shape)
    zz = run_infer_type(z)

    assert zz.checked_type == relay.ty.TensorType((relay.Any(), ) * rank,
                                                  dtype)

    func = relay.Function([x, dyn_shape], z)

    x = np.random.uniform(size=x_shape).astype(dtype)
    dyn_shape = (1, ) * rank
    ref_res = np.broadcast_to(x, dyn_shape)
    for target, ctx in tvm.testing.enabled_targets():
        # skip cuda because we don't have dynamic support for GPU
        if target != "cuda":
            for kind in ["vm", "debug"]:
                mod = tvm.ir.IRModule.from_expr(func)
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              ctx=ctx,
                                              target=target)
                op_res = intrp.evaluate(func)(
                    x, np.array(dyn_shape).astype(shape_type))
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            ref_res,
                                            rtol=1e-5)
Example #2
    def verify_broadcast_to(x_shape, out_shape):
        rank = len(out_shape)
        dtype = "float32"
        shape_type = "int64"
        dyn_shape = relay.Var("shape", relay.ty.TensorType((rank, ),
                                                           shape_type))
        x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
        z = relay.broadcast_to(x, dyn_shape)
        zz = run_infer_type(z)

        assert zz.checked_type == relay.ty.TensorType((relay.Any(), ) * rank,
                                                      dtype)

        func = relay.Function([x, dyn_shape], z)

        x = np.random.uniform(size=x_shape).astype(dtype)
        ref_res = np.broadcast_to(x, out_shape)
        for target, ctx in tvm.testing.enabled_targets():
            for kind in ["vm", "debug"]:
                mod = tvm.ir.IRModule.from_expr(func)
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              ctx=ctx,
                                              target=target)
                op_res = intrp.evaluate(func)(
                    x, np.array(out_shape).astype(shape_type))
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            ref_res,
                                            rtol=1e-5)
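    # Usage sketch (not part of the original snippet): the nested helper above is
    # typically exercised with a few shape pairs; the values below are illustrative
    # assumptions only.
    verify_broadcast_to((1,), (1, 1, 1))
    verify_broadcast_to((4, 1), (4, 5))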
Example #3
def test_dyn_broadcast_to():
    dtype = "uint8"
    rank = 3
    shape_type = "int64"
    dyn_shape = relay.Var("shape", relay.ty.TensorType((rank, ), shape_type))
    x_shape = (1, )
    x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
    z = relay.broadcast_to(x, dyn_shape)
    zz = run_infer_type(z)

    assert zz.checked_type == relay.ty.TensorType((relay.Any(), ) * rank,
                                                  dtype)

    func = relay.Function([x, dyn_shape], z)

    x = np.random.uniform(size=x_shape).astype(dtype)
    dyn_shape = (1, ) * rank
    ref_res = np.broadcast_to(x, dyn_shape)
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["vm", "debug"]:
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor(
                kind, mod=mod, device=dev, target=target).evaluate(func)(
                    x, np.array(dyn_shape).astype(shape_type))
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
Example #4
    def verify_more_dynamic_broadcast_to(x_shape, out_shape):
        rank = len(out_shape)
        dtype = "float32"
        shape_type = "int64"
        reshape_shape = relay.Var(
            "shape", relay.ty.TensorType((len(x_shape), ), shape_type))
        broadcast_shape = relay.Var("shape",
                                    relay.ty.TensorType((rank, ), shape_type))
        x = relay.Var("x", relay.ty.TensorType((np.prod(x_shape), ), dtype))
        r = relay.reshape(x, reshape_shape)
        z = relay.broadcast_to(r, broadcast_shape)

        func = relay.Function([x, reshape_shape, broadcast_shape], z)

        x = np.random.uniform(size=np.prod(x_shape)).astype(dtype)
        ref_res = np.broadcast_to(np.reshape(x, x_shape), out_shape)
        for target, ctx in tvm.testing.enabled_targets():
            for kind in ["vm", "debug"]:
                mod = tvm.ir.IRModule.from_expr(func)
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              ctx=ctx,
                                              target=target)
                op_res = intrp.evaluate(func)(
                    x, np.array(x_shape).astype(shape_type),
                    np.array(out_shape).astype(shape_type))
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            ref_res,
                                            rtol=1e-5)
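    # Usage sketch (not part of the original snippet): the helper reshapes a flat
    # buffer to x_shape at runtime and then broadcasts it to out_shape; these
    # shapes are illustrative assumptions only.
    verify_more_dynamic_broadcast_to((4, 3), (3, 4, 3))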
Example #5
def test_concretize_broadcast_to_like():
    data = relay.var("data", shape=(3, ), dtype="float32")
    shape_like = relay.var("shape_like", shape=(3, 3, 3), dtype="float32")
    expr = relay.broadcast_to_like(data, shape_like)

    expected = run_infer_type(relay.broadcast_to(data, (3, 3, 3)))
    actual = run_opt_pass(expr, relay.transform.SimplifyExpr())
    assert tvm.ir.structural_equal(actual, expected)
Example #6
def test_broadcast_to_const_shape_int64():
    shape_like = relay.const(np.array([1, 5]), dtype="int64")
    x = relay.var("x", shape=(1,), dtype="int64")
    z = relay.broadcast_to(x, shape=shape_like)
    z = relay.sum(z, axis=0)

    f = relay.Function([x], z)

    x = np.random.randint(10, size=(1,), dtype="int64")
    ref_res = np.broadcast_to(x, (5,))
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["graph", "debug"]:
            op_res = relay.create_executor(kind, device=dev, target=target).evaluate(f)(x)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res)
Example #7
def test_broadcast_to():
    shape = (4, 1, 6)
    shape_like = (3, 4, 5, 6)
    dtype = "float32"
    x = relay.Var("x", relay.ty.TensorType(shape, dtype))
    z = relay.broadcast_to(x, shape=shape_like)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)

    func = relay.Function([x], z)
    x = np.random.uniform(size=shape).astype(dtype)
    ref_res = np.broadcast_to(x, shape_like)
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["graph", "debug"]:
            op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)(x)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
Example #8
def test_broadcast_to():
    shape = (4, 1, 6)
    shape_like = (3, 4, 5, 6)
    dtype = "float32"
    x = relay.Var("x", relay.ty.TensorType(shape, dtype))
    z = relay.broadcast_to(x, shape=shape_like)
    zz = relay.ir_pass.infer_type(z)
    assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)

    func = relay.Function([x], z)
    x = np.random.uniform(size=shape).astype(dtype)
    ref_res = np.broadcast_to(x, shape_like)
    for target, ctx in ctx_list():
        for kind in ["graph", "debug"]:
            intrp = relay.create_executor(kind, ctx=ctx, target=target)
            op_res = intrp.evaluate(func)(x)
            tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
Example #9
    def verify_broadcast_to(shape, broadcast_shape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(broadcast_shape, "float32"))
        z = relay.broadcast_to(x, shape=relay.shape_of(y))

        func = run_infer_type(relay.Function([x, y], z))
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())

        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("broadcast_to")
        assert zz.checked_type == relay.ty.TensorType(broadcast_shape, "float32")

        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=broadcast_shape).astype("float32")

        ref_res = np.broadcast_to(x_data, y_data.shape)
        verify_func(func2, [x_data, y_data], ref_res)
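# verify_func is not defined in this snippet. A plausible sketch, assuming it merely
# compiles the function on every enabled target and compares the result against the
# NumPy reference (the details here are an assumption, not the original helper):
def verify_func(func, data, ref_res, rtol=1e-5):
    mod = tvm.ir.IRModule.from_expr(func)
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["vm", "debug"]:
            # Evaluate main (the function under test) on the supplied inputs.
            op_res = relay.create_executor(
                kind, mod=mod, device=dev, target=target).evaluate()(*data)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol)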
Example #10
def test_broadcast_pool2d_shape_int64(executor_kind):
    x_shape = (1, 3, 32, 32)
    out_shape = (2, 3, 32, 32)
    x = relay.var("data", shape=x_shape, dtype="float32")
    broadcast_to = relay.broadcast_to(x,
                                      shape=relay.const([2, 3, 32, 32],
                                                        dtype="int64"))
    pool2d = relay.nn.max_pool2d(broadcast_to,
                                 pool_size=(3, 3),
                                 padding=(1, 1, 1, 1))
    sub = relay.subtract(broadcast_to, pool2d)

    f = relay.Function([x], sub)
    x = np.ones(x_shape).astype("float32")
    ref_res = np.zeros(out_shape).astype("float32")

    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor(executor_kind,
                                       device=dev,
                                       target=target).evaluate(f)(x)
        tvm.testing.assert_allclose(op_res.numpy(), ref_res)
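# executor_kind is a pytest fixture in TVM's test suite rather than a plain argument;
# a minimal sketch of how it is typically supplied (assuming tvm.testing.parameter):
executor_kind = tvm.testing.parameter("graph", "vm")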