Example #1
def test_log_pow_llvm():
    """Test log pow using llvm to lower."""
    # graph
    n = te.size_var("n")
    A = te.placeholder((n, ), name="A")
    B = te.compute(A.shape, lambda *i: te.power(te.log(A(*i)), 2.0), name="B")
    s = te.create_schedule(B.op)
    # create iter var and assign them tags.
    bx, tx = s[B].split(B.op.axis[0], factor=32)
    # one line to build the function.
    if not tvm.testing.device_enabled("llvm"):
        return

    flog = tvm.build(s, [A, B], "llvm", name="mylog")
    ctx = tvm.cpu(0)
    # launch the kernel.
    n = 1028
    a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
    b = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)
    repeat = 10
    ftimer = flog.time_evaluator(flog.entry_name, ctx, number=1, repeat=repeat)
    res = ftimer(a, b)
    assert len(res.results) == repeat
    tvm.testing.assert_allclose(b.asnumpy(),
                                np.power(np.log(a.asnumpy()), 2.0),
                                rtol=1e-5)
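These snippets are shown without their import preamble. A minimal set of imports that makes them runnable against a stock TVM install would be the following (the helpers check_throws and check_grad used in later examples come from their original test modules and are not covered by these imports):

import numpy as np
import tvm
import tvm.testing
from tvm import te, topi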
Example #2
def test_log_pow_llvm():
    """Test log pow using llvm to lower."""
    # graph
    size_var_n = te.size_var("n")
    placeholder_a = te.placeholder((size_var_n, ), name="A")
    result_b = te.compute(placeholder_a.shape,
                          lambda *i: te.power(te.log(placeholder_a(*i)), 2.0),
                          name="B")
    schedule = te.create_schedule(result_b.op)
    # create iter var and assign them tags.
    schedule[result_b].split(result_b.op.axis[0], factor=32)
    # one line to build the function.
    if not tvm.testing.device_enabled("llvm"):
        return

    flog = tvm.build(schedule, [placeholder_a, result_b], "llvm", name="mylog")
    dev = tvm.cpu(0)
    # launch the kernel.
    size_var_n = 1028
    buff_a = tvm.nd.array(
        np.random.uniform(size=size_var_n).astype(placeholder_a.dtype), dev)
    buff_b = tvm.nd.array(np.zeros(size_var_n, dtype=result_b.dtype), dev)
    repeat = 10
    ftimer = flog.time_evaluator(flog.entry_name, dev, number=1, repeat=repeat)
    res = ftimer(buff_a, buff_b)
    assert len(res.results) == repeat
    tvm.testing.assert_allclose(buff_b.numpy(),
                                np.power(np.log(buff_a.numpy()), 2.0),
                                rtol=1e-5)
Example #3
def test_binary_dtype_match():
    """Check dtype promotion and matching rules for binary TIR ops."""

    def verify_general_dtype_support(f, is_conditional=False):
        rules = [
            [("bool", "int32"), "int32"],
            [("int32", "float32"), "float32"],
            [("int32", "int64"), "int64"],
            [("uint32", "int32"), "int32"],
        ]
        for (lhs_dtype, rhs_dtype), out_dtype in rules:
            lhs = te.var("lhs", dtype=lhs_dtype)
            rhs = te.var("rhs", dtype=rhs_dtype)
            out = f(lhs, rhs)
            if not is_conditional:
                assert out.dtype == out_dtype
            else:
                assert out.dtype == "bool"
            if hasattr(out, "a"):
                assert out.a.dtype == out_dtype
                assert out.b.dtype == out_dtype
            elif hasattr(out, "args"):
                # CallOp
                assert out.args[0].dtype == out_dtype
                assert out.args[1].dtype == out_dtype
            else:
                raise ValueError("Unknown binary op format!")

    def verify_callop_float_only(f):
        for lhs_dtype in ["int32", "float32", "float64"]:
            for rhs_dtype in ["int32", "float32", "float64"]:
                lhs = te.var("lhs", dtype=lhs_dtype)
                rhs = te.var("rhs", dtype=rhs_dtype)
                if "float" not in lhs_dtype and "float" not in rhs_dtype:
                    check_throws(lambda: f(lhs, rhs))
                elif "float" in lhs_dtype:
                    out = f(lhs, rhs)

                    # Upcasting for floating point types
                    dtypes = [lhs_dtype, rhs_dtype]
                    if "float64" in dtypes:
                        target_dtype = "float64"
                    elif "float32" in dtypes:
                        target_dtype = "float32"
                    else:
                        target_dtype = "int32"
                    assert out.dtype == target_dtype

                    # Final inputs are the right type
                    assert out.args[0].dtype == target_dtype
                    assert out.args[1].dtype == target_dtype
                else:
                    out = f(lhs, rhs)
                    assert out.dtype == rhs_dtype
                    assert out.args[0].dtype == rhs_dtype
                    assert out.args[1].dtype == rhs_dtype

    verify_general_dtype_support(lambda a, b: a + b)
    verify_general_dtype_support(lambda a, b: a * b)
    verify_general_dtype_support(lambda a, b: a >= b, is_conditional=True)
    verify_general_dtype_support(lambda a, b: a <= b, is_conditional=True)
    verify_callop_float_only(lambda a, b: te.power(a, b))
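test_binary_dtype_match relies on a check_throws helper defined elsewhere in its test module. It is presumably a small assertion wrapper along these lines (a sketch under that assumption, not the repository's exact code):

def check_throws(f):
    # Run the thunk and expect a TVM error; fail the test if none is raised.
    try:
        f()
    except tvm.error.TVMError:
        pass
    else:
        raise AssertionError("Expected a TVMError but none was raised")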
Example #4
def test_binary_dtype_match():
    """Check dtype promotion and matching rules for binary TIR ops."""

    def verify_general_dtype_support(f, is_conditional=False):
        rules = [
            [('bool', 'int32'), 'int32'],
            [('int32', 'float32'), 'float32'],
            [('int32', 'int64'), 'int64'],
            [('uint32', 'int32'), 'int32'],
        ]
        for (lhs_dtype, rhs_dtype), out_dtype in rules:
            lhs = te.var('lhs', dtype=lhs_dtype)
            rhs = te.var('rhs', dtype=rhs_dtype)
            out = f(lhs, rhs)
            if not is_conditional:
                assert out.dtype == out_dtype
            else:
                assert out.dtype == 'bool'
            if hasattr(out, 'a'):
                assert out.a.dtype == out_dtype
                assert out.b.dtype == out_dtype
            elif hasattr(out, 'args'):
                # CallOp
                assert out.args[0].dtype == out_dtype
                assert out.args[1].dtype == out_dtype
            else:
                raise ValueError('Unknown binary op format!')

    def verify_callop_float_only(f):
        for lhs_dtype in ['int32', 'float32', 'float64']:
            for rhs_dtype in ['int32', 'float32', 'float64']:
                lhs = te.var('lhs', dtype=lhs_dtype)
                rhs = te.var('rhs', dtype=rhs_dtype)
                if 'float' not in lhs_dtype and 'float' not in rhs_dtype:
                    check_throws(lambda: f(lhs, rhs))
                elif 'float' in lhs_dtype and 'float' in rhs_dtype and lhs_dtype != rhs_dtype:
                    check_throws(lambda: f(lhs, rhs))
                elif 'float' in lhs_dtype:
                    out = f(lhs, rhs)
                    assert out.dtype == lhs_dtype
                    assert out.args[0].dtype == lhs_dtype
                    assert out.args[1].dtype == lhs_dtype
                else:
                    out = f(lhs, rhs)
                    assert out.dtype == rhs_dtype
                    assert out.args[0].dtype == rhs_dtype
                    assert out.args[1].dtype == rhs_dtype

    verify_general_dtype_support(lambda a, b: a + b)
    verify_general_dtype_support(lambda a, b: a * b)
    verify_general_dtype_support(lambda a, b: a >= b, is_conditional=True)
    verify_general_dtype_support(lambda a, b: a <= b, is_conditional=True)
    verify_callop_float_only(lambda a, b: te.power(a, b))
def test_basic_operation():
    """Exercise check_grad on elementwise, reduction, and custom-reducer computes."""
    np.random.seed(0)
    shape = (10, 10)
    x = te.var("x", dtype='float32')
    k = te.reduce_axis((0, 10), name="k")
    l = te.reduce_axis((0, 10), name="l")
    A0 = te.placeholder(shape, name='A0')
    A1 = te.placeholder(shape, name='A1')
    zeros = np.zeros(shape)

    B = te.compute(shape, lambda i, j: A0[i, j], name='B')
    check_grad(B, [A0])

    B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name='B')
    check_grad(B, [A0, A1])

    B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name='B')
    check_grad(B, A0)

    B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name='B')
    check_grad(B, A0)

    B = te.compute(
        shape,
        lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))),
        name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0, data_range=(0.1, 10))

    B = te.compute(shape,
                   lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]),
                   name='B')
    check_grad(B, A0, data_range=(-4, 4))

    B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name='B')
    check_grad(B, A0)

    B = te.compute((10, ),
                   lambda i: te.sum(A0[i, k] * A0[k, i], axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]),
                   name='B')
    check_grad(B, [A0, A1])

    B = te.compute(shape,
                   lambda i, j: te.sum(
                       A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k),
                   name='B')
    check_grad(B, A0)

    def fcombine(x, y):
        return x * y

    def fidentity(t0):
        return tvm.tir.const(1, t0)

    prod = te.comm_reducer(fcombine, fidentity, name='prod')
    B = te.compute((10, 10),
                   lambda i, j: prod(A0[i, k] + A0[k, i], axis=k),
                   name='B')
    check_grad(B, A0)

    X = te.placeholder((10, ), name='X')
    A = te.compute((10, ), lambda i: X[i] + X[9 - i])
    B = te.compute((10, ), lambda i: X[i] * X[9 - i])
    Y = topi.tensordot(A, B, 1)
    check_grad(Y, X)
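test_basic_operation drives everything through a check_grad helper defined in its original autodiff test module (not shown here), which presumably builds gradients with te.gradient and compares them against numeric estimates and the optional desired_grads. A minimal, self-contained sketch of the underlying API it exercises (an illustrative example, not the helper itself):

import numpy as np
import tvm
from tvm import te

shape = (10, 10)
A0 = te.placeholder(shape, name="A0")
B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name="B")
[dB] = te.gradient(B, [A0])    # full Jacobian tensor of shape (10, 10, 10, 10)
s = te.create_schedule(dB.op)
f = tvm.build(s, [A0, dB], "llvm")
a = tvm.nd.array(np.random.uniform(size=shape).astype(A0.dtype))
g = tvm.nd.array(np.zeros(shape + shape, dtype=dB.dtype))
f(a, g)                        # g[i, j, k, l] = d B[i, j] / d A0[k, l]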