Example #1
File: lstm.py  Project: ryansoq/nixtvm
def lstm_driver():
    bg = tvm.placeholder((10, 1), name='bg')
    Ug = tvm.placeholder((10, 10), name='Ug')
    Vg = tvm.placeholder((10, 10), name='Vg')

    bi = tvm.placeholder((10, 1), name='bi')
    Ui = tvm.placeholder((10, 10), name='Ui')
    Vi = tvm.placeholder((10, 10), name='Vi')

    bf = tvm.placeholder((10, 1), name='bf')
    Uf = tvm.placeholder((10, 10), name='Uf')
    Vf = tvm.placeholder((10, 10), name='Vf')

    bo = tvm.placeholder((10, 1), name='bo')
    Uo = tvm.placeholder((10, 10), name='Uo')
    Vo = tvm.placeholder((10, 10), name='Vo')

    x = tvm.placeholder((10, 1), name='x')
    s = tvm.placeholder((10, 1), name='s')
    h = tvm.placeholder((10, 1), name='h')

    s2, h2 = lstm_cell(bg, Ug, Vg, bi, Ui, Vi, bf, Uf, Vf, bo, Uo, Vo, x, s, h)
    print(s2, h2)

    sout = tvm.create_schedule([s2.op, h2.op])
    mout = tvm.build(
        sout,
        [s2, h2, bg, Ug, Vg, bi, Ui, Vi, bf, Uf, Vf, bo, Uo, Vo, x, s, h],
        'llvm')
    print(mout)

    ones = topi.full_like(s2, 1.0)
    jac = tvm.ir_pass.JacobianRecursive(s2, x, ones)
    print(jac)
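
# Hypothetical sketch (added): the lstm_cell helper called above is not shown
# in this snippet. A standard LSTM cell in the same style, assuming
# topi.matmul, topi.sigmoid and topi.tanh, might look like:
def lstm_cell(bg, Ug, Vg, bi, Ui, Vi, bf, Uf, Vf, bo, Uo, Vo, x, s, h):
    g = topi.tanh(topi.matmul(Ug, x) + topi.matmul(Vg, h) + bg)     # candidate
    i = topi.sigmoid(topi.matmul(Ui, x) + topi.matmul(Vi, h) + bi)  # input gate
    f = topi.sigmoid(topi.matmul(Uf, x) + topi.matmul(Vf, h) + bf)  # forget gate
    o = topi.sigmoid(topi.matmul(Uo, x) + topi.matmul(Vo, h) + bo)  # output gate
    s2 = f * s + i * g       # new cell state
    h2 = o * topi.tanh(s2)   # new hidden state
    return s2, h2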
Example #2
def verify_full(shape, dtype, fill_value):
    A = tvm.placeholder(shape, dtype=dtype, name="A")
    B = topi.full_like(A, fill_value=fill_value)
    C = topi.full(shape=shape, dtype=dtype, fill_value=fill_value)
    s1 = tvm.create_schedule([B.op])
    s2 = tvm.create_schedule([C.op])

    @memoize("topi.tests.test_topi_full")
    def get_ref_data():
        return np.full(shape, fill_value, dtype)
    np_nd = get_ref_data()

    def check_device(device):
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return

        ctx = tvm.context(device, 0)
        out = tvm.nd.array(np.zeros(shape, dtype=dtype), ctx)
        f = tvm.build(s1, [A, B], device, name="full_like")
        f(tvm.nd.array(np.zeros(shape, dtype), ctx), out)
        tvm.testing.assert_allclose(out.asnumpy(), np_nd, rtol=1e-5)

        f = tvm.build(s2, [C], device, name="full")
        f(out)
        tvm.testing.assert_allclose(out.asnumpy(), np_nd, rtol=1e-5)

    for device in ["llvm"]:
        check_device(device)
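
# The enclosing test presumably drives verify_full with a few
# shape/dtype/fill combinations (illustrative values):
verify_full((3, 4, 5), "float32", 3.14)
verify_full((10,), "int32", 7)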
Example #4
def example2_build():
    npoints = 20
    x = tvm.placeholder((1, ))
    k = tvm.placeholder((2, ))
    y = tvm.compute((1, ),
                    lambda i: x[i] * x[i] * k[0] + x[i] * k[1],
                    name='y')

    ones = topi.full_like(y, 1.0)
    [dy] = list(tvm.ir_pass.JacobianRecursive(y, [x], ones))

    sdy = tvm.create_schedule(dy.op)
    sy = tvm.create_schedule(y.op)

    my = tvm.build(sy, [y, x, k])
    mdy = tvm.build(sdy, [dy, x, k])

    xs = np.linspace(-10.0, 10.0, npoints)
    ys = []
    dys = []
    for xi in xs:
        k_in = tvm.nd.array(np.array([4.0, 2.0]).astype(k.dtype))

        y_out = tvm.nd.empty(get_shape(y), y.dtype)
        my(y_out, tvm.nd.array(np.array([xi]).astype(x.dtype)), k_in)
        ys.append(y_out.asnumpy())

        dy_out = tvm.nd.empty(get_shape(y), dy.dtype)
        mdy(dy_out, tvm.nd.array(np.array([xi]).astype(x.dtype)), k_in)
        dys.append(dy_out.asnumpy())

        # print(xi, y_out.asnumpy(), dy_out.asnumpy())

    print(xs, ys, dys)
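    # Added sanity check (not in the original example): y = k0*x**2 + k1*x
    # with k = [4.0, 2.0], so the Jacobian should match dy/dx = 2*k0*x + k1.
    expected = 2.0 * 4.0 * xs + 2.0
    np.testing.assert_allclose(np.array(dys).flatten(), expected, rtol=1e-4)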
Example #5
def example1_build():
    x = tvm.placeholder((1, ))
    k = tvm.placeholder((1, ))
    y = tvm.compute((1, ), lambda i: x[i] * x[i] * k[0], name='y')

    [dy] = tvm.ir_pass.JacobianRecursive(y, [x], topi.full_like(y, 1.0))

    sdy = tvm.create_schedule(dy.op)
    mdy = tvm.build(sdy, [dy, x, k])

    dy_out = tvm.nd.empty(get_shape(y), tvm.float32)

    mdy(dy_out, tvm.nd.array(np.array([1.0]).astype(x.dtype)),
        tvm.nd.array(np.array([4.0]).astype(k.dtype)))
    print(dy_out)
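    # Expected output (added note): y = k*x**2 gives dy/dx = 2*k*x, so for
    # x = 1.0 and k = 4.0 the printed gradient should be [8.0].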
Example #6
def check_grad(out, inputs, data_range=(-10, 10), desired_grads=None):
    # NOTE: this snippet begins mid-function; the enclosing signature above is
    # reconstructed from how out, inputs, data_range and desired_grads are
    # used below, and it assumes `from tvm import te` plus helpers such as
    # get_const_tuple, assert_allclose and check_numerical_grads.
    def check_device(device, host="llvm"):
        ctx = tvm.context(device, 0)
        if not tvm.runtime.enabled(host):
            return
        if not ctx.exist:
            print("skip because %s is not enabled.." % device)
            return

        sout = te.create_schedule(out.op)
        mout = tvm.build(sout, [out] + inputs)
        out_shape = get_const_tuple(out.shape)

        l, h = data_range
        input_data = [
            tvm.nd.array(
                np.random.uniform(l, h, size=get_const_tuple(
                    inp.shape)).astype(inp.dtype)) for inp in inputs
        ]

        ones = topi.full_like(out, 1.0)
        # we provide head to sum and reduce the output dimension,
        # which is equivalent to grad(out.sum(), inputs)
        grads = te.gradient(out, inputs, head=ones)
        grad_sched = te.create_schedule([grad.op for grad in grads])
        mgrad = tvm.build(grad_sched, list(grads) + inputs)
        # print(tvm.lower(grad_sched, list(grads) + inputs, simple_mode=True))

        grad_data = [
            tvm.nd.empty(get_const_tuple(i.shape), g.dtype)
            for i, g in zip(inputs, grads)
        ]

        mgrad(*grad_data, *input_data)
        g_res = [g.asnumpy() for g in grad_data]

        if desired_grads:
            assert isinstance(desired_grads, list)
            for actual, desired in zip(g_res, desired_grads):
                assert_allclose(actual, desired, rtol=0.1, atol=1e-2)
        else:

            def forward(*in_data):
                out_data = tvm.nd.empty(out_shape, out.dtype)
                mout(out_data, *[tvm.nd.array(d) for d in list(in_data)])
                return out_data.asnumpy().sum()

            check_numerical_grads(forward, [d.asnumpy() for d in input_data],
                                  g_res)
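
    # Presumably the enclosing test then exercises at least the LLVM backend
    # (added completion; the original snippet ends here):
    check_device("llvm")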
Example #9
def demo_conv2d():
    lrate = 0.1
    nbatches = 100  # batches to train

    num_classes = 10
    batch_size = 10
    img_h = 28
    img_w = 28
    img_c = 1

    f1_c = 4
    f2_c = 5
    f3_units = 16

    x = tvm.placeholder((batch_size, img_h, img_w, img_c), name='x')
    y = tvm.placeholder((batch_size, num_classes), name='y')

    print('Block1')
    w1 = tvm.placeholder((3, 3, img_c, f1_c), name='w1')
    b1 = tvm.placeholder((f1_c, ), name='b1')
    t = topi.nn.conv2d(x, w1, 1, 0, layout='NHWC', out_dtype=tvm.float32)
    t = t + topi.broadcast_to(b1, (batch_size, 1, 1, f1_c))
    print('Block1: after-biasing shape is', get_shape(t))
    t = topi.nn.pool(t, [2, 2], [2, 2], [0, 0, 0, 0], 'max', layout='NHWC')
    print('Block1: after-pooling shape is', get_shape(t))
    t = topi.nn.relu(t)
    print('Block1: after-relu shape is', get_shape(t))

    print('Block2')
    w2 = tvm.placeholder((3, 3, f1_c, f2_c), name='w2')
    b2 = tvm.placeholder((f2_c, ), name='b2')
    t = topi.nn.conv2d(t, w2, 1, 0, layout='NHWC', out_dtype=tvm.float32)
    t = t + topi.broadcast_to(b2, (batch_size, 1, 1, f2_c))
    print('Block2: after-biasing shape is', get_shape(t))
    t = topi.nn.pool(t, [2, 2], [2, 2], [0, 0, 0, 0], 'max', layout='NHWC')
    print('Block2: after-pooling shape is', get_shape(t))
    t = topi.nn.relu(t)
    print('Block2: after-relu shape is', get_shape(t))
    t = topi.nn.flatten(t)
    print('Block2: after-flatten shape is', get_shape(t))

    print('Block3')
    w3 = tvm.placeholder((f3_units, get_shape(t)[1]))
    b3 = tvm.placeholder((f3_units, ))
    t = topi.nn.dense(t, w3, b3)
    print('Block3: after-dense shape is', get_shape(t))

    print('Block4')
    w4 = tvm.placeholder((num_classes, get_shape(t)[1]))
    b4 = tvm.placeholder((num_classes, ))
    t = topi.nn.dense(t, w4, b4)
    print('Block4: after-dense shape is', get_shape(t))
    t = topi.nn.relu(t)

    p = topi.argmax(t, axis=1)
    # TODO: check the correctness of the log_softmax expression
    # TODO: figure out the difference between it and standard cross-entropy loss
    l = -topi.sum(y * topi.nn.log_softmax(t)) / batch_size

    print('Block4: loss shape is', get_shape(l))

    ones = topi.full_like(l, 1.0)
    #[dl_dw1,dl_db1,dl_dw2,dl_db2,dl_dw3,dl_db3,dl_dw4,dl_db4]
    params = [w1, b1, w2, b2, w3, b3, w4, b4]

    dl = list(tvm.ir_pass.JacobianRecursive(l, params, ones))
    assert len(params) == len(dl)
    print('dl_dw1 shape is', get_shape(dl[0]))

    sdl = tvm.create_schedule([p.op for p in [x, y, l] + params + dl])
    mdl = tvm.build(sdl, [x, y, l] + params + dl)
    print('Train+Inference module', mdl)

    # sl = tvm.create_schedule([l.op])
    # ml = tvm.build(sdl, [x,y] + params + [l])
    # print('Inference module',ml)

    state = {}
    for p in params:
        state.update({
            p:
            tvm.nd.array(
                np.random.uniform(-1.0, 1.0,
                                  size=get_shape(p)).astype(np.float32))
        })

    grads = {}
    for p, g in zip(params, dl):
        grads.update({p: tvm.nd.empty(get_shape(g))})

    for ib in range(nbatches):
        b = range(ib * batch_size, (ib + 1) * batch_size)
        tx = tvm.nd.array(mnist_img(b))
        ty = tvm.nd.array(mnist_cls_oh(b))
        tl = tvm.nd.empty(shape=(), dtype=tvm.float32)

        print('Entering')
        mdl(*([tx, ty, tl] + list(state.values()) + list(grads.values())))
        print('Done', 'loss', tl.asnumpy())

        state2 = {}
        for p in params:
            state2.update({
                p:
                tvm.nd.array(state[p].asnumpy() - lrate * grads[p].asnumpy())
            })

        state = state2
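
# A note on the loss above (added): with one-hot labels y, the expression
# -topi.sum(y * topi.nn.log_softmax(t)) / batch_size is exactly the mean
# cross-entropy, which addresses the second TODO. Standalone numpy check
# (illustrative only):
logits = np.random.randn(10, 10).astype(np.float32)
labels = np.eye(10, dtype=np.float32)                  # one-hot labels
shifted = logits - logits.max(axis=1, keepdims=True)   # stable log-softmax
log_sm = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
loss = -(labels * log_sm).sum() / 10.0                 # mean cross-entropy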
Example #10
File: _tensor.py  Project: bddppq/tvm
def ones_like(attrs, inputs, output_type, target):
    assert len(inputs) == 1
    return [topi.full_like(inputs[0], 1.0)]
Example #11
File: _tensor.py  Project: bddppq/tvm
def zeros_like_compute(attrs, inputs, output_type, target):
    assert len(inputs) == 1
    return [topi.full_like(inputs[0], 0.0)]
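
# For context (added sketch): compute functions like the two above are
# typically attached to their Relay operators via register_compute; the
# operator-name strings below are assumptions.
from tvm.relay.op import register_compute

register_compute("ones_like", ones_like)
register_compute("zeros_like", zeros_like_compute)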
Example #12
import tvm
from tvm import topi

x = tvm.te.placeholder((32, 3, 28, 28), name='x')
w1 = tvm.te.placeholder((10, 3, 3, 3), name='w1')
w2 = tvm.te.placeholder((10, 10, 3, 3), name='w2')
z1 = topi.nn.conv2d(x, w1, 1, 1, 1)
z2 = topi.nn.conv2d(z1, w2, 1, 1, 1)
y = topi.sum(z2)

# produce gradients
[dw1] = tvm.te.gradient(y, [w1])

print(type(dw1))

# produce Jacobians
[jw1, jw2] = tvm.te.gradient(z2, [w1, w2])

# produce gradients, the head adjoint for z2 is provided manually
[dw1, dw2] = tvm.te.gradient(z2, [w1, w2], topi.full_like(z2, 1.0))
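
Passing topi.full_like(z2, 1.0) as the head seeds every output adjoint with
one, so the last call computes the gradients of the sum of z2 with respect to
w1 and w2. When no head is given, te.gradient uses an identity head, and the
result is the full Jacobian, as in the jw1/jw2 call above.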