Example 1
0
def ArgMax(device="llvm",
           lib_path="./",
           ndim=None,
           dtype=None,
           axis=None,
           keep_dims=None,
           top_k=None,
           out_dtype=None):
    '''
    Build and export an ArgMax operator library through TVM/topi.

    Args:
        device: TVM target device string (default "llvm").
        lib_path: directory the generated library is written to.
        ndim: rank of the input tensor; one symbolic shape var is created
            per dimension.
        dtype: element dtype of the input tensor.
        axis: reduction axis; must be < ndim (negative axes are not
            normalized here — TODO confirm callers never pass them).
        keep_dims: whether the reduced axis is kept with length 1.
        top_k: only used to build the op name; not forwarded to topi.argmax.
        out_dtype: dtype the argmax indices are cast to via AsType.

    Returns:
        None. Returns early without building anything when axis >= ndim.
    '''
    # Guard: an out-of-range axis silently skips generation instead of raising.
    if axis >= ndim:
        return
    # Symbolic shape: one variable per dimension (n0, n1, ...).
    shape = [tvm.var("n" + str(i)) for i in range(ndim)]
    opname = "ArgMax_ndim%d_%s_axis%d_%s_top%d_%s" \
             % (ndim, dtype, axis, "keepDims" if keep_dims else "notKeepDims", top_k, out_dtype)
    print(opname)

    in_tensor = tvm.placeholder(shape, dtype=dtype, name='in_tensor')
    out_tensor = topi.argmax(in_tensor, axis=axis, keepdims=keep_dims)
    # Cast the index output to the requested dtype (project helper).
    out_tensor = AsType(out_tensor, out_dtype)
    tensor_list = [in_tensor, out_tensor]
    s = tvm.create_schedule(out_tensor.op)
    # Compile the schedule and emit the library to lib_path (project helper).
    Genlib(s, tensor_list, device, opname, lib_path)
Example 2
0
def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum"):
    """Verify a fused elementwise map (sqrt(exp(x))) + reduction against numpy.

    Args:
        in_shape: shape of the input placeholder.
        axis: reduction axis (int, tuple or None), forwarded unchanged to
            both topi and the numpy reference.
        keepdims: keep reduced axes with length 1 when True.
        type: reduction kind: "sum", "max", "min", "argmax" or "argmin".

    Raises:
        NotImplementedError: if `type` is not one of the supported kinds.
    """
    # Build the logic and compile the function
    dat_dtype = "float32"
    A = tvm.placeholder(shape=in_shape, name="A", dtype=dat_dtype)
    A1 = topi.sqrt(topi.exp(A))
    out_dtype = dat_dtype  # overridden to int32 for the arg-reductions below
    if type == "sum":
        B = topi.sum(A1, axis=axis, keepdims=keepdims)
    elif type == "max":
        B = topi.max(A1, axis=axis, keepdims=keepdims)
    elif type == "min":
        B = topi.min(A1, axis=axis, keepdims=keepdims)
    elif type == "argmax":
        B = topi.argmax(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"
    elif type == "argmin":
        B = topi.argmin(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"
    else:
        raise NotImplementedError

    def check_device(device):
        # Skip targets this TVM build does not support.
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        with tvm.target.create(device):
            s = topi.generic.schedule_reduce(B)
        ctx = tvm.context(device, 0)
        # Fix: the compiled function was unconditionally named "sum" even for
        # max/min/argmax/argmin; name it after the actual reduction type.
        foo = tvm.build(s, [A, B], device, name=type)
        # Test against a numpy reference computed on the same random input.
        in_npy = np.random.uniform(size=in_shape).astype(np.float32)
        in_npy_map = np.sqrt(np.exp(in_npy)).astype(np.float32)
        if type == "sum":
            out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
        elif type == "max":
            out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
        elif type == "min":
            out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
        elif type == "argmax":
            out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
        elif type == "argmin":
            out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
        else:
            raise NotImplementedError
        data_tvm = tvm.nd.array(in_npy, ctx=ctx)
        out_tvm = tvm.nd.empty(shape=out_npy.shape, ctx=ctx, dtype=out_dtype)
        foo(data_tvm, out_tvm)  # was a pointless single-iteration loop
        np.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1E-3, 1E-3)

    check_device("opencl")
    check_device("cuda")
    check_device("metal")
    check_device("rocm")
Example 3
0
def verify_reduce_map_ele(in_shape,
                          axis,
                          keepdims,
                          type="sum",
                          dtype="float32"):
    """Verify a fused elementwise map (sqrt(exp(x))) + reduction against numpy.

    Args:
        in_shape: shape of the input placeholder.
        axis: reduction axis (int or None), forwarded to both topi and numpy.
        keepdims: keep reduced axes with length 1 when True.
        type: reduction kind: "sum", "all", "any", "max", "min",
            "argmax" or "argmin".
        dtype: input element dtype; the reference branches for "all"/"any"
            only run when dtype == "bool".

    Raises:
        NotImplementedError: if `type` (with this dtype) is unsupported.
    """
    # Build the logic and compile the function
    A = tvm.placeholder(shape=in_shape, name="A", dtype=dtype)
    A1 = topi.sqrt(topi.exp(A))
    out_dtype = dtype
    if type == "sum":
        B = topi.sum(A1, axis=axis, keepdims=keepdims)
    elif type == "all":
        # "all"/"any" reduce the raw input A, not the mapped A1
        # (sqrt/exp is not meaningful for bool input).
        B = topi.all(A, axis=axis, keepdims=keepdims)
    elif type == "any":
        B = topi.any(A, axis=axis, keepdims=keepdims)
    elif type == "max":
        B = topi.max(A1, axis=axis, keepdims=keepdims)
    elif type == "min":
        B = topi.min(A1, axis=axis, keepdims=keepdims)
    elif type == "argmax":
        B = topi.argmax(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"  # argmax/argmin produce integer indices
    elif type == "argmin":
        B = topi.argmin(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"
    else:
        raise NotImplementedError

    def check_device(device):
        # Skip devices that are not available in this TVM build/runtime.
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_reduce(B)

        foo = tvm.build(s, [A, B], device, name=type)
        # Test: build the numpy reference input. For bool, the map stage is
        # the identity (see the topi.all/any branches above).
        if dtype == 'bool':
            in_npy_map = in_npy = np.random.choice([True, False],
                                                   size=in_shape)
        else:
            in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
            in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype)

        if type == "sum":
            out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
        elif type == "all" and dtype == 'bool':
            out_npy = in_npy_map.all(axis=axis, keepdims=keepdims)
        elif type == "any" and dtype == "bool":
            out_npy = in_npy_map.any(axis=axis, keepdims=keepdims)
        elif type == "max":
            out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
        elif type == "min":
            out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
        elif type == "argmax":
            out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
        elif type == "argmin":
            out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
        else:
            raise NotImplementedError
        data_tvm = tvm.nd.array(in_npy, ctx=ctx)
        out_tvm = tvm.nd.empty(shape=out_npy.shape, ctx=ctx, dtype=out_dtype)
        for _ in range(1):
            foo(data_tvm, out_tvm)
        if type == "argmax" or type == "argmin":
            # Arg-reductions may legitimately pick a different index among
            # ties, so compare the VALUES selected by the returned indices
            # against the true max/min rather than the indices themselves.
            out_tvm_indices = out_tvm.asnumpy()
            if keepdims:
                # Drop the kept length-1 reduction axis before indexing.
                out_tvm_indices = np.take(out_tvm_indices,
                                          indices=0,
                                          axis=axis)
            if axis is None:
                out_tvm_val = in_npy_map.ravel()[out_tvm_indices]
            else:
                # Rebuild full fancy-index tuples: indices over all the
                # non-reduced axes, with the device-computed indices spliced
                # in at position `axis`.
                other_indices = tuple(
                    np.indices(in_shape[0:axis] + in_shape[(axis + 1):]))
                sel_indices = other_indices[0:axis] + (
                    out_tvm_indices, ) + other_indices[axis:]
                out_tvm_val = in_npy_map[sel_indices]
            if type == "argmax":
                tvm.testing.assert_allclose(out_tvm_val,
                                            in_npy_map.max(axis=axis), 1E-3,
                                            1E-3)
            elif type == "argmin":
                tvm.testing.assert_allclose(out_tvm_val,
                                            in_npy_map.min(axis=axis), 1E-3,
                                            1E-3)
        else:
            tvm.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1E-3, 1E-3)

    for device in get_all_backend():
        check_device(device)
Example 4
0
def verify_reduce_map_ele(in_shape, axis, keepdims, type="sum"):
    """Verify a fused elementwise map (sqrt(exp(x))) + reduction against numpy.

    Args:
        in_shape: shape of the input placeholder.
        axis: reduction axis (int or None), forwarded to both topi and numpy.
        keepdims: keep reduced axes with length 1 when True.
        type: reduction kind: "sum", "max", "min", "argmax" or "argmin".

    Raises:
        NotImplementedError: if `type` is not one of the supported kinds.
    """
    # Build the logic and compile the function
    dat_dtype = "float32"
    A = tvm.placeholder(shape=in_shape, name="A", dtype=dat_dtype)
    A1 = topi.sqrt(topi.exp(A))
    out_dtype = "float32"
    if type == "sum":
        B = topi.sum(A1, axis=axis, keepdims=keepdims)
    elif type == "max":
        B = topi.max(A1, axis=axis, keepdims=keepdims)
    elif type == "min":
        B = topi.min(A1, axis=axis, keepdims=keepdims)
    elif type == "argmax":
        B = topi.argmax(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"  # argmax/argmin produce integer indices
    elif type == "argmin":
        B = topi.argmin(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"
    else:
        raise NotImplementedError

    def check_device(device):
        # Skip devices that are not available in this TVM build/runtime.
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_reduce(B)

        foo = tvm.build(s, [A, B], device, name=type)
        # Test against a numpy reference computed on the same random input.
        in_npy = np.random.uniform(size=in_shape).astype(np.float32)
        in_npy_map = np.sqrt(np.exp(in_npy)).astype(np.float32)
        if type == "sum":
            out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
        elif type == "max":
            out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
        elif type == "min":
            out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
        elif type == "argmax":
            out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
        elif type == "argmin":
            out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
        else:
            raise NotImplementedError
        data_tvm = tvm.nd.array(in_npy, ctx=ctx)
        out_tvm = tvm.nd.empty(shape=out_npy.shape, ctx=ctx, dtype=out_dtype)
        for _ in range(1):
            foo(data_tvm, out_tvm)
        if type == "argmax" or type == "argmin":
            # Arg-reductions may pick a different index among ties, so
            # compare the VALUES selected by the returned indices against
            # the true max/min instead of comparing indices directly.
            out_tvm_indices = out_tvm.asnumpy()
            if keepdims:
                # Drop the kept length-1 reduction axis before indexing.
                out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis)
            if axis is None:
                out_tvm_val = in_npy_map.ravel()[out_tvm_indices]
            else:
                # Rebuild full fancy-index tuples: indices over all non-reduced
                # axes with the device indices spliced in at position `axis`.
                other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis+1):]))
                sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:]
                out_tvm_val = in_npy_map[sel_indices]
            if type == "argmax":
                np.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1E-3, 1E-3)
            elif type == "argmin":
                np.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1E-3, 1E-3)
        else:
            np.testing.assert_allclose(out_tvm.asnumpy(), out_npy, 1E-3, 1E-3)
    for device in ["cuda", "opencl", "metal", "llvm", "rocm", "vulkan"]:
        check_device(device)
Example 5
0
new_h = h
for i in range(num_timesteps):
    inp = topi.concatenate([xs[i], new_h], 1)
    g = topi.tanh(topi.matmul(inp, weights[0]) + weights[1])
    j = topi.sigmoid(topi.matmul(inp, weights[2]) + weights[3])
    f = topi.sigmoid(topi.matmul(inp, weights[4]) + weights[5])
    o = topi.sigmoid(topi.matmul(inp, weights[6]) + weights[7])

    new_s = new_s * f + g * j
    new_h = topi.tanh(new_s) * o

logits = topi.matmul(new_h, weights[8]) + weights[9]

# compute accuracy
pred = topi.nn.softmax(logits)
correct_pred = topi.equal(topi.argmax(y, 1), topi.argmax(pred, 1))
accuracy = topi.sum(correct_pred.astype('float32')) / batch_size

# Define loss and optimizer
loss = topi.sum(-topi.sum(y *
                          topi.nn.log_softmax(logits), axis=1)) / batch_size

head = topi.full((1, ), 'float32', 1.0)
gradients = list(tvm.differentiate(topi.reshape(loss, (1, )), weights, head))
new_weights = [w - lr * g for (w, g) in zip(weights, gradients)]

# Define model
sched = tvm.create_schedule([loss.op, accuracy.op] +
                            [x.op for x in new_weights])
parallel_schedule(sched)
train_model = tvm.build(sched,
Example 6
0
def demo_argmax():
    """Run topi.argmax along axis 1 on a small fixed matrix via with_tvm."""
    data = np.array(
        [[0, 0, 1, 0, 0],
         [3, 1, 2, 0, 0],
         [0, 0, 0, 1, 2]],
        dtype=np.float32,
    )
    return with_tvm(0, 1, [data], lambda t: topi.argmax(t, axis=1))
Example 7
0
def demo_conv2d():
    """Train a small NHWC conv net on MNIST with TVM-generated gradients.

    Builds a 2-conv-block + 2-dense network, differentiates the loss with
    tvm.ir_pass.JacobianRecursive, compiles one module that computes the
    loss and all gradients, then runs SGD for `nbatches` mini-batches.
    Relies on project helpers get_shape, mnist_img and mnist_cls_oh.
    """
    lrate = 0.1     # SGD learning rate
    nbatches = 100  # batches to train

    num_classes = 10
    batch_size = 10
    img_h = 28
    img_w = 28
    img_c = 1

    # Channel counts for the conv blocks and units of the first dense layer.
    f1_c = 4
    f2_c = 5
    f3_units = 16

    x = tvm.placeholder((batch_size, img_h, img_w, img_c), name='x')
    y = tvm.placeholder((batch_size, num_classes), name='y')

    # Block 1: conv -> bias -> 2x2 max-pool -> relu
    print('Block1')
    w1 = tvm.placeholder((3, 3, img_c, f1_c), name='w1')
    b1 = tvm.placeholder((f1_c, ), name='b1')
    t = topi.nn.conv2d(x, w1, 1, 0, layout='NHWC', out_dtype=tvm.float32)
    # Bias is broadcast over batch and spatial dimensions.
    t = t + topi.broadcast_to(b1, (batch_size, 1, 1, f1_c))
    print('Block1: after-biasing shape is', get_shape(t))
    t = topi.nn.pool(t, [2, 2], [2, 2], [0, 0, 0, 0], 'max', layout='NHWC')
    print('Block1: after-pooling shape is', get_shape(t))
    t = topi.nn.relu(t)
    print('Block1: after-relu shape is', get_shape(t))

    # Block 2: conv -> bias -> 2x2 max-pool -> relu -> flatten
    print('Block2')
    w2 = tvm.placeholder((3, 3, f1_c, f2_c), name='w2')
    b2 = tvm.placeholder((f2_c, ), name='b2')
    t = topi.nn.conv2d(t, w2, 1, 0, layout='NHWC', out_dtype=tvm.float32)
    t = t + topi.broadcast_to(b2, (batch_size, 1, 1, f2_c))
    print('Block2: after-biasing shape is', get_shape(t))
    t = topi.nn.pool(t, [2, 2], [2, 2], [0, 0, 0, 0], 'max', layout='NHWC')
    print('Block2: after-pooling shape is', get_shape(t))
    t = topi.nn.relu(t)
    print('Block2: after-relu shape is', get_shape(t))
    t = topi.nn.flatten(t)
    print('Block2: after-flattern shape is', get_shape(t))

    # Block 3: dense layer sized from the flattened feature width.
    print('Block3')
    w3 = tvm.placeholder((f3_units, get_shape(t)[1]))
    b3 = tvm.placeholder((f3_units, ))
    t = topi.nn.dense(t, w3, b3)
    print('Block3: after-dense shape is', get_shape(t))

    # Block 4: dense classifier head followed by relu.
    print('Block4')
    w4 = tvm.placeholder((num_classes, get_shape(t)[1]))
    b4 = tvm.placeholder((num_classes, ))
    t = topi.nn.dense(t, w4, b4)
    print('Block4: after-dense shape is', get_shape(t))
    t = topi.nn.relu(t)

    # Predicted class per sample (computed but only scheduled, not fetched).
    p = topi.argmax(t, axis=1)
    # TODO: check the correctnesss of the log_softmax expression
    # TODO: figure out the difference between it and standard cross-entropy loss
    l = -topi.sum(y * topi.nn.log_softmax(t)) / batch_size

    print('Block4: loss shape is', get_shape(l))

    # Seed gradient (dl/dl = 1) for the reverse-mode differentiation below.
    ones = topi.full_like(l, 1.0)
    #[dl_dw1,dl_db1,dl_dw2,dl_db2,dl_dw3,dl_db3,dl_dw4,dl_db4]
    params = [w1, b1, w2, b2, w3, b3, w4, b4]

    dl = list(tvm.ir_pass.JacobianRecursive(l, params, ones))
    assert len(params) == len(dl)
    print('dl_dw1 weight is', get_shape(params[0]))

    # One module computes loss and every gradient in a single call.
    sdl = tvm.create_schedule([p.op for p in [x, y, l] + params + dl])
    mdl = tvm.build(sdl, [x, y, l] + params + dl)
    print('Train+Inference module', mdl)

    # sl = tvm.create_schedule([l.op])
    # ml = tvm.build(sdl, [x,y] + params + [l])
    # print('Inference module',ml)

    # Randomly initialize every parameter in [-1, 1).
    state = {}
    for p in params:
        state.update({
            p:
            tvm.nd.array(
                np.random.uniform(-1.0, 1.0,
                                  size=get_shape(p)).astype(np.float32))
        })

    # Pre-allocate output buffers for the gradients.
    grads = {}
    for p, g in zip(params, dl):
        grads.update({p: tvm.nd.empty(get_shape(g))})

    # SGD training loop over consecutive, non-overlapping MNIST batches.
    for ib in range(nbatches):
        b = range(ib * batch_size, (ib + 1) * batch_size)
        tx = tvm.nd.array(mnist_img(b))
        ty = tvm.nd.array(mnist_cls_oh(b))
        tl = tvm.nd.empty(shape=(), dtype=tvm.float32)

        print('Entering')
        # Inputs, loss output, parameters, then gradient outputs — the order
        # must match the [x, y, l] + params + dl signature of tvm.build above.
        mdl(*([tx, ty, tl] + list(state.values()) + list(grads.values())))
        print('Done', 'loss', tl.asnumpy())

        # Vanilla SGD step: w <- w - lrate * dw (rebuilt as a fresh dict).
        state2 = {}
        for p in params:
            state2.update({
                p:
                tvm.nd.array(state[p].asnumpy() - lrate * grads[p].asnumpy())
            })

        state = state2