Example #1
def test_compile():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 128)
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}

    def verify(graph, lib):
        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        # get member functions
        set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
        na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        nb = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        # set inputs
        set_input("x", na)
        set_input("y", nb)
        # execute
        run()
        # get outputs
        out = tvm.nd.empty(shape, dtype)
        get_output(0, out)
        np.testing.assert_allclose(out.asnumpy(),
                                   np.exp(na.asnumpy() + nb.asnumpy()))

    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    # 3 nodes: the two input variables plus a single fused add+exp operator
    assert graph.index.num_nodes == 3
    verify(graph, lib)

    with nnvm.compiler.build_config(opt_level=0):
        graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
        # print(graph.ir())
        # at opt_level=0 the add and exp stay as separate nodes
        assert graph.index.num_nodes == 4
        verify(graph, lib)
Example #2
def test_compile_cache():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 1)
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}

    def verify(graph, lib):
        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        # prepare random inputs
        na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        nb = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        m.run(x=na, y=nb)
        # get outputs
        out = m.get_output(0, tvm.nd.empty(shape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(),
                                    np.exp(na.asnumpy() + nb.asnumpy()))

    engine = nnvm.compiler.engine
    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    inputs = [tvm.placeholder((10,)), tvm.placeholder((10,))]

    gkey = nnvm.compiler.graph_key(nnvm.graph.create(z), inputs, "llvm")
    gkey2 = nnvm.compiler.graph_key(nnvm.graph.create(z), inputs + inputs,
                                    "llvm")
    gf = engine[gkey]
    assert gf is not None
    assert engine[gkey2] is None
    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    assert graph.index.num_nodes == 3
    verify(graph, lib)
    # Test setting the external cache explicitly
    engine.clear_cache()
    engine[gkey] = gf
Example #3
def test_compile():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 128)
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}
    def verify(graph, lib):
        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        # get member functions
        set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
        na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        nb = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        # set inputs
        set_input("x", na)
        set_input("y", nb)
        # execute
        run()
        # get outputs
        out = tvm.nd.empty(shape, dtype)
        get_output(0, out)
        tvm.testing.assert_allclose(
            out.asnumpy(), np.exp(na.asnumpy() + nb.asnumpy()))

    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    assert graph.index.num_nodes == 3
    verify(graph, lib)

    with nnvm.compiler.build_config(opt_level=0):
        graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
        # print(graph.ir())
        assert graph.index.num_nodes == 4
        verify(graph, lib)
Example #4
def test_compile_cache():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 1)
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}
    def verify(graph, lib):
        m = graph_runtime.create(graph, lib, tvm.cpu(0))
        # prepare random inputs
        na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        nb = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
        m.run(x=na, y=nb)
        # get outputs
        out = m.get_output(0, tvm.nd.empty(shape, dtype))
        tvm.testing.assert_allclose(
            out.asnumpy(), np.exp(na.asnumpy() + nb.asnumpy()))

    engine = nnvm.compiler.engine
    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    inputs = [tvm.placeholder((10,)), tvm.placeholder((10,))]

    gkey = nnvm.compiler.graph_key(nnvm.graph.create(z), inputs, "llvm")
    gkey2 = nnvm.compiler.graph_key(nnvm.graph.create(z), inputs + inputs, "llvm")
    gf = engine[gkey]
    assert gf is not None
    assert engine[gkey2] is None
    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    assert graph.index.num_nodes == 3
    verify(graph, lib)
    # Test setting the external cache explicitly
    engine.clear_cache()
    engine[gkey] = gf
Example #5
def test_copy():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                name='exp',
                gpu=1,
                attr={"kk": "1"})
    assert y.__copy__().debug_str() == y.debug_str()
Example #6
def test_unary():
    x = sym.Variable('x')
    x = sym.exp(x)
    x = sym.log(x)
    x = sym.sigmoid(x)
    x = sym.tanh(x)
    x = sym.relu(x)
    assert x.list_input_names() == ['x']
Example #7
def test_unary():
    x = sym.Variable('x')
    x = sym.exp(x)
    x = sym.log(x)
    x = sym.sigmoid(x)
    x = sym.tanh(x)
    x = sym.relu(x)
    assert x.list_input_names() == ['x']
Example #8
def test_graph_gradient():
    x0 = sym.Variable('x0')
    x1 = sym.Variable('x1')
    yg = sym.Variable('yg')
    y = sym.exp(sym.mul(x0, x1))
    grad_graph = grad(y, [x0], yg)
    print("Original graph")
    print(y.debug_str())
    print("Gradient  graph")
    print(grad_graph.symbol.debug_str())
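Added note (not part of the original test): for this graph the expected gradient is dy/dx0 = yg * x1 * exp(x0 * x1). A minimal NumPy sketch, independent of nnvm, checking that formula against a central finite difference:

import numpy as np

x0v, x1v, ygv, eps = 0.7, -0.3, 1.5, 1e-5
analytic = ygv * x1v * np.exp(x0v * x1v)
numeric = ygv * (np.exp((x0v + eps) * x1v) - np.exp((x0v - eps) * x1v)) / (2 * eps)
# the analytic and finite-difference gradients should agree closely
assert abs(analytic - numeric) < 1e-8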
Example #9
def test_run():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 10)
    dtype = tvm.float32
    nx = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    ny = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    res = _run_graph(z, {"x": nx, "y": ny})
    tvm.testing.assert_allclose(
        res[0].asnumpy(), np.exp(nx.asnumpy() + ny.asnumpy()))
Example #10
def test_run():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 10)
    dtype = tvm.float32
    nx = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    ny = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    res = _run_graph(z, {"x": nx, "y": ny})
    np.testing.assert_allclose(res[0].asnumpy(),
                               np.exp(nx.asnumpy() + ny.asnumpy()))
Example #11
def test_compose():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                name='exp', gpu=1, attr={"kk": "1"})

    assert y.list_input_names() == ['x']
    assert y.list_output_names() == ["exp_output"]
    assert y.list_attr()['gpu'] == '1'
    z = y.get_internals()
    assert z['add_output'].list_output_names() == ['add_output']
    assert y.list_attr(recursive=True)['add$gpu'] == '2'
Example #12
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)

    def forward(x):
        return np.exp(x)

    def backward(head_grads, x):
        return [np.exp(x) * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
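Added aside (not from the original suite): the backward rule above encodes the identity d/dx exp(x) = exp(x), scaled by head_grads. A quick NumPy finite-difference sketch of that identity:

import numpy as np

xv = np.random.uniform(-1, 1, size=(5,))
eps = 1e-6
numeric = (np.exp(xv + eps) - np.exp(xv - eps)) / (2 * eps)
# matches the analytic derivative used in backward()
np.testing.assert_allclose(numeric, np.exp(xv), rtol=1e-6, atol=1e-9)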
Example #13
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)

    def forward(x):
        return np.exp(x)

    def backward(head_grads, x):
        return [np.exp(x) * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
Example #14
def test_compose():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.add(x, x, name='add', gpu=2),
                name='exp', gpu=1, attr={"kk": "1"})

    assert y.list_inputs() == ['x']
    assert y.list_outputs() == ["exp_output"]
    assert y.list_attr()['gpu'] == '1'
    z = y.get_internals()
    assert z['add_output'].list_outputs() == ['add_output']
    assert y.list_attr(recursive=True)['add_gpu'] == '2'
Example #15
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)

    def forward(x):
        return np.exp(x)

    def backward(x):
        return np.exp(x)

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = {'x': (dshape, x)}
    helper(y, inputs, dtype, forward, backward)
Example #16
    def test_fusible_network():
        """ The network is as following:
                    data
                      |
                     exp
                    /   \
                 sqrt   log
                    \   /
                    b_add
                      |
                    tanh
        """
        batch_size = 1
        data_shape = (batch_size, 3, 224, 224)
        data = symbol.Variable('data', shape=data_shape, dtype="float32")
        shape_dict = {"data": data_shape}
        params = {}
        params["data"] = np.random.uniform(-1, 1,
                                           size=data_shape).astype("float32")

        exp = symbol.exp(data, name='exp')
        sqrt = symbol.sqrt(exp, name='sqrt')
        log = symbol.log(exp, name='log')
        ret = sqrt + log
        ret = symbol.tanh(ret)

        # Fuse log and broadcast_add.
        check_annotated_graph(ret, ['exp', 'log', 'broadcast_add'], 8,
                              shape_dict,
                              params)

        # Fuse log, broadcast_add, and tanh
        check_annotated_graph(ret, ['exp', 'sqrt', 'none', 'elemwise_add'], 6,
                              shape_dict, params)

        # No operator will be fused.
        check_annotated_graph(ret, ['log', 'sqrt', 'none', 'tanh'], 11,
                              shape_dict, params)

        # All operators will be fused.
        check_annotated_graph(ret, [''], 2, shape_dict, params)

        # All operators will be fused since all of them are annotated to the
        # same device.
        check_annotated_graph(ret,
                              ['exp', 'sqrt', 'broadcast_add', 'none', 'log',
                               'tanh'], 2, shape_dict, params)

        # Fuse exp, sqrt, log, and broadcast_add
        check_annotated_graph(ret, ['tanh'], 4, shape_dict, params)
Example #17
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)
    dtype = "float32"
    dshape = (1, 3, 32, 32)
    oshape = dshape
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        y_np = np.exp(data)
        np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
Example #18
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)

    def forward(x):
        return np.exp(x)

    def backward(x):
        return np.exp(x)

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = {'x': (dshape, x)}
    helper(y, inputs, dtype, forward, backward)
Example #19
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)

    def forward(x):
        return np.exp(x)

    def backward(head_grads, x):
        return [np.exp(x) * head_grads]

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = [('x', dshape, x)]
    helper(y, inputs, dtype, forward, backward)
Example #20
def test_place_device():
    x = sym.Variable('x', device_group="stage1")
    y = sym.add(x, x, name='add1')
    y = sym.cast(y, dtype=1, name="cast1")
    z = sym.add(y, y, device_group="stage2", name="add2")
    z = sym.add(z, sym.exp(y, device_group="stage2"), name="add3")
    g = graph.create(z)
    g._set_json_attr("device_group_attr_key", "device_group")
    g._set_json_attr("device_assign_map", {"stage1": 0, "stage2" : 1}, "dict_str_int")
    g._set_json_attr("device_copy_op", "cross_device_copy")
    g = g.apply("PlaceDevice")
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('device')[jnode_row_ptr[nindex["add2"]]] == 1
    assert g.json_attr('device')[jnode_row_ptr[nindex["add3"]]] == 1
    assert g.json_attr('device')[jnode_row_ptr[nindex["cast1"]]] == 0
Example #21
def test_place_device():
    x = sym.Variable('x', device_group="stage1")
    y = sym.add(x, x, name='add1')
    y = sym.cast(y, dtype=1, name="cast1")
    z = sym.add(y, y, device_group="stage2", name="add2")
    z = sym.add(z, sym.exp(y, device_group="stage2"), name="add3")
    g = graph.create(z)
    g._set_json_attr("device_group_attr_key", "device_group")
    g._set_json_attr("device_assign_map", {"stage1": 0, "stage2" : 1}, "dict_str_int")
    g._set_json_attr("device_copy_op", "cross_device_copy")
    g = g.apply("PlaceDevice")
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('device')[jnode_row_ptr[nindex["add2"]]] == 1
    assert g.json_attr('device')[jnode_row_ptr[nindex["add3"]]] == 1
    assert g.json_attr('device')[jnode_row_ptr[nindex["cast1"]]] == 0
Example #22
def test_rpc_executor():
    host = "localhost"
    port = 9021
    server = rpc.Server(host, port, use_popen=True)
    time.sleep(1)
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 128)
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}
    tmp = util.tempdir()
    lib_name = tmp.relpath("net.o")

    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    # save module
    lib.save(lib_name)
    remote = rpc.connect(host, port)
    remote.upload(lib_name)
    ctx = remote.cpu(0)
    # load remote
    rlib = remote.load_module("net.o")

    # Create remote module
    m = graph_runtime.create(graph, rlib, remote.cpu(0))
    # get member functions
    set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
    na = tvm.nd.array(np.ones(shape).astype(dtype), ctx)
    nb = tvm.nd.array(np.ones(shape).astype(dtype), ctx)
    # set inputs
    set_input("x", na)
    set_input("y", nb)
    # execute
    run()
    # get outputs
    out = tvm.nd.empty(shape, dtype, ctx)
    get_output(0, out)
    tvm.testing.assert_allclose(out.asnumpy(),
                                np.exp(na.asnumpy() + nb.asnumpy()))
    server.terminate()
Example #23
def test_rpc_executor():
    host = "localhost"
    port = 9021
    server = rpc.Server(host, port, use_popen=True)
    time.sleep(1)
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.exp(y + x)
    shape = (10, 128)
    dtype = tvm.float32
    shape_dict = {"x": shape, "y": shape}
    tmp = util.tempdir()
    lib_name = tmp.relpath("net.o")

    graph, lib, _ = nnvm.compiler.build(z, "llvm", shape_dict)
    # save module
    lib.save(lib_name)
    remote = rpc.connect(host, port)
    remote.upload(lib_name)
    ctx = remote.cpu(0)
    # load remote
    rlib = remote.load_module("net.o")

    # Create remote module
    m = graph_runtime.create(graph, rlib, remote.cpu(0))
    # get member functions
    set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
    na = tvm.nd.array(np.ones(shape).astype(dtype), ctx)
    nb = tvm.nd.array(np.ones(shape).astype(dtype), ctx)
    # set inputs
    set_input("x", na)
    set_input("y", nb)
    # execute
    run()
    # get outputs
    out = tvm.nd.empty(shape, dtype, ctx)
    get_output(0, out)
    tvm.testing.assert_allclose(
        out.asnumpy(), np.exp(na.asnumpy() + nb.asnumpy()))
    server.terminate()
Example #24
def test_fusible_network(device, target):
    R""" The network is as following:
                data
                  |
                 exp
                /   \
             sqrt   log
                \   /
                b_add
                  |
                tanh
    """
    if not tvm.module.enabled(device):
        print("Skip test because %s is not enabled." % device)
        return

    batch_size = 1
    data_shape = (batch_size, 3, 224, 224)
    data = symbol.Variable('data', shape=data_shape, dtype="float32")
    shape_dict = {"data": data_shape}
    params = {}
    params["data"] = np.random.uniform(-1, 1,
                                       size=data_shape).astype("float32")

    exp = symbol.exp(data, name='exp')
    sqrt = symbol.sqrt(exp, name='sqrt')
    log = symbol.log(exp, name='log')
    ret = sqrt + log
    ret = symbol.tanh(ret)

    fallback_device = tvm.context("cpu")
    target = {"cpu": "llvm", device: target}

    # Fuse log and broadcast_add.
    op_name_device = {
        "exp": "cpu",
        "log": "cpu",
        "broadcast_add": "cpu",
        "sqrt": device,
        "elemwise_add": device,
        "tanh": device
    }
    check_annotated_graph(ret, target, op_name_device, 8, fallback_device,
                          shape_dict, params)

    # Fuse log, broadcast_add, and tanh
    op_name_device = {
        "exp": "cpu",
        "log": device,
        "broadcast_add": device,
        "sqrt": "cpu",
        "elemwise_add": "cpu",
        "tanh": device
    }
    check_annotated_graph(ret, target, op_name_device, 6, fallback_device,
                          shape_dict, params)

    # No operator will be fused.
    op_name_device = {
        "exp": device,
        "log": "cpu",
        "broadcast_add": device,
        "sqrt": "cpu",
        "elemwise_add": device,
        "tanh": "cpu"
    }
    check_annotated_graph(ret, target, op_name_device, 11, fallback_device,
                          shape_dict, params)

    # All operators will be fused.
    op_name_device = {
        "exp": device,
        "log": device,
        "broadcast_add": device,
        "sqrt": device,
        "elemwise_add": device,
        "tanh": device
    }
    check_annotated_graph(ret, target, op_name_device, 2, fallback_device,
                          shape_dict, params)

    # All operators will be fused since all of them are annotated to the
    # same device.
    op_name_device = {
        "exp": "cpu",
        "log": "cpu",
        "broadcast_add": "cpu",
        "sqrt": "cpu",
        "elemwise_add": "cpu",
        "tanh": "cpu"
    }
    check_annotated_graph(ret, target, op_name_device, 2, fallback_device,
                          shape_dict, params)

    # Fuse exp, sqrt, log, and broadcast_add
    op_name_device = {
        "exp": device,
        "log": device,
        "broadcast_add": device,
        "sqrt": device,
        "elemwise_add": device,
        "tanh": "cpu"
    }
    check_annotated_graph(ret, target, op_name_device, 4, fallback_device,
                          shape_dict, params)
Example #25
def test_flatten():
    x = sym.Variable("x", shape=(10, 20, 10))
    y = sym.flatten(x) * 2
    y = sym.exp(y, name="y")
    sdict = infer_shape(y)
    assert sdict["y"][0] == [10, 200]
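Added sketch (not part of the original test): flatten keeps the leading batch axis and collapses the remaining axes, which is why infer_shape reports [10, 200] for a (10, 20, 10) input. The NumPy equivalent:

import numpy as np

a = np.zeros((10, 20, 10))
flat = a.reshape(a.shape[0], -1) * 2   # mirrors sym.flatten(x) * 2
assert flat.shape == (10, 200)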
Example #26
def test_copy():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                name='exp', gpu=1, attr={"kk": "1"})
    assert y.__copy__().debug_str() == y.debug_str()
Example #27
def test_op_name():
    x = sym.Variable('x')
    y = sym.exp(x)
    op_name = y.attr("op_name")
    op_func = sym.__dict__[op_name]
    z = op_func(x)
Example #28
def test_op_name():
    x = sym.Variable('x')
    y = sym.exp(x)
    op_name = y.attr("op_name")
    op_func = sym.__dict__[op_name]
    z = op_func(x)
Example #29
def elu(data):
    return -0.5 * sym.relu(1 - sym.exp(data)) + sym.relu(data)
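Added note: the composition above is equivalent to ELU with alpha = 0.5 (identity for x >= 0, 0.5 * (exp(x) - 1) for x < 0). A minimal NumPy sketch checking that equivalence:

import numpy as np

def elu_ref(x, alpha=0.5):
    return np.where(x >= 0, x, alpha * (np.exp(x) - 1))

xv = np.linspace(-3, 3, 13)
composed = -0.5 * np.maximum(1 - np.exp(xv), 0) + np.maximum(xv, 0)
np.testing.assert_allclose(composed, elu_ref(xv), rtol=1e-12, atol=1e-12)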
Example #30
def test_flatten():
    x = sym.Variable("x", shape=(10, 20, 10))
    y = sym.flatten(x) * 2
    y = sym.exp(y, name="y")
    sdict = infer_shape(y)
    assert sdict["y"][0] == [10, 200]
Example #31
def elu(data):
    return -0.5 * sym.relu(1 - sym.exp(data)) + sym.relu(data)