Example #1
    def compile_run_graph(device, target):
        if not tvm.module.enabled(device):
            print("Skip test because %s is not enabled." % device)
            return

        out_channels = 16
        data1 = symbol.Variable(name="data1")
        data2 = symbol.Variable(name="data2")
        simple_net1 = symbol.conv2d(data=data1, kernel_size=(3, 3),
                                    channels=out_channels, padding=(1, 1),
                                    use_bias=True)

        simple_net2 = symbol.conv2d(data=data2, kernel_size=(3, 3),
                                    channels=out_channels, padding=(1, 1),
                                    use_bias=True)
        ret = symbol.elemwise_add(simple_net1, simple_net2)
        ret = symbol.conv2d(ret, kernel_size=(3, 3),
                            channels=out_channels, padding=(1, 1),
                            use_bias=True)

        batch_size = 1
        data_shape = (batch_size, 3, 224, 224)
        shape_dict = {"data1": data_shape, "data2": data_shape}
        params = {}
        params["data1"] = np.random.uniform(-1, 1,
                                            size=data_shape).astype("float32")
        params["data2"] = np.random.uniform(-1, 1,
                                            size=data_shape).astype("float32")
        op_name_device = {"elemwise_add": "cpu", "conv2d": device}
        fallback_device = tvm.context("cpu")
        target = {"cpu": "llvm", device: target}
        # No op will be fused. 3 additional device copy nodes are required.
        check_annotated_graph(ret, target, op_name_device, 15, fallback_device,
                              shape_dict, params)
Example #2
def sample():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    print(gradient)
Example #3
def test_concatenate_conv2d():
    ch = 3
    size = 8
    data = sym.Variable(name="data")
    concat = sym.concatenate(data, data, axis=1)
    conv = sym.conv2d(data=concat, kernel_size=(1, 1), channels=ch * 2,
                      use_bias=False, name="conv")
    net = sym.elemwise_add(concat, conv)

    dtype="float32"
    dshape = (1, ch, size, size)
    kshape = (ch*2, ch*2, 1, 1)
    oshape = (1, ch*2, size, size)
    shape_dict = {"data": dshape}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(net, target, shape_dict)
        # data, conv weight, conv op, concat
        assert graph.index.num_nodes == 4

        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        m = graph_runtime.create(graph, lib, ctx)
        m.run(data=data, conv_weight=kernel)
        # get output
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))

        concat = np.concatenate((data.asnumpy(), data.asnumpy()), axis=1)
        conv = topi.testing.conv2d_nchw_python(
            concat, kernel.asnumpy(), (1,1), 'SAME')
        ref = concat + conv
        tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example #4
    def test_conv_network():
        """ The network is as following:
                data1       data2
                  |           |
                conv2d      conv2d
                   \         /
                  elemwise_add
                        |
                      conv2d
        """
        out_channels = 16
        data1 = symbol.Variable(name="data1")
        data2 = symbol.Variable(name="data2")
        simple_net1 = symbol.conv2d(data=data1, kernel_size=(3, 3),
                                    channels=out_channels, padding=(1, 1),
                                    use_bias=True)

        simple_net2 = symbol.conv2d(data=data2, kernel_size=(3, 3),
                                    channels=out_channels, padding=(1, 1),
                                    use_bias=True)
        ret = symbol.elemwise_add(simple_net1, simple_net2)
        ret = symbol.conv2d(ret, kernel_size=(3, 3),
                            channels=out_channels, padding=(1, 1),
                            use_bias=True)

        batch_size = 1
        data_shape = (batch_size, 3, 224, 224)
        shape_dict = {"data1": data_shape, "data2": data_shape}
        params = {}
        params["data1"] = np.random.uniform(-1, 1,
                                            size=data_shape).astype("float32")
        params["data2"] = np.random.uniform(-1, 1,
                                            size=data_shape).astype("float32")
        # No op will be fused. 3 additional device copy nodes are required.
        check_annotated_graph(ret, ["elemwise_add"], 15, shape_dict, params)
Example #5
def test_concatenate_conv2d():
    ch = 3
    size = 8
    data = sym.Variable(name="data")
    concat = sym.concatenate(data, data, axis=1)
    conv = sym.conv2d(data=concat,
                      kernel_size=(1, 1),
                      channels=ch * 2,
                      use_bias=False,
                      name="conv")
    net = sym.elemwise_add(concat, conv)

    dtype = "float32"
    dshape = (1, ch, size, size)
    kshape = (ch * 2, ch * 2, 1, 1)
    oshape = (1, ch * 2, size, size)
    shape_dict = {"data": dshape}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(net, target, shape_dict)
        # data, conv weight, conv op, concat
        assert graph.index.num_nodes == 4

        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        m = graph_runtime.create(graph, lib, ctx)
        m.run(data=data, conv_weight=kernel)
        # get output
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))

        concat = np.concatenate((data.asnumpy(), data.asnumpy()), axis=1)
        conv = topi.testing.conv2d_nchw_python(concat, kernel.asnumpy(),
                                               (1, 1), 'SAME')
        ref = concat + conv
        np.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example #6
def sample():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    print(gradient)
Example #7
def test_copy():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                name='exp',
                gpu=1,
                attr={"kk": "1"})
    assert y.__copy__().debug_str() == y.debug_str()
Example #8
def test_plan_memory():
    x = sym.Variable('x', shape=(4, 2))
    x2 = sym.elemwise_add(x, x, name='addk')
    y = sym.flatten(x2, name="reshapek")
    y = sym.elemwise_add(y, x2, name="add2")
    y = sym.elemwise_add(y, y)
    g = graph.create(y)
    g._set_json_attr("shape_attr_key", "shape")
    g = g.apply(["InferShape", "InferType", "PlanMemory"])
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    storage_id = g.json_attr('storage_id')
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert (storage_id[jnode_row_ptr[nindex["addk"]]] !=
            storage_id[jnode_row_ptr[nindex["reshapek"]]])
    assert (storage_id[jnode_row_ptr[nindex["add2"]]] == storage_id[
        jnode_row_ptr[nindex["reshapek"]]])
Example #9
def test_plan_memory():
    x = sym.Variable('x', shape=(4, 2))
    x2 = sym.elemwise_add(x, x, name='addk')
    y = sym.flatten(x2, name="reshapek")
    y = sym.elemwise_add(y, x2, name="add2")
    y = sym.elemwise_add(y, y)
    g = graph.create(y)
    g._set_json_attr("shape_attr_key", "shape")
    g = g.apply(["InferShape", "InferType", "PlanMemory"])
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    storage_id = g.json_attr('storage_id')
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert (storage_id[jnode_row_ptr[nindex["addk"]]] !=
            storage_id[jnode_row_ptr[nindex["reshapek"]]])
    assert (storage_id[jnode_row_ptr[nindex["add2"]]] ==
            storage_id[jnode_row_ptr[nindex["reshapek"]]])
Example #10
def test_default_input():
    x = sym.Variable('x')
    y = sym.dense(data=x, units=30, name='fc', use_bias=False)
    assert y.list_input_names() == ['x', 'fc_weight']
    tname = [z.list_output_names()[0] for z in y.list_input_variables()]
    assert tname == y.list_input_names()
    try:
        z = sym.elemwise_add(x)
        assert False
    except NNVMError:
        pass
Example #11
def test_default_input():
    x = sym.Variable('x')
    y = sym.dense(data=x, units=30, name='fc', use_bias=False)
    assert y.list_input_names() == ['x', 'fc_weight']
    tname = [z.list_output_names()[0] for z in y.list_input_variables()]
    assert tname == y.list_input_names()
    try:
        z = sym.elemwise_add(x)
        assert False
    except NNVMError:
        pass
Example #12
def test_compose():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                name='exp', gpu=1, attr={"kk": "1"})

    assert y.list_input_names() == ['x']
    assert y.list_output_names() == ["exp_output"]
    assert y.list_attr()['gpu'] == '1'
    z = y.get_internals()
    assert z['add_output'].list_output_names() == ['add_output']
    assert y.list_attr(recursive=True)['add$gpu'] == '2'
Example #13
def simple_bn(x, gamma, beta, moving_mean, moving_var,
              axis=1, epsilon=1e-5, shape=None):
    # expect = (x - moving_mean) / sym.sqrt(moving_var + eps) * gamma + beta
    scale = sym.elemwise_mul(1 / sym.sqrt(moving_var + epsilon), gamma)
    shift = sym.elemwise_add(
        sym.elemwise_mul(sym.negative(moving_mean), scale), beta)
    # for 2D
    num_newaxis = len(shape) - axis - 1
    if num_newaxis:
        scale = sym.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
        shift = sym.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
    return x * scale + shift
Example #14
def test_infer_type():
    x = sym.Variable('x', dtype=0)
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.cast(y, dtype="float64", name="cast1")
    g = graph.create(y)
    g._set_json_attr("dtype_attr_key", "dtype")
    g = g.apply('InferType')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('dtype')[jnode_row_ptr[nindex["cast1"]]] == 1
    assert g.json_attr('dtype')[jnode_row_ptr[nindex["add1"]]] == 0
Example #15
def test_infer_shape():
    x = sym.Variable('x', shape=(2, 4, 2))
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten")
    g = graph.create(y)
    g._set_json_attr("shape_attr_key", "shape")
    g = g.apply('InferShape')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #16
def test_infer_type():
    x = sym.Variable('x', dtype=0)
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.cast(y, dtype="float64", name="cast1")
    g = graph.create(y)
    g._set_json_attr("dtype_attr_key", "dtype")
    g = g.apply('InferType')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('dtype')[jnode_row_ptr[nindex["cast1"]]] == 1
    assert g.json_attr('dtype')[jnode_row_ptr[nindex["add1"]]] == 0
Example #17
def test_infer_shape():
    x = sym.Variable('x', shape=(2, 4, 2))
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten")
    g = graph.create(y)
    g._set_json_attr("shape_attr_key", "shape")
    g = g.apply('InferShape')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #18
def test_residual_block_layout_transform():
    ch = 16
    size = 32
    data = sym.Variable(name="data")
    conv1 = sym.conv2d(data=data,
                       kernel_size=(3, 3),
                       channels=ch,
                       padding=(1, 1),
                       use_bias=False,
                       name="conv1")
    layout_transform1 = sym.__layout_transform__(data=conv1,
                                                 src_layout="NCHW",
                                                 dst_layout="NCHW8c")
    layout_transform2 = sym.__layout_transform__(data=layout_transform1,
                                                 src_layout="NCHW8c",
                                                 dst_layout="NCHW")
    conv2 = sym.conv2d(data=conv1,
                       kernel_size=(3, 3),
                       channels=ch,
                       padding=(1, 1),
                       use_bias=False,
                       name="conv2")
    elemwise_sum = sym.elemwise_add(layout_transform2, conv2)
    out = sym.relu(elemwise_sum)

    dtype = "float32"
    dshape = (1, ch, size, size)
    kshape = (ch, ch, 3, 3)
    oshape = (1, ch, size, size)
    shape_dict = {"data": dshape}

    target = "llvm"  # only test on llvm since it involves NCHW8c layout
    ctx = tvm.context(target, 0)
    graph, lib, _ = nnvm.compiler.build(out, target, shape_dict)
    # data, conv1 weight, conv1, layout transform + elemwise add + relu, conv2 weight, conv2 op
    assert graph.index.num_nodes == 6

    data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
    kernel1 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    kernel2 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    m = graph_runtime.create(graph, lib, ctx)
    m.run(data=data, conv1_weight=kernel1, conv2_weight=kernel2)
    out = m.get_output(0, tvm.nd.empty(oshape, dtype))

    conv1 = topi.testing.conv2d_nchw_python(data.asnumpy(), kernel1.asnumpy(),
                                            (1, 1), 'SAME')
    conv2 = topi.testing.conv2d_nchw_python(conv1, kernel2.asnumpy(), (1, 1),
                                            'SAME')
    ref = np.maximum(conv1 + conv2, 0)
    np.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example #19
def test_compose():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                name='exp',
                gpu=1,
                attr={"kk": "1"})

    assert y.list_input_names() == ['x']
    assert y.list_output_names() == ["exp_output"]
    assert y.list_attr()['gpu'] == '1'
    z = y.get_internals()
    assert z['add_output'].list_output_names() == ['add_output']
    assert y.list_attr(recursive=True)['add$gpu'] == '2'
Example #20
def test_infer_shape_known_partial():
    x = sym.Variable('x')
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten1")
    g = graph.create(y)
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    shape = [[2, 4, 2], [], []]
    g._set_json_attr("shape", shape, 'list_shape')
    g = g.apply("InferShape")
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten1"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #21
def test_infer_shape_known_partial():
    x = sym.Variable('x')
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten1")
    g = graph.create(y)
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    shape = [[2, 4, 2], [], []]
    g._set_json_attr("shape", shape, 'list_shape')
    g = g.apply("InferShape")
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten1"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #22
def test_gradient():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    assert len(gradient) == 2

    g1 = sym.Variable("g1")
    g2 = sym.Variable("g2")
    grad_ys = [g1, g2]
    gradient = graph_util.gradients(sym.Group([z1, z2]),
                                    sym.Group([x, y]), grad_ys=grad_ys)
    g_graph = graph.create(sym.Group(gradient)).ir()
    assert len(gradient) == 2
    assert "g1" in g_graph
    assert "g2" in g_graph
Example #23
def simple_bn(x,
              gamma,
              beta,
              moving_mean,
              moving_var,
              axis=1,
              epsilon=1e-5,
              shape=None):
    # expect = (x - moving_mean) / sym.sqrt(moving_var + eps) * gamma + beta
    scale = sym.elemwise_mul(1 / sym.sqrt(moving_var + epsilon), gamma)
    shift = sym.elemwise_add(
        sym.elemwise_mul(sym.negative(moving_mean), scale), beta)
    # for 2D
    num_newaxis = len(shape) - axis - 1
    if num_newaxis:
        scale = sym.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
        shift = sym.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
    return x * scale + shift
Example #24
def test_gradient():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    assert len(gradient) == 2

    g1 = sym.Variable("g1")
    g2 = sym.Variable("g2")
    grad_ys = [g1, g2]
    gradient = graph_util.gradients(sym.Group([z1, z2]),
                                    sym.Group([x, y]),
                                    grad_ys=grad_ys)
    g_graph = graph.create(sym.Group(gradient)).ir()
    assert len(gradient) == 2
    assert "g1" in g_graph
    assert "g2" in g_graph
Example #25
def test_create_full_graph():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    symbol = sym.Group([z1, z2])
    compute_graph = graph.create(symbol, need_backward=True)
    assert (compute_graph.index.num_nodes == 11)

    head_grads = [sym.Variable("g1"), sym.Variable("g2")]
    compute_graph = graph.create(symbol,
                                 need_backward=True,
                                 head_grads=head_grads)
    ir = compute_graph.ir()
    assert (compute_graph.index.num_nodes == 11)
    assert ("g1" in ir)
    assert ("g2" in ir)

    fixed_args = ["x"]
    compute_graph = graph.create(symbol,
                                 need_backward=True,
                                 fixed_args=fixed_args)
    assert (compute_graph.index.num_nodes == 8)
Example #26
def test_residual_block_layout_transform():
    ch = 16
    size = 32
    data = sym.Variable(name="data")
    conv1 = sym.conv2d(data=data, kernel_size=(3, 3), channels=ch,
                       padding=(1, 1), use_bias=False, name="conv1")
    layout_transform1 = sym.__layout_transform__(data=conv1, src_layout="NCHW",
                                                 dst_layout="NCHW8c")
    layout_transform2 = sym.__layout_transform__(data=layout_transform1,
                                                 src_layout="NCHW8c",
                                                 dst_layout="NCHW")
    conv2 = sym.conv2d(data=conv1, kernel_size=(3, 3), channels=ch,
                       padding=(1, 1), use_bias=False, name="conv2")
    elemwise_sum = sym.elemwise_add(layout_transform2, conv2)
    out = sym.relu(elemwise_sum)

    dtype="float32"
    dshape = (1, ch, size, size)
    kshape = (ch, ch, 3, 3)
    oshape = (1, ch, size, size)
    shape_dict = {"data": dshape}

    target = "llvm" # only test on llvm since it involves NCHW8c layout
    ctx = tvm.context(target, 0)
    graph, lib, _ = nnvm.compiler.build(out, target, shape_dict)
    # data, conv1 weight, conv1, layout transform + elemwise add + relu, conv2 weight, conv2 op
    assert graph.index.num_nodes == 6

    data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
    kernel1 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    kernel2 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    m = graph_runtime.create(graph, lib, ctx)
    m.run(data=data, conv1_weight=kernel1, conv2_weight=kernel2)
    out = m.get_output(0, tvm.nd.empty(oshape, dtype))

    conv1 = topi.testing.conv2d_nchw_python(
        data.asnumpy(), kernel1.asnumpy(), (1,1), 'SAME')
    conv2 = topi.testing.conv2d_nchw_python(
        conv1, kernel2.asnumpy(), (1,1), 'SAME')
    ref = np.maximum(conv1 + conv2, 0)
    tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example #27
# Most deep learning frameworks use a computation graph to describe
# their computation. In this example, we directly use
# NNVM's API to construct the computational graph.
#
# .. note::
#
#   In a typical deep learning compilation workflow,
#   we can get the models from :any:`nnvm.frontend`
#
# The following code snippet describes :math:`z = x + \sqrt{y}`
# and creates an NNVM graph from the description.
# We can print the graph IR to check the graph content.

x = sym.Variable("x")
y = sym.Variable("y")
z = sym.elemwise_add(x, sym.sqrt(y))
compute_graph = nnvm.graph.create(z)
print("-------compute graph-------")
print(compute_graph.ir())

######################################################################
# Compile
# -------
# We can call :any:`nnvm.compiler.build` to compile the graph.
# The build function takes a shape parameter which specifies the
# input shape requirement. Here we only need to pass in the shape of ``x``;
# the other shape will be inferred automatically by NNVM.
#
# The function returns three values. ``deploy_graph`` contains
# the final compiled graph structure. ``lib`` is a :any:`tvm.module.Module`
# that contains compiled CUDA functions. We do not need the ``params`` here.
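#
# A minimal sketch of that build call (not part of the original excerpt; it
# assumes a CUDA-enabled TVM build, and the input shape ``(4,)`` is only an
# illustrative choice):

import nnvm.compiler

shape = (4,)
deploy_graph, lib, params = nnvm.compiler.build(
    compute_graph, target="cuda", shape={"x": shape}, dtype="float32")
print("-------deploy graph-------")
print(deploy_graph.ir())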
Example #28
def test_copy():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.exp(sym.elemwise_add(x, x, name='add', gpu=2),
                name='exp', gpu=1, attr={"kk": "1"})
    assert y.__copy__().debug_str() == y.debug_str()
Example #29
def test_list_args():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.dense(data=x, name='fc', units=30)
    y = sym.elemwise_add(y, z, name='add1')
Example #30
def test_list_args():
    x = sym.Variable('x')
    z = sym.Variable('z')
    y = sym.dense(data=x, name='fc', units=30)
    y = sym.elemwise_add(y, z, name='add1')

import nnvm.compiler
import nnvm.symbol as sym
import numpy as np

x = sym.Variable("x")
y = sym.Variable("y")
z = sym.elemwise_add(x, sym.sqrt(y))
compute_graph = nnvm.graph.create(z)
x_np = np.array([1, 2, 3, 4]).astype("float32")
y_np = np.array([4, 4, 4, 4]).astype("float32")
shape = (4, )
deploy_graph, lib, params = nnvm.compiler.build(
    compute_graph, target="cuda",
    target_host="llvm -target=aarch64-linux-gnu",
    shape={"x": shape}, params={"y": y_np}, dtype="float32")

x_np.tofile("./data/x.bin")
np.save("./data/x.nparray", x_np)
with open("./model/jetson_gpu.json", "w") as fo:
    fo.write(deploy_graph.json())
with open("./model/jetson_gpu.params", "wb") as fo:
    fo.write(nnvm.compiler.save_param_dict(params))

lib.save("./model/jetson_gpu.o")
dev_modules = lib.imported_modules
dev_modules[0].save("./model/jetson_gpu.ptx")
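
# A minimal sketch (not from the original script) of how the artifacts saved
# above could be loaded back on the target device with the TVM graph runtime;
# the file names simply mirror the ones written above.
import tvm
from tvm.contrib import graph_runtime

loaded_graph = open("./model/jetson_gpu.json").read()
loaded_params = bytearray(open("./model/jetson_gpu.params", "rb").read())
loaded_lib = tvm.module.load("./model/jetson_gpu.o")
loaded_lib.import_module(tvm.module.load("./model/jetson_gpu.ptx"))

ctx = tvm.gpu(0)
module = graph_runtime.create(loaded_graph, loaded_lib, ctx)
module.load_params(loaded_params)
module.run(x=np.fromfile("./data/x.bin", dtype="float32"))
out = module.get_output(0, tvm.nd.empty(shape, "float32", ctx))
print(out.asnumpy())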