Example 1
def check(dim, axis, nstep):
    eps = 0.01
    x = sym.Variable("x") + 1
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    y1, y2 = x, sym.Variable("xx") + 1
    ishape = {"x": tuple(10 for i in range(dim))}
    for i in range(nstep):
        y1 = sym.batch_norm(y1 + 1,
                            gamma,
                            beta,
                            moving_mean,
                            moving_var,
                            epsilon=eps,
                            axis=axis)
        y1 = sym.dropout(y1)
        y2 = simple_bn(y2 + 1,
                       gamma,
                       beta,
                       moving_mean,
                       moving_var,
                       epsilon=eps,
                       axis=axis,
                       shape=ishape["x"])
    g = nnvm.graph.create(y1)
    g2 = nnvm.graph.create(y2)
    graph_attr.set_shape_inputs(g, ishape)
    g1 = g.apply("InferShape").apply("SimplifyInference")
    # assert graph equals as expected
    graph_util.check_graph_equal(g1, g2)
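For reference, `simple_bn` is a helper defined elsewhere in the test module: it spells out inference-mode batch normalization by hand so the result can be compared against what the SimplifyInference pass produces. A minimal sketch of such a helper is below; the operator choices and reshaping are assumptions for illustration, not the test's actual code, and the exact form would have to match the pass output for check_graph_equal to succeed.

def simple_bn(x, gamma, beta, moving_mean, moving_var,
              axis=1, epsilon=1e-5, shape=None):
    # Inference-mode batch norm written out by hand:
    #   y = (x - moving_mean) / sqrt(moving_var + epsilon) * gamma + beta
    scale = gamma / sym.sqrt(moving_var + epsilon)
    shift = beta - moving_mean * scale
    # Reshape the per-channel scale/shift so they broadcast along `axis`.
    num_newaxis = len(shape) - axis - 1
    if num_newaxis:
        scale = sym.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
        shift = sym.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
    return sym.broadcast_add(sym.broadcast_mul(x, scale), shift)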
Example 2
def compare_graph(sym1, sym2, ishape=(2, 3, 224, 224)):
    g1 = nnvm.graph.create(sym1)
    g2 = nnvm.graph.create(sym2)
    graph_attr.set_shape_inputs(g1, {'data':ishape})
    graph_attr.set_shape_inputs(g2, {'data':ishape})
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
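This helper is typically called with a symbol converted from another framework and a hand-written NNVM reference. As a minimal, self-contained illustration (assuming `import nnvm.symbol as sym`; the tiny network and its names are assumptions), two identically constructed symbols should simplify to equal graphs:

data = sym.Variable("data")
net_a = sym.relu(sym.conv2d(data, channels=8, kernel_size=(3, 3), name="conv"), name="act")
net_b = sym.relu(sym.conv2d(data, channels=8, kernel_size=(3, 3), name="conv"), name="act")
compare_graph(net_a, net_b, ishape=(2, 3, 224, 224))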
Example 3
def compare_graph(sym1, sym2, ishape=(2, 3, 224, 224)):
    g1 = nnvm.graph.create(sym1)
    g2 = nnvm.graph.create(sym2)
    graph_attr.set_shape_inputs(g1, {'data':ishape})
    graph_attr.set_shape_inputs(g2, {'data':ishape})
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
Example 4
def compare_graph(init, predict, nnvm_sym, ishape):
    caffe2_sym, params = nnvm.frontend.from_caffe2(init, predict)
    g1 = nnvm.graph.create(caffe2_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    input_name = predict.external_input[0]
    ishapes = {input_name: ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
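Here `init` and `predict` are Caffe2 NetDef protobufs. A hedged sketch of how they might be loaded from disk before the comparison (the file paths and the reference symbol `reference_nnvm_sym` are illustrative assumptions):

from caffe2.proto import caffe2_pb2

init_net = caffe2_pb2.NetDef()
predict_net = caffe2_pb2.NetDef()
with open("init_net.pb", "rb") as f:        # hypothetical path
    init_net.ParseFromString(f.read())
with open("predict_net.pb", "rb") as f:     # hypothetical path
    predict_net.ParseFromString(f.read())

compare_graph(init_net, predict_net, reference_nnvm_sym, ishape=(1, 3, 224, 224))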
Example 5
def check(shape, channels):
    x = sym.Variable("x")
    bias = sym.Variable("bias")
    scale = sym.Variable("scale")
    y1 = before(x, scale, channels)
    ishape = {"x": shape, "scale": (channels,), "bias": (channels,)}
    g1 = nnvm.graph.create(y1)
    graph_attr.set_shape_inputs(g1, ishape)
    g2 = g1.apply("InferShape").apply("FoldScaleAxis")
    # assert graph equals as expected
    graph_util.check_graph_equal(g1, g2)
Example 6
def compare_graph(onnx_file, nnvm_sym, ishape):
    onnx_graph = onnx.load(onnx_file)
    onnx_sym, params = nnvm.frontend.from_onnx(onnx_graph)
    g1 = nnvm.graph.create(onnx_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    ishapes = {'input_0': ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
Example 7
def compare_graph(onnx_file, nnvm_sym, ishape):
    onnx_graph = onnx.load(onnx_file)
    onnx_sym, params = nnvm.frontend.from_onnx(onnx_graph)
    g1 = nnvm.graph.create(onnx_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    ishapes = {'input_0': ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
Example 8
def check(shape, channels):
    x = sym.Variable("x")
    bias = sym.Variable("bias")
    scale = sym.Variable("scale")
    y1 = before(x, scale, channels)
    ishape = {"x": shape, "scale": (channels,), "bias": (channels,)}
    g1 = nnvm.graph.create(y1)
    graph_attr.set_shape_inputs(g1, ishape)
    g2 = g1.apply("InferShape").apply("FoldScaleAxis")
    # assert graph equals as expected
    graph_util.check_graph_equal(g1, g2)
Example 9
def compare_graph(init, predict, nnvm_sym, ishape):
    caffe2_sym, params = nnvm.frontend.from_caffe2(init, predict)
    g1 = nnvm.graph.create(caffe2_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    input_name = predict.external_input[0]
    ishapes = {input_name: ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
Example 10
def compare_graph(onnx_file, nnvm_sym, ishape):
    onnx_model = onnx.load_model(onnx_file)
    onnx_sym, params = nnvm.frontend.from_onnx(onnx_model)
    g1 = nnvm.graph.create(onnx_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    input_name = onnx_model.graph.input[0].name
    ishapes = {input_name: ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
Example 11
def compare_graph(onnx_file, nnvm_sym, ishape):
    onnx_model = onnx.load_model(onnx_file)
    onnx_sym, params = nnvm.frontend.from_onnx(onnx_model)
    g1 = nnvm.graph.create(onnx_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    input_name = onnx_model.graph.input[0].name
    ishapes = {input_name: ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
Example 12
def check(shape, channels):
    x = sym.Variable("x") + 1
    weight = sym.Variable("weight")
    bias = sym.Variable("bias")
    in_scale = sym.Variable("in_scale")
    out_scale = sym.Variable("out_scale")
    y1 = before(x, weight, bias, in_scale, out_scale, channels)
    y2 = expected(x, weight, bias, in_scale, out_scale, channels)
    ishape = {"x": shape, "out_scale": (channels,), "in_scale": (shape[1],)}
    g1 = nnvm.graph.create(y1)
    g2 = nnvm.graph.create(y2)
    graph_attr.set_shape_inputs(g1, ishape)
    g1 = g1.apply("InferShape").apply("FoldScaleAxis")
    # assert graph equals as expected
    graph_util.check_graph_equal(g1, g2)
Example 13
def check(shape, channels):
    x = sym.Variable("x") + 1
    weight = sym.Variable("weight")
    bias = sym.Variable("bias")
    in_scale = sym.Variable("in_scale")
    out_scale = sym.Variable("out_scale")
    y1 = before(x, weight, bias, in_scale, out_scale, channels)
    y2 = expected(x, weight, bias, in_scale, out_scale, channels)
    ishape = {"x": shape, "out_scale": (channels,), "in_scale": (shape[1],)}
    g1 = nnvm.graph.create(y1)
    g2 = nnvm.graph.create(y2)
    graph_attr.set_shape_inputs(g1, ishape)
    g1 = g1.apply("InferShape").apply("FoldScaleAxis")
    # assert graph equals as expected
    graph_util.check_graph_equal(g1, g2)
Example 14
def compare_graph(onnx_file, nnvm_sym, ishape):
    onnx_vars = [int(n) for n in onnx.__version__.split('.')] if hasattr(onnx, "__version__") else []
    if len(onnx_vars) >= 2 and (onnx_vars[0] > 0 or onnx_vars[1] >= 2):  # version >= 0.2
        onnx_model = onnx.load(onnx_file)
        onnx_sym, params = nnvm.frontend.from_onnx(onnx_model.graph)
    else:
        onnx_graph = onnx.load(onnx_file)
        onnx_sym, params = nnvm.frontend.from_onnx(onnx_graph)
    g1 = nnvm.graph.create(onnx_sym)
    g2 = nnvm.graph.create(nnvm_sym)
    ishapes = {'input_0': ishape}
    graph_attr.set_shape_inputs(g1, ishapes)
    graph_attr.set_shape_inputs(g2, ishapes)
    g1 = g1.apply("InferShape").apply("SimplifyInference")
    g2 = g2.apply("InferShape").apply("SimplifyInference")
    graph_util.check_graph_equal(g1, g2)
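A possible invocation, assuming a serialized ONNX file on disk whose converted graph input ends up named 'input_0' (the file name and the one-operator reference graph are illustrative assumptions):

data = nnvm.sym.Variable("input_0")
ref_sym = nnvm.sym.relu(data)   # stand-in for the hand-built expected graph
compare_graph("single_relu.onnx", ref_sym, ishape=(1, 3, 224, 224))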
Example 15
def check(dim, axis, nstep):
    eps = 0.01
    x = sym.Variable("x") + 1
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    y1, y2 = x, sym.Variable("xx") + 1
    ishape = {"x": tuple(10 for i in range(dim))}
    for i in range(nstep):
        y1 = sym.batch_norm(
            y1 + 1, gamma, beta, moving_mean, moving_var, epsilon=eps, axis=axis)
        y1 = sym.dropout(y1)
        y2 = simple_bn(y2 + 1, gamma, beta, moving_mean, moving_var,
                       epsilon=eps, axis=axis, shape=ishape["x"])
    g = nnvm.graph.create(y1)
    g2 = nnvm.graph.create(y2)
    graph_attr.set_shape_inputs(g, ishape)
    g1 = g.apply("InferShape").apply("SimplifyInference")
    # assert graph equals as expected
    graph_util.check_graph_equal(g1, g2)
Example 16
def generate_graph(graph_fn, params_fn, device="vta"):

    # Measure build start time
    build_start = time.time()

    # Derive the TVM target
    target = tvm.target.create("llvm -device={}".format(device))

    # Derive the LLVM compiler flags
    # When targeting the Pynq, cross-compile to the ARMv7 ISA
    if env.TARGET == "sim":
        target_host = "llvm"
    elif env.TARGET == "pynq":
        target_host = "llvm -mtriple=armv7-none-linux-gnueabihf -mcpu=cortex-a9 -mattr=+neon"

    # Load the ResNet-18 graph and parameters
    sym = nnvm.graph.load_json(open(graph_fn).read())
    params = nnvm.compiler.load_param_dict(open(params_fn, 'rb').read())

    # Populate the shape and data type dictionary
    shape_dict = {"data": (1, 3, 224, 224)}
    dtype_dict = {"data": 'float32'}
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})

    # Create NNVM graph
    graph = nnvm.graph.create(sym)
    graph_attr.set_shape_inputs(sym, shape_dict)
    graph_attr.set_dtype_inputs(sym, dtype_dict)
    graph = graph.apply("InferShape").apply("InferType")

    # Apply NNVM graph optimization passes
    sym = vta.graph.clean_cast(sym)
    sym = vta.graph.clean_conv_fuse(sym)
    if target.device_name == "vta":
        assert env.BLOCK_IN == env.BLOCK_OUT
        sym = vta.graph.pack(sym, shape_dict, env.BATCH, env.BLOCK_OUT)

    # Compile NNVM graph
    with nnvm.compiler.build_config(opt_level=3):
        if target.device_name != "vta":
            graph, lib, params = nnvm.compiler.build(
                sym, target, shape_dict, dtype_dict,
                params=params, target_host=target_host)
        else:
            with vta.build_config():
                graph, lib, params = nnvm.compiler.build(
                    sym, target, shape_dict, dtype_dict,
                    params=params, target_host=target_host)

    # Save the compiled inference graph library
    assert tvm.module.enabled("rpc")
    temp = util.tempdir()
    lib.save(temp.relpath("graphlib.o"))

    # Send the inference library over to the remote RPC server
    remote.upload(temp.relpath("graphlib.o"))
    lib = remote.load_module("graphlib.o")

    # Measure build time
    build_time = time.time() - build_start
    print("ResNet-18 inference graph built in {0:.2f}s!".format(build_time))

    return graph, lib, params
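A possible call site for this function (the file names are illustrative; `env`, `remote`, and the RPC session are assumed to be set up elsewhere in the script, since the body above relies on them):

graph, lib, params = generate_graph("resnet18_qt8.json",
                                    "resnet18_qt8.params",
                                    device="vta")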
Example 17
def pack(graph, shape_dict, bfactor, cfactor, start_name=None):
    """Pack the graph into batch&channel packed format.

    Parameters
    ----------
    graph : Graph
       The input graph.

    shape_dict : dict of str to shape
       The input shape.

    bfactor : int
       The packing factor in the batch dimension.

    cfactor : int
       The packing factor in the channel dimension.

    start_name : str, optional
       Start packing from a certain known node.

    Returns
    -------
    graph : Graph
        The transformed graph.
    """
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    shape = graph.json_attr("shape")
    gidx = graph.index
    node_map = {}
    dset = set()
    counter = 0
    start_pack = False

    for nid, node in enumerate(gidx.nodes):
        children = [node_map[e[0]] for e in node["inputs"]]
        ishape = [shape[gidx.entry_id(e)] for e in node["inputs"]]
        oshape = shape[gidx.entry_id(nid, 0)]
        attrs = node.get("attrs", {})
        node_name = node["name"]
        op_name = node["op"]
        get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
            *c, name=n_n, **a)

        if op_name == "null":
            new_node = nnvm.symbol.Variable(node_name)
            if start_name and node_name == start_name:
                start_pack = True
                new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif op_name == "max_pool2d":
            assert not start_pack
            start_pack = True
            new_node = get_clone(children, op_name, node_name, attrs)
            new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif op_name == "global_avg_pool2d":
            if start_pack:
                start_pack = False
                children[0] = _unpack_batch_channel(children[0], ishape[0])
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name == "conv2d" and attrs["out_dtype"] == "int32":
            if start_pack:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "OIHW%do%di" % (cfactor, cfactor)
                data, weight = children
                weight = _pack_weight(weight, ishape[1], cfactor)
                new_node = nnvm.sym.conv2d(
                    data, weight, name=node_name, **attrs)
            elif counter == 1:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "OIHW%do%di" % (cfactor, cfactor)
                data, weight = children
                data = _pack_batch_channel(data, ishape[0], bfactor, cfactor)
                weight = _pack_weight(weight, ishape[1], cfactor)
                new_node = nnvm.sym.conv2d(
                    data, weight, name=node_name, **attrs)
                new_node = _unpack_batch_channel(new_node, oshape)
                counter = counter + 1
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("broadcast"):
            if start_pack:
                assert len(ishape[1]) == 3
                children[1] = _pack_bias(children[1], ishape[1], bfactor, cfactor)
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("elementwise_add"):
            new_node = get_clone(children, op_name, node_name, attrs)
        else:
            new_node = get_clone(children, op_name, node_name, attrs)
            dset.add(op_name)
        node_map[nid] = new_node

    assert len(graph.index.output_entries) == 1
    ret = node_map[graph.index.output_entries[0][0]]
    if start_pack:
        oshape = shape[graph.index.output_entries[0][0]]
        ret = _unpack_batch_channel(ret, oshape)
    graph = nnvm.graph.create(ret)
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    return graph
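As a rough usage sketch (the network symbol and the packing factors are assumptions; Example 16 obtains the factors from env.BATCH and env.BLOCK_OUT):

shape_dict = {"data": (1, 3, 224, 224)}
graph = nnvm.graph.create(net_sym)   # net_sym: an int8 conv network, assumed defined
packed = pack(graph, shape_dict, bfactor=1, cfactor=16)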
Example 18
def pack(graph, shape_dict, bfactor, cfactor, start_name=None):
    """Pack the graph into batch&channel packed format.

    Parameters
    ----------
    graph : Graph
       The input graph.

    shape_dict : dict of str to shape
       The input shape.

    bfactor : int
       The packing factor in the batch dimension.

    cfactor : int
       The packing factor in the channel dimension.

    start_name : str, optional
       Start packing from a certain known node.

    Returns
    -------
    graph : Graph
        The transformed graph.
    """
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    shape = graph.json_attr("shape")
    gidx = graph.index
    node_map = {}
    dset = set()
    counter = 0
    start_pack = False

    for nid, node in enumerate(gidx.nodes):
        children = [node_map[e[0]] for e in node["inputs"]]
        ishape = [shape[gidx.entry_id(e)] for e in node["inputs"]]
        oshape = shape[gidx.entry_id(nid, 0)]
        attrs = node.get("attrs", {})
        node_name = node["name"]
        op_name = node["op"]
        get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
            *c, name=n_n, **a)

        if op_name == "null":
            new_node = nnvm.symbol.Variable(node_name)
            if start_name and node_name == start_name:
                start_pack = True
                new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif op_name == "max_pool2d":
            assert not start_pack
            start_pack = True
            new_node = get_clone(children, op_name, node_name, attrs)
            new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif op_name == "global_avg_pool2d":
            if start_pack:
                start_pack = False
                children[0] = _unpack_batch_channel(children[0], ishape[0])
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name == "conv2d" and attrs["out_dtype"] == "int32":
            if start_pack:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "OIHW%do%di" % (cfactor, cfactor)
                data, weight = children
                weight = _pack_weight(weight, ishape[1], cfactor)
                new_node = nnvm.sym.conv2d(
                    data, weight, name=node_name, **attrs)
            elif counter == 1:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "OIHW%do%di" % (cfactor, cfactor)
                data, weight = children
                data = _pack_batch_channel(data, ishape[0], bfactor, cfactor)
                weight = _pack_weight(weight, ishape[1], cfactor)
                new_node = nnvm.sym.conv2d(
                    data, weight, name=node_name, **attrs)
                new_node = _unpack_batch_channel(new_node, oshape)
                counter = counter + 1
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("broadcast"):
            if start_pack:
                assert len(ishape[1]) == 3
                children[1] = _pack_bias(children[1], ishape[1], bfactor, cfactor)
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("elementwise_add"):
            new_node = get_clone(children, op_name, node_name, attrs)
        else:
            new_node = get_clone(children, op_name, node_name, attrs)
            dset.add(op_name)
        node_map[nid] = new_node

    assert len(graph.index.output_entries) == 1
    ret = node_map[graph.index.output_entries[0][0]]
    if start_pack:
        oshape = shape[graph.index.output_entries[0][0]]
        ret = _unpack_batch_channel(ret, oshape)
    graph = nnvm.graph.create(ret)
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    return graph
Example 19
def nnvm_graph_pack(graph,
                    shape_dict,
                    bfactor,
                    cfactor,
                    weight_bits,
                    start_name="max_pool2d0",
                    stop_name="global_avg_pool2d0"):
    """Pack the graph into batch&channel packed format.

    Parameters
    ----------
    graph : Graph
       The input graph.

    shape_dict : dict of str to shape
       The input shape.

    bfactor : int
       The packing factor in the batch dimension.

    cfactor : int
       The packing factor in the channel dimension.

    weight_bits : int
       The bit-width of each weight element (used to derive the bit-packing lanes).

    start_name : str, optional
       Start packing from a certain known node.

    stop_name : str, optional
       Stop packing at a certain known node.

    Returns
    -------
    graph : Graph
        The transformed graph.
    """
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    shape = graph.json_attr("shape")
    gidx = graph.index
    node_map = {}
    dset = set()
    start_pack = False

    for nid, node in enumerate(gidx.nodes):
        children = [node_map[e[0]] for e in node["inputs"]]
        ishape = [shape[gidx.entry_id(e)] for e in node["inputs"]]
        oshape = shape[gidx.entry_id(nid, 0)]
        attrs = node.get("attrs", {})
        node_name = node["name"]
        op_name = node["op"]
        get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
            *c, name=n_n, **a)
        if op_name == "null":
            new_node = nnvm.symbol.Variable(node_name)
            if start_name and node_name == start_name:
                start_pack = True
                new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
            if start_pack and "_begin_state_" in node_name: # RNN -> CNN, pack
                new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif node_name == start_name:
            assert not start_pack
            start_pack = True
            new_node = get_clone(children, op_name, node_name, attrs)
            new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif node_name == stop_name:
            if start_pack:
                start_pack = False
                children[0] = _unpack_batch_channel(children[0], ishape[0])
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name == "conv2d" and attrs.get("out_dtype", None) == "int32":
            assert 8 % weight_bits == 0
            w_lanes = 8 // weight_bits
            if start_pack:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "OIHW%do%di%dp" % (cfactor, cfactor, w_lanes)
                data, weight = children
                weight = _pack_weight(weight, ishape[1], cfactor)
                # insert bit packing when necessary
                if w_lanes != 1:
                    assert 8 % w_lanes == 0
                    weight = nnvm.sym.bitpack(weight, lanes=w_lanes)
                new_node = nnvm.sym.conv2d(
                    data, weight, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name == "conv2d_transpose" and attrs.get("out_dtype", None) == "int32":
            assert 8 % weight_bits == 0
            w_lanes = 8 // weight_bits
            if start_pack:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "IOHW%di%do%dp" % (cfactor, cfactor, w_lanes)
                data, weight = children
                weight = _pack_weight_conv2d_transpose(weight, ishape[1], cfactor)
                new_node = nnvm.sym.conv2d_transpose(
                    data, weight, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("broadcast_") and tuple(ishape[0]) == tuple(ishape[1]):
            new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("broadcast") and len(ishape[1]) == 3:
            if start_pack:
                children[1] = _pack_bias(children[1], ishape[1], bfactor, cfactor)
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("elementwise_add"):
            new_node = get_clone(children, op_name, node_name, attrs)
        else:
            new_node = get_clone(children, op_name, node_name, attrs)
            dset.add(op_name)
        node_map[nid] = new_node

    assert len(graph.index.output_entries) == 1
    ret = node_map[graph.index.output_entries[0][0]]
    if start_pack:
        oshape = shape[graph.index.output_entries[0][0]]
        ret = _unpack_batch_channel(ret, oshape)
    graph = nnvm.graph.create(ret)
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    return graph
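A hedged usage sketch of this extended packer (the env attributes refer to the VTA environment object used in Example 16; env.WGT_WIDTH is an assumption here, and the start/stop names are the function's defaults):

packed = nnvm_graph_pack(graph, shape_dict,
                         bfactor=env.BATCH,
                         cfactor=env.BLOCK_OUT,
                         weight_bits=env.WGT_WIDTH,
                         start_name="max_pool2d0",
                         stop_name="global_avg_pool2d0")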
Example 20
######################################################################
# now compile the graph
import nnvm.compiler
np.random.seed(0)
sym = nnvm.graph.load_json(open(os.path.join(RESNET_GRAPH_FILE)).read())
params = pickle.load(open(os.path.join(RESNET_PARAMS_FILE), 'rb'))

dt = time.time()
shape_dict = {"data": img.shape}
dtype_dict = {"data": 'float32'}
shape_dict.update({k: v.shape for k, v in params.items()})
dtype_dict.update({k: str(v.dtype) for k, v in params.items()})

graph = nnvm.graph.create(sym)
graph_attr.set_shape_inputs(sym, shape_dict)
graph_attr.set_dtype_inputs(sym, dtype_dict)
graph = graph.apply("InferShape").apply("InferType")

dtype = "float32"
sym = vta.graph.remove_stochastic(sym)
sym = vta.graph.clean_cast(sym)
sym = vta.graph.clean_conv_fuse(sym)
if "vta" in target:
    sym = vta.graph.pack(sym, shape_dict, factor)

graph_attr.set_shape_inputs(sym, shape_dict)
sym = sym.apply("InferShape")
graph_attr.set_dtype_inputs(sym, dtype_dict)
sym = sym.apply("InferType")
timers['execution_time_prepare_graph'] = time.time() - dt