Example #1
def box_nms(node, params, graph):
    # `oscales` is captured from the enclosing scope (not shown in this
    # excerpt); it holds the output scales used to map float thresholds
    # into the quantized integer domain.
    name, op_name = node.attr('name'), node.attr('op_name')
    childs, attr = sutils.sym_iter(
        node.get_children()), node.list_attr()
    if op_name == '_greater_scalar':
        # Rescale the comparison threshold into the quantized domain.
        valid_thresh = sutils.get_attr(attr, 'scalar', 0)
        attr['scalar'] = int(valid_thresh * oscales[1])
        node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
    elif op_name == '_contrib_box_nms':
        # Rescale the NMS validity threshold the same way.
        valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
        attr['valid_thresh'] = int(valid_thresh * oscales[1])
        node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
    return node
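For context, a per-node rewrite like box_nms is typically driven by a topological traversal that rebuilds the graph bottom-up. The sketch below is a hypothetical driver assembled only from the sutils helpers that appear in these examples (topo_sort, sym_iter, get_mxnet_op); MRT's actual traversal utility may differ.

def apply_rewrite(symbol, params, rewrite):
    # Rebuild `symbol` bottom-up, passing every node through `rewrite`.
    graph = {}
    for op in sutils.topo_sort(symbol):
        name = op.attr('name')
        childs = sutils.sym_iter(op.get_children())
        if childs is not None:
            # Reconnect the node to its already-rewritten children.
            childs = [graph[c.attr('name')] for c in childs]
            op = sutils.get_mxnet_op(op.attr('op_name'))(
                *childs, **op.list_attr(), name=name)
        graph[name] = rewrite(op, params, graph)
    return graph[symbol.attr('name')]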
Example #2
import mxnet as mx
from mxnet import ndarray as nd

# `sutils` refers to MRT's symbol utilities, imported elsewhere in the
# original source file.


def _reshape(inputs, attrs, params):
    X, shape = inputs

    # Constant-fold the subgraph that produces `shape` by evaluating it
    # eagerly with ndarray ops, node by node in topological order.
    graph = {}
    for op in sutils.topo_sort(shape):
        name, op_name = op.attr('name'), op.attr('op_name')
        childs, attr = sutils.sym_iter(op.get_children()), op.list_attr()
        if childs is not None:
            childs = [graph[c.attr('name')] for c in childs]

        if sutils.is_var(op, params):
            pass
        elif childs is None:
            # Nullary op: evaluate it from its attributes alone.
            params[name] = sutils.get_nd_op(op_name)(**attr)
            op = mx.sym.var(name, shape=params[name].shape)
        else:
            assert all([sutils.is_params(c, params) for c in childs])
            in_params = [params[c.attr('name')] for c in childs]
            if op_name == "expand_dims" and in_params[0].shape == ():
                params[name] = nd.array([in_params[0].asnumpy()],
                                        dtype=in_params[0].dtype)
            elif op_name == "Reshape" and sutils.get_attr(attr, 'shape') == []:
                assert in_params[0].shape == (1,)
                params[name] = nd.array(in_params[0].asnumpy()[0],
                                        dtype=in_params[0].dtype)
            else:
                params[name] = sutils.get_nd_op(op_name)(*in_params, **attr)
            op = mx.sym.var(name, shape=params[name].shape)
        graph[name] = op

    assert sutils.is_params(graph[shape.attr('name')], params)
    shape = params[shape.attr('name')].asnumpy().tolist()
    shape[0] = -1  # dim 0 is the batch axis; keep it flexible with -1.
    return mx.sym.reshape(X, shape)
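The fold above works because every node in the shape subgraph can be evaluated eagerly. A minimal sketch of that trick, assuming sutils.get_nd_op resolves an operator name to its mxnet.ndarray counterpart:

from mxnet import ndarray as nd

fn = sutils.get_nd_op('expand_dims')     # assumed to resolve to nd.expand_dims
out = fn(nd.array([1, 2, 3]), axis=0)    # evaluates eagerly: shape (1, 3)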
Example #3
    def _fuse_custom_pad_transpose(sym, params, **kwargs):
        name, op_name = sym.attr('name'), sym.attr('op_name')
        attr, childs = sym.list_attr(), sutils.sym_iter(sym.get_children())

        ret = sym
        # Only rewrite a transpose whose input is the custom pad op;
        # `is_pad_op` is a helper from the enclosing scope.
        if op_name != 'transpose' or not is_pad_op(childs[0]):
            return ret, params

        # Swap the two ops: transpose first, then pad, permuting the
        # per-axis padding pairs to follow the transpose axes.
        cattr = childs[0].list_attr()
        padding = sutils.get_attr(cattr, 'padding')
        axes = sutils.get_attr(attr, 'axes')
        cchilds = sutils.sym_iter(childs[0].get_children())
        X = mx.sym.transpose(*cchilds, axes=axes)
        ret = mx.sym.Custom(X,
                            padding=[padding[r] for r in axes],
                            op_type="cvm_pad")
        return ret, params
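The key detail is that the per-axis padding pairs must be permuted to follow the transpose. A small worked example in plain Python, with illustrative values:

padding = [(0, 0), (1, 1), (2, 2), (3, 3)]   # (before, after) per input axis
axes = (0, 2, 3, 1)                          # NCHW -> NHWC transpose
permuted = [padding[r] for r in axes]        # [(0,0), (2,2), (3,3), (1,1)]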
Example #4
    def _fuse_custom_pad_transpose(sym, params, **kwargs):
        name, op_name = sym.attr('name'), sym.attr('op_name')
        attr, childs = sym.list_attr(), sutils.sym_iter(sym.get_children())
        cattr = childs[0].list_attr() if childs else None

        ret = sym
        # Same fusion as in Example #3, but the cvm_pad check is done
        # inline instead of through an is_pad_op helper.
        if op_name != 'transpose' or childs[0].attr('op_name') != 'Custom' or \
                not cattr or 'op_type' not in cattr or \
                cattr['op_type'] != 'cvm_pad':
            return ret, params

        padding = sutils.get_attr(cattr, 'padding')
        axes = sutils.get_attr(attr, 'axes')
        cchilds = sutils.sym_iter(childs[0].get_children())
        X = mx.sym.transpose(*cchilds, axes=axes)
        ret = mx.sym.Custom(X,
                            padding=[padding[r] for r in axes],
                            op_type="cvm_pad")
        return ret, params
Example #5
    def _fuse_pad_eq(sym, params, **kwargs):
        name, op_name = sym.attr('name'), sym.attr('op_name')
        attr, childs = sym.list_attr(), sutils.sym_iter(sym.get_children())

        ret = sym
        # Only fold a standalone Pad that feeds a Convolution/Pooling.
        if op_name not in ['Convolution', 'Pooling'] or \
                childs[0].attr('op_name') != 'Pad':
            return ret, params

        # The consumer must not already pad on its own.
        if 'pad' in attr:
            assert sutils.get_attr(attr, 'pad') == (0, 0)

        # pad_width holds (before, after) pairs per axis of an NCHW input;
        # only symmetric H/W padding fits into the consumer's `pad` attr.
        cattr = childs[0].list_attr()
        pad_width = sutils.get_attr(cattr, 'pad_width')
        if len(pad_width) != 8 or pad_width[4] != pad_width[5] or \
                pad_width[6] != pad_width[7]:
            return ret, params

        attr['pad'] = (pad_width[4], pad_width[6])
        # Reconnect the consumer directly to the Pad op's input.
        X = sutils.sym_iter(childs[0].get_children()) + childs[1:]
        ret = sutils.get_mxnet_op(op_name)(*X, **attr)

        return ret, params
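For reference, MXNet's Pad takes a flat pad_width of (before, after) pairs, one pair per axis of an NCHW input, while Convolution/Pooling express padding as a symmetric (pad_h, pad_w) attribute; hence the length-8 and symmetry checks above. Illustrative values:

pad_width = (0, 0, 0, 0, 1, 1, 2, 2)      # (N, C, H, W) pairs; N/C unpadded
conv_pad = (pad_width[4], pad_width[6])   # -> (1, 2), i.e. (pad_h, pad_w)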
Example #6
    def _fuse_custom_pad(sym, params, **kwargs):
        name, op_name = sym.attr('name'), sym.attr('op_name')
        attr, childs = sym.list_attr(), sutils.sym_iter(sym.get_children())

        ret = sym
        if op_name != 'Custom' or 'op_type' not in attr or \
                attr['op_type'] != 'cvm_pad':
            return ret, params

        # Flatten the per-axis (before, after) padding pairs into the
        # flat tuple layout expected by mx.sym.pad.
        padding = nd.array(sutils.get_attr(attr, 'padding'))
        padding = padding.reshape((-1,)).asnumpy().astype(np.int32).tolist()
        ret = mx.sym.pad(*childs, mode='constant', pad_width=tuple(padding))

        return ret, params
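The rewrite above lands on MXNet's built-in constant-mode padding. Concretely, for a 4-D NCHW input it produces a call of this shape (padding values here are illustrative):

import mxnet as mx

X = mx.sym.var('data')   # 4-D NCHW input assumed
out = mx.sym.pad(X, mode='constant',
                 pad_width=(0, 0, 0, 0, 1, 1, 1, 1))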
Example #7
import os
from os import path
import logging

import numpy as np
import tvm
import nnvm

# `sutils`, `topo_sort` and `to_nnvm` are MRT-internal helpers, assumed
# to be imported from the surrounding package in the original source.


def compile_to_cvm(model,
                   model_name,
                   datadir="/data/std_out",
                   input_shape=None,
                   target="cuda"):
    """ Compile an MXNet model into the CVM-accepted JSON & binary format.
    """
    logger = logging.getLogger("mrt.compile")
    symbol, params = model.symbol, model.params

    datadir = path.join(datadir, model_name)
    os.makedirs(datadir, exist_ok=True)

    # transform the mxnet symbol into a cvm (nnvm) graph
    logger.info("Transform Mxnet symbol into CVM")
    nnvm_sym, _ = to_nnvm(symbol, params)
    dtype, nnvm_params = "int32", {}
    tvm_ctx = tvm.context(target, 0)
    for sym in topo_sort(symbol):
        if sutils.is_params(sym, params):
            key, value = sym.attr('name'), params[sym.attr('name')]
            flat = value.asnumpy()
            # every parameter must fit into int32 without loss
            assert np.abs(flat).max() <= sutils.INT32_MAX, \
                "key: {}\nvalue: {}".format(key, value)
            assert (flat.astype(dtype).astype("float64") == flat).all(), \
                "key: {}\nvalue: {}".format(key, value)
            nnvm_params[key] = tvm.nd.array(flat.astype(dtype), tvm_ctx)

    # compile to the JSON & bytes format
    # graph = nnvm.graph.create(nnvm_sym)
    # open("/tmp/tmp.nnvm.json", "w").write(graph.json())
    logger.info("Compile into CVM graph")
    if input_shape is None:
        # infer the input shape from the graph's single input
        for sym in topo_sort(symbol):
            if sutils.is_inputs(sym, params):
                _, oshp, _ = sym.infer_shape()
                assert len(oshp) == 1
                input_shape = oshp[0]
    input_shapes = {'data': input_shape}
    with nnvm.compiler.build_config(opt_level=0):
        deploy_graph, _, nnvm_params = nnvm.compiler.build(nnvm_sym,
                                                           target=target,
                                                           shape=input_shapes,
                                                           params=nnvm_params,
                                                           dtype=dtype)

    # reduce parameter precision: store as int8 where the annotated
    # precision allows, int32 otherwise
    logger.info("Parameters precision reduce")
    for sym in topo_sort(nnvm_sym):
        if sutils.is_params(sym, nnvm_params):
            name, attr = sym.attr('name'), sym.list_attr()
            precision = sutils.get_attr(attr, "precision")
            dtype = "int32" if precision > 8 else "int8"
            nnvm_params[name] = tvm.nd.array(
                params[name].asnumpy().astype(dtype), tvm_ctx)

    # dump the graph JSON and serialized parameters to disk
    logger.info("CVM Json&Params dump")
    with open(path.join(datadir, "symbol"), "w") as fout:
        fout.write(deploy_graph.json())
    param_bytes = nnvm.compiler.save_param_dict(nnvm_params)
    with open(path.join(datadir, "params"), "wb") as fout:
        fout.write(param_bytes)
    return deploy_graph, nnvm_params
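A hypothetical invocation, assuming `model` is an MRT model wrapper exposing `.symbol` and `.params` (the model name and input shape below are illustrative):

deploy_graph, cvm_params = compile_to_cvm(
    model, "resnet50_v1_int8",
    datadir="/data/std_out",
    input_shape=(1, 3, 224, 224),
    target="cuda")
# writes <datadir>/resnet50_v1_int8/symbol and .../params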