Example #1
def convert_argmax(node, **kwargs):
    """Map MXNet's argmax operator attributes to onnx's ArgMax operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = int(attrs.get("axis"))
    keepdims = get_boolean_attribute_value(attrs, "keepdims")

    argmax_node = onnx.helper.make_node('ArgMax',
                                        inputs=input_nodes,
                                        axis=axis,
                                        keepdims=keepdims,
                                        outputs=[name],
                                        name=name)

    # .name yields a plain string such as 'float32', so the comparisons
    # and the .upper() call below work for every dtype
    dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[kwargs["in_type"]].name
    if dtype == 'float32':
        dtype = 'float'
    elif dtype == 'float64':
        dtype = 'double'
    cast_name = 'cast_' + name
    cast_node = onnx.helper.make_node(
        "Cast",
        [name],
        [cast_name],
        to=getattr(onnx.TensorProto, dtype.upper()),
        name=cast_name,
    )

    return [argmax_node, cast_node]
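ONNX's ArgMax always emits int64 indices, while MXNet's argmax keeps the input dtype, which is what the trailing Cast restores. A minimal sketch (assuming only the onnx package; the tensor names are illustrative) wiring the same two-node pattern into a checkable graph:

import onnx
from onnx import helper, TensorProto

# data -> ArgMax (always int64) -> Cast back to the input dtype
argmax = helper.make_node('ArgMax', ['data'], ['idx'], axis=1, keepdims=1,
                          name='idx')
cast = helper.make_node('Cast', ['idx'], ['out'], to=TensorProto.FLOAT,
                        name='cast_idx')
graph = helper.make_graph(
    [argmax, cast], 'argmax_cast',
    [helper.make_tensor_value_info('data', TensorProto.FLOAT, [2, 3])],
    [helper.make_tensor_value_info('out', TensorProto.FLOAT, [2, 1])])
onnx.checker.check_model(helper.make_model(graph))  # passes: a valid graph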
Example #2
def convert_clip(node, **kwargs):
    """Map MXNet's Clip operator attributes to onnx's Clip operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    initializer = kwargs["initializer"]
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')]
    a_min_name = "a_min" + str(kwargs["idx"])
    a_min_vals = np.array([float(attrs.get('a_min', -np.inf))])
    a_min_node = onnx.helper.make_tensor_value_info(a_min_name, data_type, [])
    initializer.append(
        onnx.helper.make_tensor(name=a_min_name,
                                data_type=data_type,
                                dims=[],
                                vals=a_min_vals,
                                raw=False))
    a_max_name = "a_max" + str(kwargs["idx"])
    a_max_vals = np.array([float(attrs.get('a_max', np.inf))])  # upper bound defaults to +inf
    a_max_node = onnx.helper.make_tensor_value_info(a_max_name, data_type, [])
    initializer.append(
        onnx.helper.make_tensor(name=a_max_name,
                                data_type=data_type,
                                dims=[],
                                vals=a_max_vals,
                                raw=False))

    clip_node = onnx.helper.make_node(
        "Clip",
        [input_nodes[0], a_min_name, a_max_name],
        [name],
        name=name,
    )
    return [a_min_node, a_max_node, clip_node]
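MXNet's clip treats a missing bound as unbounded, so the a_max default must be +np.inf (a negative infinity there would clamp everything down). The numpy equivalent, purely as an illustration:

import numpy as np

x = np.array([-2.0, 0.5, 3.0], dtype=np.float32)
print(np.clip(x, -np.inf, 1.0))  # [-2.   0.5  1. ]  only the upper bound set
print(np.clip(x, 0.0, np.inf))   # [0.   0.5  3. ]  only the lower bound set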
Example #3
def convert_ones_like(node, **kwargs):
    """Map MXNet's ones_like operator attributes to onnx's Shape and ConstantOfShape operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    olnode = []
    op_name = "ones_like_shape" + str(kwargs["idx"])
    shape_node = onnx.helper.make_node('Shape',
                                       inputs=[input_nodes[0]],
                                       outputs=[op_name],
                                       name=op_name)

    input_nodes[0] = op_name
    olnode.append(shape_node)

    tensor_node = onnx.helper.make_tensor(
        "ones_like_value" + str(kwargs["idx"]), onnx.TensorProto.FLOAT, [1],
        [1])
    shape_like_node = onnx.helper.make_node('ConstantOfShape',
                                            inputs=[input_nodes[0]],
                                            outputs=[name],
                                            value=tensor_node,
                                            name=name)
    olnode.append(shape_like_node)

    return olnode
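The Shape -> ConstantOfShape pair reproduces numpy's ones_like: read the input's runtime shape, then fill a tensor of that shape with the constant 1. A one-line sanity check of the intended semantics:

import numpy as np

x = np.random.rand(2, 3).astype(np.float32)
# ConstantOfShape(Shape(x), value=1.0) should compute:
assert np.array_equal(np.full(x.shape, 1.0, dtype=np.float32), np.ones_like(x))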
Example #4
def convert_upsample(node, **kwargs):
    """Map MXNet's UpSampling operator attributes to onnx's Upsample operator
    and return the created node.
    """

    name, input_nodes, attrs = get_inputs(node, kwargs)

    sample_type = attrs.get('sample_type', 'nearest')
    sample_type = 'linear' if sample_type == 'bilinear' else sample_type
    scale = convert_string_to_list(attrs.get('scale'))
    scaleh = scalew = float(scale[0])
    if len(scale) > 1:
        scaleh = float(scale[0])
        scalew = float(scale[1])
    scale = np.array([1.0, 1.0, scaleh, scalew], dtype=np.float32)
    roi = np.array([], dtype=np.float32)
    node_roi = create_helper_tensor_node(roi, name + 'roi', kwargs)
    node_sca = create_helper_tensor_node(scale, name + 'scale', kwargs)

    node = onnx.helper.make_node(
        'Resize',
        inputs=[input_nodes[0], name + 'roi', name + 'scale'],
        outputs=[name],
        coordinate_transformation_mode='asymmetric',
        mode=sample_type,
        nearest_mode='floor',
        name=name)
    return [node_roi, node_sca, node]
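With coordinate_transformation_mode='asymmetric' and nearest_mode='floor', an integer scale factor reduces to plain pixel repetition on the spatial axes. A numpy sketch of what the Resize node computes for scale 2:

import numpy as np

x = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)  # NCHW
up = x.repeat(2, axis=2).repeat(2, axis=3)  # nearest upsample by 2
print(up.shape)  # (1, 1, 4, 4)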
Example #5
def convert_broadcast_logical_and(node, **kwargs):
    """Map MXNet's broadcast_logical_and operator attributes to onnx's And operator
    and return the created node.
    """
    name, input_nodes, _ = get_inputs(node, kwargs)
    nodes = []
    cast_names = []
    for input_node in input_nodes:
        cast_name = 'cast_' + input_node
        bool_node = onnx.helper.make_node('Cast', [input_node], [cast_name],
                                          to=getattr(onnx.TensorProto, "BOOL"),
                                          name=cast_name)
        nodes.append(bool_node)
        cast_names.append(cast_name)
    and_node = onnx.helper.make_node('And', cast_names, [name], name=name)
    nodes.append(and_node)
    dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[kwargs["in_type"]].name
    if dtype == 'float32':
        dtype = 'float'
    elif dtype == 'float64':
        dtype = 'double'
    cast_name = 'cast_' + name
    cast_node = onnx.helper.make_node(
        "Cast",
        [name],
        [cast_name],
        to=getattr(onnx.TensorProto, dtype.upper()),
        name=cast_name,
    )
    nodes.append(cast_node)

    return nodes
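The cast/And/cast sandwich is needed because ONNX's And is defined on bool tensors, while MXNet's broadcast_logical_and works on numeric ones. The numpy equivalent of the three steps:

import numpy as np

a = np.array([0.0, 1.5, 2.0], dtype=np.float32)
b = np.array([3.0, 0.0, 1.0], dtype=np.float32)
print((a.astype(bool) & b.astype(bool)).astype(a.dtype))  # [0. 0. 1.]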
Example #6
def convert_softmax_activation(node, **kwargs):
    """
    Map MXNet's softmax operator attributes to onnx's Softmax operator and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    outputs = []
    mode = attrs.get("mode", "channel")
    axis = -1
    if mode == "channel":
        # transpose NCHW -> NHWC, softmax over axis 3 (channels), transpose back
        trans_op_name = 'transpose' + str(kwargs["idx"])
        trans_node = onnx.helper.make_node("Transpose",
                                           input_nodes, [trans_op_name],
                                           name=trans_op_name,
                                           perm=[0, 2, 3, 1])
        softmax_op_name = 'softmax' + str(kwargs["idx"])
        softmax_node = onnx.helper.make_node("Softmax", [trans_op_name],
                                             [softmax_op_name],
                                             axis=3,
                                             name=softmax_op_name)
        output_node = onnx.helper.make_node("Transpose", [softmax_op_name],
                                            [name],
                                            name=name,
                                            perm=[0, 3, 1, 2])
        outputs.append(trans_node)
        outputs.append(softmax_node)
        outputs.append(output_node)
    else:
        softmax_node = onnx.helper.make_node("Softmax",
                                             input_nodes, [name],
                                             axis=axis,
                                             name=name)
        outputs.append(softmax_node)
    return outputs
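The transpose sandwich exists because this exporter applies Softmax over a trailing axis: moving C last, normalizing, and moving it back yields a per-channel softmax on NCHW input. A numpy check of that equivalence:

import numpy as np

def softmax(x, axis):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.rand(1, 3, 2, 2).astype(np.float32)  # NCHW
via_transpose = softmax(x.transpose(0, 2, 3, 1), axis=3).transpose(0, 3, 1, 2)
assert np.allclose(via_transpose, softmax(x, axis=1))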
Example #7
def convert_arange(node, **kwargs):
    """Map MXNet's Arange operator attributes to onnx's Range operator
    and return the created node.
    """
    input_type = kwargs["in_type"]
    name, input_nodes, attrs = get_inputs(node, kwargs)
    initializer = kwargs["initializer"]
    start_value = np.array(
        [float(attrs.get("start", 0))],
        dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])
    start_name = "start_" + str(kwargs["idx"])
    start_node = onnx.helper.make_tensor_value_info(start_name, input_type, [])
    initializer.append(
        onnx.helper.make_tensor(
            name=start_name,
            data_type=input_type,
            dims=[],
            vals=start_value,
            raw=False,
        ))

    limit_value = np.array(
        [float(attrs.get("stop", 1))],
        dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])
    limit_name = "limit_" + str(kwargs["idx"])
    limit_node = onnx.helper.make_tensor_value_info(limit_name, input_type, [])
    initializer.append(
        onnx.helper.make_tensor(
            name=limit_name,
            data_type=input_type,
            dims=[],
            vals=limit_value,
            raw=False,
        ))

    delta_value = np.array(
        [float(attrs.get("step", 1))],
        dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])
    delta_name = "delta_" + str(kwargs["idx"])
    delta_node = onnx.helper.make_tensor_value_info(delta_name, input_type, [])
    initializer.append(
        onnx.helper.make_tensor(
            name=delta_name,
            data_type=input_type,
            dims=[],
            vals=delta_value,
            raw=False,
        ))
    range_node = onnx.helper.make_node('Range',
                                       [start_name, limit_name, delta_name],
                                       [name],
                                       name=name)
    return [start_node, limit_node, delta_node, range_node]
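ONNX Range consumes the three scalar initializers built above and matches numpy's arange, so start/stop/step carry over directly:

import numpy as np

start, stop, step = 0.0, 5.0, 1.5
print(np.arange(start, stop, step, dtype=np.float32))  # [0.  1.5 3.  4.5]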
Example #8
def convert_fully_connected(node, **kwargs):
    """Map MXNet's FullyConnected operator attributes to onnx's Gemm operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    initializer = kwargs["initializer"]

    no_bias = get_boolean_attribute_value(attrs, "no_bias")

    fcnode = []

    op_name = "flatten_" + str(kwargs["idx"])
    flatten_node = onnx.helper.make_node('Flatten',
                                         inputs=[input_nodes[0]],
                                         outputs=[op_name],
                                         name=op_name)

    input_nodes[0] = op_name
    fcnode.append(flatten_node)

    if no_bias:
        data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(
            attrs.get('dtype', 'float32'))]
        bias_name = "bias" + str(kwargs["idx"])
        tensor_node = onnx.helper.make_tensor_value_info(
            bias_name, data_type, (1, ))
        initializer.append(
            onnx.helper.make_tensor(
                name=bias_name,
                data_type=data_type,
                dims=(1, ),
                vals=[0],
                raw=False,
            ))
        input_nodes.append(bias_name)
        fcnode.append(tensor_node)

    node = onnx.helper.make_node(
        "Gemm",
        input_nodes,  # input (A, B, C) - C can be in place
        [name],  # output
        alpha=1.0,
        beta=1.0,
        transA=False,
        transB=True,
        name=name)

    fcnode.append(node)

    return fcnode
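With transB=1 and alpha=beta=1.0, Gemm computes A @ B.T + C, which is exactly FullyConnected on the flattened input: flatten(x) @ W.T + b. A numpy check (the shapes here are illustrative):

import numpy as np

x = np.random.rand(4, 2, 3).astype(np.float32)  # arbitrary trailing dims
w = np.random.rand(5, 6).astype(np.float32)     # (num_hidden, in_features)
b = np.random.rand(5).astype(np.float32)

a = x.reshape(x.shape[0], -1)  # the Flatten node
y = a @ w.T + b                # the Gemm node with transB=1
print(y.shape)  # (4, 5)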
Example #9
def convert_leakyrelu(node, **kwargs):
    """Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators
    based on the input node's attributes and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    initializer = kwargs["initializer"]
    act_type = attrs.get("act_type", "leaky")
    alpha = float(attrs.get("slope", 0.25))
    act_name = {
        "elu": "Elu",
        "leaky": "LeakyRelu",
        "prelu": "PRelu",
        "selu": "Selu"
    }
    reshape_val_name = 'reshape' + str(kwargs["idx"])
    input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
    reshape_value = np.array([1, -1, 1, 1], dtype='int64')
    dims = np.shape(reshape_value)
    shape_node = onnx.helper.make_tensor_value_info(reshape_val_name,
                                                    input_type, dims)
    initializer.append(
        onnx.helper.make_tensor(
            name=reshape_val_name,
            data_type=input_type,
            dims=dims,
            vals=reshape_value,
            raw=False,
        ))
    slope_op_name = 'slope' + str(kwargs["idx"])
    lr_node = []
    if act_type == "prelu" or act_type == "selu":
        reshape_slope_node = onnx.helper.make_node(
            'Reshape',
            inputs=[input_nodes[1], reshape_val_name],
            outputs=[slope_op_name],
            name=slope_op_name)
        node = onnx.helper.make_node(act_name[act_type],
                                     inputs=[input_nodes[0], slope_op_name],
                                     outputs=[name],
                                     name=name)
        lr_node.append(shape_node)
        lr_node.append(reshape_slope_node)
        lr_node.append(node)
    else:
        node = onnx.helper.make_node(act_name[act_type],
                                     inputs=input_nodes,
                                     outputs=[name],
                                     name=name,
                                     alpha=alpha)
        lr_node.append(node)
    return lr_node
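Reshaping the slope to (1, -1, 1, 1) lets a per-channel PRelu slope broadcast against NCHW data. The numpy equivalent of the Reshape + PRelu pair:

import numpy as np

x = np.random.randn(2, 3, 4, 4).astype(np.float32)   # NCHW
slope = np.array([0.1, 0.2, 0.3], dtype=np.float32)  # one slope per channel
s = slope.reshape(1, -1, 1, 1)                       # broadcast over N, H, W
prelu = np.where(x > 0, x, s * x)
print(prelu.shape)  # (2, 3, 4, 4)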
Example #10
def convert_crop(node, **kwargs):
    """Map MXNet's crop operator attributes to onnx's Crop operator
    and return the created node.
    """
    name, inputs, attrs = get_inputs(node, kwargs)
    start = np.array([0, 0, 0, 0], dtype=np.int64)  # Slice expects int64 indices
    start_node = create_helper_tensor_node(start, name + '_starts', kwargs)
    shape_node = create_helper_shape_node(inputs[1], inputs[1] + '_shape')
    crop_node = onnx.helper.make_node(
        "Slice",
        inputs=[inputs[0], name + '_starts',
                inputs[1] + '_shape'],  # data, starts, ends
        outputs=[name],
        name=name)
    return [start_node, shape_node, crop_node]
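Feeding the second input's runtime Shape as the Slice ends makes the output track the reference tensor's extent from index 0 on every axis. A 2-D numpy illustration of that crop-like behavior:

import numpy as np

data = np.arange(24, dtype=np.float32).reshape(4, 6)
like = np.zeros((2, 3), dtype=np.float32)  # reference tensor
cropped = data[tuple(slice(0, e) for e in like.shape)]
print(cropped.shape)  # (2, 3)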
Example #11
def convert_greater_scalar(node, **kwargs):
    """Map MXNet's _greater_scalar operator attributes to onnx's Greater operator.
    Creates a new node for the input scalar value, adds it to the initializer
    and return multiple created nodes.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    input_type = kwargs["in_type"]
    scalar_value = np.array(
        [attrs.get("scalar", 1)],
        dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])

    initializer = kwargs["initializer"]
    dims = np.shape(scalar_value)

    scalar_op_name = "scalar_op" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name,
                                                     input_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=scalar_op_name,
            data_type=input_type,
            dims=dims,
            vals=scalar_value,
            raw=False,
        ))

    greater_node = onnx.helper.make_node("Greater",
                                         [input_nodes[0], scalar_op_name],
                                         [name],
                                         name=name)

    dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[kwargs["in_type"]].name
    if dtype == 'float32':
        dtype = 'float'
    elif dtype == 'float64':
        dtype = 'double'
    cast_name = 'cast_' + name
    cast_node = onnx.helper.make_node(
        "Cast",
        [name],
        [cast_name],
        to=getattr(onnx.TensorProto, dtype.upper()),
        name=cast_name,
    )

    return [tensor_node, greater_node, cast_node]
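Greater produces a bool tensor, so the trailing Cast restores the input dtype and keeps MXNet's convention of returning 0/1 in the original type:

import numpy as np

x = np.array([0.5, 1.0, 2.0], dtype=np.float32)
print((x > 1.0).astype(x.dtype))  # [0. 0. 1.]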
Example #12
def convert_batchnorm(node, **kwargs):
    """Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    momentum = float(attrs.get("momentum", 0.9))
    eps = float(attrs.get("eps", 0.001))

    bn_node = onnx.helper.make_node(
        "BatchNormalization",
        input_nodes,
        [name],
        name=name,
        epsilon=eps,
        momentum=momentum,
    )
    return [bn_node]
Example #13
def convert_take(node, **kwargs):
    """Map MXNet's take operator attributes to onnx's Gather operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    dtype = 'int32'
    cast_name = 'cast_' + name
    cast_node = onnx.helper.make_node(
        "Cast",
        [input_nodes[1]],
        [cast_name],
        to=getattr(onnx.TensorProto, dtype.upper()),
        name=cast_name,
    )

    node = onnx.helper.make_node("Gather", [input_nodes[0], cast_name], [name],
                                 axis=int(attrs.get("axis", -1)),
                                 name=name)
    return [cast_node, node]
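Gather with integer indices is numpy's take along an axis, which is why the MXNet indices are cast to int32 first:

import numpy as np

data = np.arange(12, dtype=np.float32).reshape(3, 4)
idx = np.array([2.0, 0.0]).astype(np.int32)  # cast, then gather
print(np.take(data, idx, axis=0))  # rows 2 and 0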
Example #14
def convert_where(node, **kwargs):
    """Map MXNet's where operator attributes to onnx's Where operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    dtype = 'bool'
    cast_name = 'cast_' + name
    cast_node = onnx.helper.make_node(
        "Cast",
        [input_nodes[0]],
        [cast_name],
        to=getattr(onnx.TensorProto, dtype.upper()),
        name=cast_name,
    )
    node = onnx.helper.make_node("Where",
                                 [cast_name, input_nodes[1], input_nodes[2]],
                                 [name],
                                 name=name)
    return [cast_node, node]
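ONNX Where requires a bool condition while MXNet's where accepts a numeric mask; the Cast bridges the two, exactly as numpy does here:

import numpy as np

cond = np.array([1.0, 0.0, 2.0], dtype=np.float32)  # numeric mask
x = np.array([10.0, 20.0, 30.0], dtype=np.float32)
y = np.array([-1.0, -2.0, -3.0], dtype=np.float32)
print(np.where(cond.astype(bool), x, y))  # [10. -2. 30.]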
Example #15
def convert_batchnorm(node, **kwargs):
    """
    Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    momentum = float(attrs.get("momentum", 0.9))
    eps = float(attrs.get("eps", 0.001))
    bn_node = onnx.helper.make_node(
        "BatchNormalization",
        input_nodes,
        [name],
        name=name,
        epsilon=eps,
        momentum=momentum,
        # MXNet computes mean and variance per channel for batchnorm.
        # The onnx default is across all spatial features, so the spatial
        # parameter is left unset here.
        # spatial=0
    )
    return [bn_node]
Example #16
def convert_slice_axis(node, **kwargs):
    """Map MXNet's slice_axis operator attributes to onnx's Slice operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axes = int(attrs.get("axis"))
    starts = int(attrs.get("begin"))
    ends = attrs.get("end", None)
    if not ends:
        raise ValueError(
            "Slice: ONNX doesnt't support 'None' in 'end' attribute")

    export_nodes = []

    starts = np.atleast_1d(np.asarray(starts, dtype=np.int64))
    ends = np.atleast_1d(np.asarray(ends, dtype=np.int64))
    axes = np.atleast_1d(np.asarray(axes, dtype=np.int64))

    starts_node = create_helper_tensor_node(starts, name + '__starts', kwargs)
    export_nodes.append(starts_node)
    starts_node = starts_node.name

    ends_node = create_helper_tensor_node(ends, name + '__ends', kwargs)
    export_nodes.append(ends_node)
    ends_node = ends_node.name

    axes_node = create_helper_tensor_node(axes, name + '__axes', kwargs)
    export_nodes.append(axes_node)
    axes_node = axes_node.name

    input_node = input_nodes[0]
    node = onnx.helper.make_node(
        "Slice",
        [input_node, starts_node, ends_node, axes_node],
        [name],
        name=name,
    )
    export_nodes.append(node)

    return export_nodes
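Slice with one-element starts/ends/axes tensors restricts a single axis and leaves the others untouched, which is all slice_axis does. For axis=1, begin=1, end=3 the numpy equivalent is:

import numpy as np

x = np.arange(12, dtype=np.float32).reshape(3, 4)
print(x[:, 1:3])  # Slice(starts=[1], ends=[3], axes=[1])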
Example #17
def convert_batchnorm(node, **kwargs):
    """Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    momentum = float(attrs.get("momentum", 0.9))
    eps = float(attrs.get("eps", 0.001))

    bn_node = onnx.helper.make_node(
        "BatchNormalization",
        input_nodes, [name],
        name=name,
        epsilon=eps,
        momentum=momentum
        # MXNet computes mean and variance per channel for batchnorm.
        # Default for onnx is across all spatial features. Relying on default
        # ONNX behavior of spatial=1 for ONNX opset 8 and below. As the spatial
        # attribute is deprecated in opset 9 and above, not explicitly encoding it.
    )
    return [bn_node]
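In inference form, BatchNormalization's five inputs are data, gamma, beta, moving_mean, and moving_var, normalized per channel. A minimal numpy reference for what the exported node computes:

import numpy as np

def batchnorm_infer(x, gamma, beta, mean, var, eps=1e-3):
    shape = (1, -1, 1, 1)  # broadcast per channel over NCHW
    return (gamma.reshape(shape) * (x - mean.reshape(shape))
            / np.sqrt(var.reshape(shape) + eps) + beta.reshape(shape))

x = np.random.randn(2, 3, 4, 4).astype(np.float32)
out = batchnorm_infer(x, np.ones(3, np.float32), np.zeros(3, np.float32),
                      np.zeros(3, np.float32), np.ones(3, np.float32))
print(out.shape)  # (2, 3, 4, 4)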
Example #18
def convert_softmax(node, **kwargs):
    """Map MXNet's softmax operator attributes to onnx's Softmax operator
    and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    axis = int(attrs.get("axis", -1))

    c_softmax_node = []
    axis = -1

    transpose_node1 = onnx.helper.make_node(
        "Transpose",
        inputs=input_nodes,
        perm=(0, 2, 3, 1),  # NCHW -> NHWC
        name=name + '_tr1',
        outputs=[name + '_tr1']
    )

    softmax_node = onnx.helper.make_node(
        "Softmax",
        inputs=[name + '_tr1'],
        axis=axis,
        name=name,
        outputs=[name]
    )

    transpose_node2 = onnx.helper.make_node(
        "Transpose",
        inputs=[name],
        perm=(0, 3, 1, 2),  # NHWC -> NCHW
        name=name + '_tr2',
        outputs=[name + '_tr2']
    )

    c_softmax_node.append(transpose_node1)
    c_softmax_node.append(softmax_node)
    c_softmax_node.append(transpose_node2)

    return c_softmax_node
Example #19
def convert_upsample(node, **kwargs):
    """
    Map MXNet's UpSampling operator attributes to onnx's Resize operator and return the created nodes.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)
    sample_type = attrs.get('sample_type', 'nearest')
    sample_type = 'linear' if sample_type == 'bilinear' else sample_type
    scale = convert_string_to_list(attrs.get('scale'))
    scaleh = scalew = float(scale[0])
    if len(scale) > 1:
        scaleh = float(scale[0])
        scalew = float(scale[1])
    scale = np.array([1.0, 1.0, scaleh, scalew], dtype=np.float32)
    scale_node = create_helper_tensor_node(scale, name + 'scales', kwargs)
    input_nodes.append(name + 'scales')
    node = onnx.helper.make_node(
        'Resize',
        input_nodes,
        [name],
        mode=sample_type,
        name=name
    )
    return [scale_node, node]
Example #20
def convert_pooling(node, **kwargs):
    """Map MXNet's Pooling operator attributes to onnx's
    MaxPool/AveragePool/GlobalMaxPool/GlobalAveragePool operators
    based on the input node's attributes and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    kernel = eval(attrs["kernel"])
    pool_type = attrs["pool_type"] if attrs.get("pool_type") else "max"
    stride = eval(attrs["stride"]) if attrs.get("stride") else (1, 1)
    global_pool = get_boolean_attribute_value(attrs, "global_pool")
    p_value = attrs.get('p_value')

    pooling_convention = attrs.get('pooling_convention', 'valid')

    if pooling_convention == 'full':
        pooling_warning = "Pooling: ONNX currently doesn't support pooling_convention. " \
                          "This might lead to shape or accuracy issues. " \
                          "https://github.com/onnx/onnx/issues/549"

        logging.warning(pooling_warning)

    # ceil_mode = 1 if attrs.get("pooling_convention", "valid") == "full" else 0

    pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
    pad_dims = pad_dims + pad_dims
    pool_types = {"max": "MaxPool", "avg": "AveragePool", "lp": "LpPool"}
    global_pool_types = {
        "max": "GlobalMaxPool",
        "avg": "GlobalAveragePool",
        "lp": "GlobalLpPool"
    }
    count_include_pad = 1 if attrs.get("count_include_pad",
                                       "True") in ["True", "1"] else 0

    if pool_type == 'lp' and p_value is None:
        raise AttributeError(
            'ONNX requires a p value for LpPool and GlobalLpPool')

    if global_pool:
        if pool_type == 'lp':
            node = onnx.helper.make_node(
                global_pool_types[pool_type],
                input_nodes,  # input
                [name],
                p=int(p_value),
                name=name)
        else:
            node = onnx.helper.make_node(
                global_pool_types[pool_type],
                input_nodes,  # input
                [name],
                name=name)
    else:
        if pool_type == 'lp':
            node = onnx.helper.make_node(
                pool_types[pool_type],
                input_nodes,  # input
                [name],
                p=int(p_value),
                kernel_shape=kernel,
                pads=pad_dims,
                strides=stride,
                name=name)
        elif pool_type == "avg":
            node = onnx.helper.make_node(
                pool_types[pool_type],
                input_nodes,  # input
                [name],
                count_include_pad=count_include_pad,
                kernel_shape=kernel,
                pads=pad_dims,
                strides=stride,
                # ceil_mode=ceil_mode,
                name=name)
        else:
            node = onnx.helper.make_node(
                pool_types[pool_type],
                input_nodes,  # input
                [name],
                kernel_shape=kernel,
                pads=pad_dims,
                strides=stride,
                name=name)

    return [node]
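MXNet stores one symmetric pad per spatial dimension, while ONNX expects all begin pads followed by all end pads; duplicating the list performs that mapping:

# MXNet pad (1, 2) over (H, W) becomes ONNX pads [1, 2, 1, 2]
pad_dims = [1, 2]                # symmetric per-axis padding from MXNet
onnx_pads = pad_dims + pad_dims  # [h_begin, w_begin, h_end, w_end]
print(onnx_pads)  # [1, 2, 1, 2]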