Example #1
def _convert_reduce_mean(node, graph, err):
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    axes = node.attrs['axes']
    keep_dim = node.attrs.get("keepdims", 1)  # ONNX defaults to 1; unused below

    if len(axes) == 2 and axes[0] == 1 and axes[1] == 2:
        print("Warning: ReduceMean over axes [1, 2] mapped to global average "
              "pooling; check that this matches the tensor layout")
        layer = myf("Pooling",
                    node_name, [input_name], [output_name],
                    pooling_param=dict(pool=P.Pooling.AVE),
                    global_pooling=True)
    elif len(axes) == 3:
        layer = myf("Reduction",
                    node_name, [input_name], [output_name],
                    reduction_param=dict(axis=1, operation=P.Reduction.MEAN))
    elif len(axes) == 1:
        layer = myf("Reduction",
                    node_name, [input_name], [output_name],
                    reduction_param=dict(axis=axes[0],
                                         operation=P.Reduction.MEAN))
    else:
        return err.unsupported_op_configuration(
            node, "Unsupported reduce mean type")

    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
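All of these converters build layers through a myf helper that the excerpt never defines, and P is presumably caffe.params (so P.Pooling.AVE and friends resolve to the protobuf enums). A minimal sketch of what myf presumably does, assuming it is a thin factory over the Caffe protobuf bindings; the real definition lives elsewhere in the project and may differ:

from caffe.proto import caffe_pb2

def myf(layer_type, name, bottoms, tops, in_place=False, **params):
    # Sketch only: build a LayerParameter with the given type, name and blobs.
    layer = caffe_pb2.LayerParameter()
    layer.type = layer_type
    layer.name = name
    layer.bottom.extend(bottoms)
    # An in-place layer writes its output over its input blob.
    layer.top.extend(bottoms if in_place else tops)
    # Nested messages such as pooling_param=dict(...) in **params would be
    # copied field by field into the matching sub-message; elided here.
    return layer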
Example #2
def _convert_pool(node, graph, err):
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    if node.op_type in ("MaxPool", "GlobalMaxPool"):
        pool_type = P.Pooling.MAX
    elif node.op_type in ("AveragePool", "GlobalAveragePool"):
        pool_type = P.Pooling.AVE
    else:
        return err.unsupported_op_configuration(node, "Unsupported pool type")

    if node.op_type.startswith("Global"):
        layer = myf("Pooling", node_name, [input_name], [output_name],
                    pooling_param=dict(pool=pool_type, global_pooling=True))
        graph.channel_dims[output_name] = graph.channel_dims[input_name]
        return layer

    kernel_shape = node.attrs["kernel_shape"]
    strides = node.attrs.get('strides', [1, 1])
    # ONNX 2-D pads are [top, left, bottom, right]; only the begin pads are
    # used since Caffe expects symmetric padding.
    pads = node.attrs.get('pads', [0, 0, 0, 0])
    layer = myf("Pooling", node_name, [input_name], [output_name],
                pooling_param=dict(pool=pool_type,
                                   kernel_h=kernel_shape[0],
                                   kernel_w=kernel_shape[1],
                                   stride_h=strides[0],
                                   stride_w=strides[1],
                                   pad_h=pads[0],
                                   pad_w=pads[1]))
    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
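One caveat on padding: because Caffe only takes symmetric pad_h/pad_w, the converter above silently keeps the begin pads. A stricter variant could reject the asymmetric case with a helper along these lines (the name _symmetric_pads is hypothetical, not part of the original converter):

def _symmetric_pads(pads):
    # ONNX 2-D pads come as [top, left, bottom, right]; return (pad_h, pad_w)
    # or None when begin and end pads disagree.
    if pads[0] != pads[2] or pads[1] != pads[3]:
        return None
    return pads[0], pads[1]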
Example #3
def _convert_upsample(node, graph, err):
    # Suspicious: "cubic_coeff_a" is a Resize interpolation coefficient, not
    # a scale factor; compare the height_scale/scales handling in Example #18.
    factor = int(node.attrs["cubic_coeff_a"])
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    channels = graph.channel_dims[input_name]

    pad = int(math.ceil((factor - 1) / 2.))
    mode = node.attrs["mode"]
  
    if mode in ("bilinear", b"bilinear"):  # attrs strings may arrive as bytes
        layer = myf("Deconvolution", node_name, [input_name], [output_name],
                    convolution_param=dict(
                        num_output=channels,
                        kernel_size=2 * factor - factor % 2,
                        stride=factor,
                        pad=pad,
                        group=channels,
                        bias_term=False,
                        weight_filler=dict(type="bilinear_upsampling")
                    ))
    else:
        layer = myf("Deconvolution", node_name, [input_name], [output_name],
                    convolution_param=dict(
                        num_output=channels,
                        kernel_size=factor,
                        stride=factor,
                        group=channels,
                        bias_term=False,
                    ))

    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #4
def _convert_Div(node, graph, err):
    print("TODO", node.attrs)
    input_name = str(node.inputs[0])
    input_name_y = str(node.inputs[1])
    output_name = str(node.outputs[0])
    name = str(node.name)

    # x / y == x * y^-1: invert the divisor with a Power layer, then
    # multiply the two blobs elementwise.
    layer = myf("Power",
                name + "div", [input_name_y], [output_name + "_div"],
                power=-1)
    eltwise_layer = myf("Eltwise",
                        name, [input_name, output_name + "_div"],
                        [output_name],
                        operation=P.Eltwise.PROD)

    graph.channel_dims[output_name] = graph.channel_dims[input_name]

    return layer, eltwise_layer
Example #5
def _convert_upsample(node, graph, err):
    factor = int(node.attrs.get("height_scale", 2))
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    # input_shape = graph.shape_dict[input_name]
    # channels = input_shape[1]
    channels = graph.channel_dims[input_name]
    pad = int(math.ceil((factor - 1) / 2.))
    mode = node.attrs["mode"]
    #https://github.com/pytorch/pytorch/issues/6900
    if mode in ("bilinear", b"bilinear"):  # attrs strings may arrive as bytes
        layer = myf("Deconvolution",
                    node_name, [input_name], [output_name],
                    convolution_param=dict(
                        num_output=channels,
                        kernel_size=2 * factor - factor % 2,
                        stride=factor,
                        pad=pad,
                        group=channels,
                        bias_term=False,
                        weight_filler=dict(type="bilinear_upsampling")))
    else:
        layer = myf("Deconvolution",
                    node_name, [input_name], [output_name],
                    convolution_param=dict(
                        num_output=channels,
                        kernel_size=factor,
                        stride=factor,
                        group=channels,
                        bias_term=False,
                    ))

    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #6
def _convert_Reshape(node, graph, err):
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    if len(node.inputs) == 1:
        shape = tuple(node.attrs.get('shape', ()))
    else:
        shape = tuple(node.input_tensors[node.inputs[1]])
    # if shape == ():

    if input_name == output_name:
        inplace = True
    else:
        inplace = False
    if len(shape) == 2:
        layer = myf("Flatten",
                    node_name, [input_name], [output_name],
                    in_place=inplace)
        graph.channel_dims[output_name] = shape[1]
        return layer
    elif len(shape) == 4 or len(shape) == 3:
        graph.channel_dims[output_name] = shape[1]
        layer = myf("Reshape",
                    node_name, [input_name], [output_name],
                    reshape_param=dict(shape=dict(dim=list(shape))))
        return layer
    else:
        return err.unsupported_op_configuration(
            node, "Reshape dimension number shall be 2, 3 or 4")
Example #7
def _convert_BatchNorm(node, graph, err):
    epsilon = node.attrs.get("epsilon", 1e-5)
    scale = node.input_tensors[node.inputs[1]]
    bias = node.input_tensors[node.inputs[2]]
    mean = node.input_tensors[node.inputs[3]]
    var = node.input_tensors[node.inputs[4]]
    node_name = node.name

    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])

    if input_name == output_name:
        inplace = True
    else:
        inplace = False

    bn_layer = myf("BatchNorm",
                   node_name + "_bn", [input_name], [output_name],
                   eps=epsilon,
                   use_global_stats=True,
                   in_place=inplace)
    scale_layer = myf("Scale",
                      node_name + "_scale", [output_name], [output_name],
                      in_place=True,
                      bias_term=True)

    graph.channel_dims[output_name] = graph.channel_dims[input_name]

    return bn_layer, scale_layer
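Caffe's BatchNorm layer only normalizes with the stored statistics, while the learned gamma/beta live in a separate Scale layer, which is why one ONNX BatchNormalization becomes two Caffe layers here. A sketch of the weight copy the converter presumably performs later (the _copy_bn_weights name and the caffe.Net argument are assumptions; the blob order is Caffe's convention):

def _copy_bn_weights(net, node_name, mean, var, scale, bias):
    # BatchNorm blobs: [mean, variance, moving-average factor];
    # Scale blobs: [gamma, beta].
    net.params[node_name + "_bn"][0].data[...] = mean
    net.params[node_name + "_bn"][1].data[...] = var
    net.params[node_name + "_bn"][2].data[...] = 1.0
    net.params[node_name + "_scale"][0].data[...] = scale
    net.params[node_name + "_scale"][1].data[...] = bias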
Example #8
def _convert_Add(node, graph, err):
    input_name_list = [str(i) for i in node.inputs]
    output_name = str(node.outputs[0])
    node_name = node.name

    # max_dim = 0
    # for name in input_name_list:
    #     if graph.channel_dims[name]>max_dim:
    #         max_dim = graph.channel_dims[name]

    if 'broadcast' in node.attrs:
        if node.attrs['broadcast'] == 1:
            input_node_number = len(input_name_list)
            if input_node_number != 2:
                return err.unsupported_op_configuration(
                    node, "Broadcast Add must have 2 inputs, not {}".format(
                        input_node_number))
            axis = node.attrs['axis']
            flat_layer = myf("Flatten", node_name + '_flat',
                             [input_name_list[1]], [output_name + '_flat'])
            layer = myf("Bias",
                        node_name, [input_name_list[0], output_name + '_flat'],
                        [output_name],
                        axis=axis)
            # layer = myf("Bias", node_name, input_name_list, [output_name], bias_term = False, axis = axis)
            graph.channel_dims[output_name] = graph.channel_dims[
                input_name_list[0]]
            return flat_layer, layer

    layer = myf("Eltwise",
                node_name,
                input_name_list, [output_name],
                operation=P.Eltwise.SUM)
    graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
    return layer
Example #9
def _convert_resize(node, graph, err):
    if not USE_DECONV_AS_UPSAMPLE:
        #print(node, graph)
        node_name = node.name
        input_name = str(node.inputs[0])
        output_name = str(node.outputs[0])
        #print(node.attrs, node_name, input_name, output_name)
        layer = myf("Upsample",
                    node_name, [input_name], [output_name],
                    upsample_param=dict(scale=2))

        graph.channel_dims[output_name] = graph.channel_dims[input_name]
    else:
        print('add resize deconv operator')
        factor = 2
        node_name = node.name
        input_name = str(node.inputs[0])
        output_name = str(node.outputs[0])
        # input_shape = graph.shape_dict[input_name]
        # channels = input_shape[1]
        channels = graph.channel_dims[input_name]
        pad = int(math.ceil((factor - 1) / 2.))
        layer = myf("Deconvolution",
                    node_name, [input_name], [output_name],
                    convolution_param=dict(
                        num_output=channels,
                        kernel_size=factor,
                        stride=factor,
                        group=channels,
                        bias_term=False,
                    ))
        graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #10
def _convert_conv(node, graph, err):
    weight_name = node.inputs[1]
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    node_name = node.name
    W = None
    if weight_name in node.input_tensors:
        W = node.input_tensors[weight_name]
    else:
        return err.missing_initializer(
            node,
            "Weight tensor: {} not found in the graph initializer".format(
                weight_name))
    is_deconv = False
    if node.op_type.endswith("Transpose"):
        is_deconv = True
    bias_flag = False
    bias = None
    if len(node.inputs) > 2:
        bias = node.input_tensors[node.inputs[2]]
        bias_flag = True
    dilations = node.attrs.get("dilations", [1, 1])
    # groups = 1
    groups = node.attrs.get("group", 1)
    kernel_shape = node.attrs["kernel_shape"]
    pads = node.attrs.get("pads", [0, 0, 0, 0])
    strides = node.attrs["strides"]

    if groups > 1:
        # engine=1 pins grouped convolution to the CAFFE engine
        # (ConvolutionParameter.Engine: DEFAULT=0, CAFFE=1, CUDNN=2).
        layer = myf("Convolution",
                    node_name, [input_name], [output_name],
                    kernel_h=kernel_shape[0],
                    kernel_w=kernel_shape[1],
                    stride_h=strides[0],
                    stride_w=strides[1],
                    group=groups,
                    pad_h=pads[0],
                    pad_w=pads[1],
                    num_output=W.shape[0],
                    dilation=dilations[0],
                    bias_term=bias_flag,
                    engine=1)
    else:
        layer = myf("Convolution",
                    node_name, [input_name], [output_name],
                    kernel_h=kernel_shape[0],
                    kernel_w=kernel_shape[1],
                    stride_h=strides[0],
                    stride_w=strides[1],
                    group=groups,
                    pad_h=pads[0],
                    pad_w=pads[1],
                    num_output=W.shape[0],
                    dilation=dilations[0],
                    bias_term=bias_flag)

    graph.channel_dims[output_name] = W.shape[0]
    return layer
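For shape sanity checks, the spatial size Caffe computes for the emitted Convolution follows the usual floor formula; a small standalone helper (the name conv_out_dim is ours, not the converter's):

def conv_out_dim(in_dim, kernel, stride, pad, dilation=1):
    # Caffe's output size: floor((in + 2*pad - k_ext) / stride) + 1,
    # where k_ext is the dilated kernel extent.
    k_ext = dilation * (kernel - 1) + 1
    return (in_dim + 2 * pad - k_ext) // stride + 1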
Example #11
def _convert_Mul(node, graph, err):
    input_name_list = [str(i) for i in node.inputs]
    output_name = str(node.outputs[0])
    node_name = node.name
    print('Mul:', node.name, node.attrs, input_name_list, output_name)
    if len(node.attrs) == 0:
        assert len(node.input_tensors) == 1
        assert len(input_name_list) == 2
        inp_tensor = node.input_tensors[input_name_list[1]]
        scale_value = float(inp_tensor)
        print(scale_value)
        layer = myf("Scale",
                    node_name, [input_name_list[0]], [output_name],
                    bias_term=False,
                    scale_param=dict(filler=dict(value=scale_value),
                                     bias_term=False))
        return layer
        #layer = myf("Reshape", node_name, [input_name], [output_name], reshape_param = dict(shape=dict(dim=list(shape))))
    #print(len(node.input_tensors))

    # max_dim = 0
    # for name in input_name_list:
    #     if graph.channel_dims[name]>max_dim:
    #         max_dim = graph.channel_dims[name]

    if 'broadcast' in node.attrs:
        if node.attrs['broadcast'] == 1:
            input_node_number = len(input_name_list)
            if input_node_number != 2:
                return err.unsupported_op_configuration(
                    node, "Broadcast Mul must have 2 inputs, not {}".format(
                        input_node_number))
            axis = node.attrs['axis']
            flat_layer = myf("Flatten", node_name + '_flat',
                             [input_name_list[1]], [output_name + '_flat'])
            layer = myf("Scale",
                        node_name, [input_name_list[0], output_name + '_flat'],
                        [output_name],
                        bias_term=False,
                        axis=axis)
            graph.channel_dims[output_name] = graph.channel_dims[
                input_name_list[0]]
            return flat_layer, layer

    layer = myf("Eltwise",
                node_name,
                input_name_list, [output_name],
                operation=P.Eltwise.PROD)
    graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
    return layer
Example #12
def _convert_Softmax(node, graph, err):
    node_name = node.name
    input_name_list = [str(i) for i in node.inputs]
    output_name = str(node.outputs[0])
    axis = node.attrs.get("axis", 1)
    layer = myf('Softmax', node_name, input_name_list, [output_name], axis=axis)
    return layer
Example #13
def _convert_gemm(node, graph, err):
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    weight_name = node.inputs[1]
    if weight_name in node.input_tensors:
        W = node.input_tensors[weight_name]
    else:
        err.missing_initializer(node,
                                "Weight tensor: {} not found in the graph initializer".format(weight_name, ))
        return

    if node.attrs.get("broadcast", 1) != 1 or node.attrs.get("transB", 0) != 1:
        return err.unsupported_op_configuration(node, "Gemm is supported only for inner_product layer")

    b = None
    bias_flag = False
    if len(node.inputs) > 2:
        b = node.input_tensors[node.inputs[2]]

    if len(W.shape) != 2 or (b is not None and len(b.shape) != 1):
        return err.unsupported_op_configuration(node, "Gemm is supported only for inner_product layer")
    if b is not None:
        bias_flag = True
        if W.shape[0] != b.shape[0]:
            return err.unsupported_op_configuration(node,
                                                    "Gemm is supported only for inner_product layer")

    layer = myf("InnerProduct", node_name, [input_name], [output_name],
                num_output=W.shape[0], bias_term=bias_flag)
    graph.channel_dims[output_name] = W.shape[0]

    return layer
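The mapping relies on transB=1, which makes ONNX Gemm compute y = x W^T + b, exactly what Caffe's InnerProduct computes; a quick NumPy self-check with made-up shapes:

import numpy as np

x = np.random.randn(4, 16).astype(np.float32)   # (batch, input_dim)
W = np.random.randn(8, 16).astype(np.float32)   # (num_output, input_dim)
b = np.random.randn(8).astype(np.float32)
y = x @ W.T + b                                  # shape (4, 8)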
Example #14
def _convert_conv_transpose(node, graph, err):
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    node_name = node.name
    weight_name = node.inputs[1]
    W = None
    if weight_name in node.input_tensors:
        W = node.input_tensors[weight_name]
    else:
        return err.missing_initializer(
            node,
            "Weight tensor: {} not found in the graph initializer".format(weight_name))
    bias_flag = False
    bias = None
    if len(node.inputs) > 2:
        bias = node.input_tensors[node.inputs[2]]
        bias_flag = True
    dilations = node.attrs.get("dilations", [1, 1])
    # groups = 1
    groups = node.attrs.get("group", 1)
    kernel_shape = node.attrs["kernel_shape"]
    pads = node.attrs.get("pads", [0, 0, 0, 0])
    strides = node.attrs["strides"]

    layer = myf('Deconvolution', node_name, [input_name], [output_name],
                convolution_param=dict(
                    num_output=W.shape[1] * groups,  # weights are (C_in, C_out / group, kH, kW)
                    kernel_h=kernel_shape[0], kernel_w=kernel_shape[1],
                    stride_h=strides[0], stride_w=strides[1],
                    group=groups,
                    pad_h=pads[0], pad_w=pads[1],
                    bias_term=bias_flag,
                ))

    graph.channel_dims[output_name] = W.shape[1]
    return layer
Example #15
def _convert_dropout(node, graph, err):
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    ratio = node.attrs.get('ratio', 0.5)
    layer = myf("Dropout", node_name, [input_name], [output_name], dropout_ratio=ratio)
    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #16
def _convert_ArgMax(node, graph, err):
    print("TODO", node.attrs)
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    node_name = node.name
    layer = myf('ArgMax', node_name, [input_name], [output_name])
    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #17
def make_input(input):
    name = input[0]
    output = input[0]
    output = [output]
    shape = input[2]
    shape = list(shape)
    input_layer = myf("Input", name, [], output, input_param=dict(shape=dict(dim=shape)))
    return input_layer
Example #18
def _convert_upsample(node, graph, err):
    if 'height_scale' in node.attrs.keys():
        factor = int(node.attrs['height_scale'])
        assert factor == int(node.attrs['width_scale'])
    elif 'scales' in node.attrs.keys():
        scales = node.attrs['scales']
        assert len(scales) == 4
        assert scales[2] == scales[3]
        factor = int(scales[2])
    else:
        raise ValueError('Could not find scale values in Upsample node: %s' %
                         str(node.attrs))
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    channels = graph.channel_dims[input_name]
    pad = int(math.ceil((factor - 1) / 2.))
    mode = node.attrs["mode"]
    # print(mode)
    # exit(0)
    #https://github.com/pytorch/pytorch/issues/6900
    if mode == b"bilinear":
        layer = myf("Deconvolution",
                    node_name, [input_name], [output_name],
                    convolution_param=dict(
                        num_output=channels,
                        kernel_size=2 * factor - factor % 2,
                        stride=factor,
                        pad=pad,
                        group=channels,
                        bias_term=False,
                        weight_filler=dict(type="bilinear")))
    else:
        layer = myf("Deconvolution",
                    node_name, [input_name], [output_name],
                    convolution_param=dict(
                        num_output=channels,
                        kernel_size=factor,
                        stride=factor,
                        group=channels,
                        bias_term=False,
                    ))

    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
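The kernel_size = 2 * factor - factor % 2 and pad = ceil((factor - 1) / 2) pairing makes the grouped Deconvolution reproduce bilinear interpolation once the "bilinear" filler fills the weights (see the linked PyTorch issue). A NumPy sketch of the per-channel kernel such a filler produces, following the standard FCN-style construction (the function name is ours):

import numpy as np

def bilinear_upsample_weights(factor, channels):
    # Sketch of the (channels, 1, k, k) weights for the grouped
    # Deconvolution above: one bilinear kernel per channel.
    k = 2 * factor - factor % 2
    center = factor - 1 if k % 2 == 1 else factor - 0.5
    og = np.ogrid[:k, :k]
    kern = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    return np.tile(kern, (channels, 1, 1, 1)).astype(np.float32)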
Example #19
def _convert_Reorg(graph, node_name, input_name, output_name):

    layer = myf('Reorg',
                node_name, [input_name], [output_name],
                reorg_param=dict(
                    stride=2,
                    reverse=False,
                ))
    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #20
def _convert_relu(node, graph, err):
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    name = str(node.name)
    if input_name == output_name:
        inplace = True
    else:
        inplace = False
    layer = myf("ReLU", name, [input_name], [output_name], in_place=inplace)
    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #21
def _convert_Flatten(node, graph, err):
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    if input_name == output_name:
        inplace = True
    else:
        inplace = False
    layer = myf("Flatten", node_name, [input_name], [output_name], in_place=inplace)
    # graph.channel_dims[output_name] = shape[1]
    return layer
Example #22
def _convert_Reshape(node, graph, err):
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    # if len(node.inputs)==1:
    #     shape = tuple(node.attrs.get('shape', ()))
    # else:
    #     print()
    # shape = tuple(node.input_tensors[node.inputs[1]])
    # for _node in graph.nodes:
    #     if output_name in _node.inputs:
    #         shape =
    # node.input_tensors maps initializer names to their values; the
    # reshape target comes through as the last of those initializers.
    size_name = list(node.input_tensors.keys())
    shape = tuple(node.input_tensors[size_name[-1]])

    if input_name == output_name:
        inplace = True
    else:
        inplace = False
    if len(shape) == 2:
        layer = myf("Flatten",
                    node_name, [input_name], [output_name],
                    in_place=inplace)
        if shape[1] < 0:
            # A negative dim means "infer"; fall back to the recorded
            # input shape for the channel count.
            graph.channel_dims[output_name] = graph.shape_dict[input_name][1]
        else:
            graph.channel_dims[output_name] = shape[1]
        return layer
    elif len(shape) == 4:
        graph.channel_dims[output_name] = shape[1]
        layer = myf("Reshape",
                    node_name, [input_name], [output_name],
                    reshape_param=dict(shape=dict(dim=list(shape))))
        return layer
    else:
        return err.unsupported_op_configuration(
            node, "Reshape dimension number shall be 2 or 4")
Example #23
def _convert_softmax(node, graph, err):
    #print(node, graph)
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    #print(node.attrs, node_name, input_name, output_name)
    layer = myf("Softmax",
                node_name, [input_name], [output_name],
                softmax_param=dict(axis=node.attrs.get('axis', 1)))

    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #24
def _convert_transpose(node, graph, err):
    #print(node, graph)
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    #print(node.attrs, node_name, input_name, output_name)
    layer = myf("Permute",
                node_name, [input_name], [output_name],
                permute_param=dict(order=node.attrs['perm']))

    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #25
def _convert_PassThrough(node_name, input_name, output_name, input_channel,
                         block_height, block_width):  # deconvolution

    layer = myf('PassThrough',
                node_name, [input_name], [output_name],
                pass_through_param=dict(
                    num_output=input_channel * block_height * block_width,
                    block_height=block_height,
                    block_width=block_width,
                ))

    return layer
Example #26
def _convert_Unsqueeze(node, graph, err):
    #return err.unsupported_op_configuration(node, "Unsupport Unsqueeze in caffe")
    print("TODO", node.attrs)
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    input_shape = graph.shape_dict[node.inputs[0]]
    # Assumes axes == [0]; a general Unsqueeze would insert a 1 at every
    # axis listed in node.attrs["axes"].
    shape = [1] + list(input_shape)
    layer = myf("Reshape",
                node_name, [input_name], [output_name],
                reshape_param=dict(shape=dict(dim=shape)))
    return layer
Example #27
def make_input(input):
    name = input[0]
    if name[0].isnumeric():
        name = 'node_{}'.format(name)
    output = input[0]
    output = [output]
    shape = input[2]
    shape = list(shape)
    input_layer = myf("Input",
                      name, [],
                      output,
                      input_param=dict(shape=dict(dim=shape)))
    return input_layer
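Hypothetical usage, assuming each graph input tuple is (name, dtype, shape) as the indexing above suggests:

# A 1x3x224x224 image input; input[1] (presumably the dtype) is unused here.
data_layer = make_input(("data", None, (1, 3, 224, 224)))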
Example #28
def _convert_leakyrelu(node, graph, err):
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    name = str(node.name)
    slope = node.attrs["alpha"]

    if input_name == output_name:
        inplace = True
    else:
        inplace = False
    layer = myf("ReLU", name, [input_name], [output_name],
                negative_slope=slope, in_place=inplace)
    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #29
def _convert_resize(node, graph, err):
    #print(node, graph)
    node_name = node.name
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    #print(node.attrs, node_name, input_name, output_name)
    layer = myf("Upsample",
                node_name, [input_name], [output_name],
                upsample_param=dict(scale=2))

    graph.channel_dims[output_name] = graph.channel_dims[input_name]
    return layer
Example #30
def _convert_conv_slice(node, graph, err):
    input_name = str(node.inputs[0])
    output_name = str(node.outputs[0])
    node_name = node.name
    axes = node.attrs.get('axes', [])
    #graph.channel_dims[input_name] = graph.shape_dict[input_name][1]
    #channels = graph.shape_dict[input_name][1]
    #channels = graph.channel_dims[input_name]
    channels = graph.shape_dict[input_name][1]

    if len(axes) != 1:
        return err.unsupported_op_configuration(
            node, "Only single axis Slice is supported now")
    starts = node.attrs['starts']
    ends = node.attrs['ends']
    start = starts[0]
    end = ends[0]
    valid_pts = []
    for pt in [start, end]:
        if pt is not None and pt != 0 and pt != channels:
            valid_pts.append(pt)
    if start == 0:
        output_name_list = [output_name, str(output_name) + "slice_another"]
    else:
        output_name_list = [str(output_name) + "slice_another", output_name]
    # len(axes) == 1 is guaranteed by the guard above; the label below is
    # informational, the layer itself uses the numeric slice_dim.
    if axes[0] == 0:
        axis = 'batch'
    elif axes[0] == 1:
        axis = 'channel'
    elif axes[0] == 2:
        axis = 'height'
    elif axes[0] == 3:
        axis = 'width'
    else:
        return err.unsupported_op_configuration(
            node, "Slice is supported only along H, W or C dimensions")
    layer = myf('Slice',
                node_name, [input_name],
                output_name_list,
                slice_dim=axes[0],
                slice_point=valid_pts)
    graph.channel_dims[output_name_list[0]] = valid_pts[0]
    graph.channel_dims[output_name_list[-1]] = channels - valid_pts[-1]
    return layer
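Worked example with made-up numbers: slicing channels 0:32 of a 64-channel blob arrives as starts=[0], ends=[32], axes=[1]. valid_pts becomes [32], the tops are [output, output + "slice_another"], and the two outputs are recorded with 32 and 64 - 32 = 32 channels respectively.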