Example #1
def convertToCaffe(graph,
                   prototxt_save_path,
                   caffemodel_save_path,
                   convert_leaky_relu,
                   max_inputs=-1):

    if convert_leaky_relu:
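        # Reuse the ReLU converter entries for LeakyRelu nodes in both the
        # layer-definition (cvt) and weight-loading (wlr) registries.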
        cvt._ONNX_NODE_REGISTRY['LeakyRelu'] = cvt._ONNX_NODE_REGISTRY['Relu']
        wlr._ONNX_NODE_REGISTRY['LeakyRelu'] = wlr._ONNX_NODE_REGISTRY['Relu']

    exist_edges = []
    layers = []
    exist_nodes = []
    err = ErrorHandling()
    if max_inputs > 0:
        graph.inputs = graph.inputs[:max_inputs]

    for i in graph.inputs:
        edge_name = i[0]
        input_layer = cvt.make_input(i)
        layers.append(input_layer)
        exist_edges.append(i[0])
        dims_input = graph.shape_dict[edge_name]
        for dim in dims_input:
            assert dim > 0, 'Please export the ONNX graph without dynamic shapes.'
        graph.channel_dims[edge_name] = dims_input[1]

    for id, node in enumerate(graph.nodes):
        # Sanitize node and edge names so they are valid Caffe layer names.
        node.name = name_to_alpha(node.name)
        for idx in range(len(node.inputs)):
            node.inputs[idx] = name_to_alpha(node.inputs[idx])
        for idx in range(len(node.outputs)):
            node.outputs[idx] = name_to_alpha(node.outputs[idx])
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False

        for inp in inputs:
            if inp not in exist_edges and inp not in inputs_tensor:
                input_non_exist_flag = True
                break
        if input_non_exist_flag:
            continue
        if op_type not in cvt._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = cvt._ONNX_NODE_REGISTRY[op_type]
        layer = converter_fn(node, graph, err)
        if type(layer) == tuple:
            for l in layer:
                layers.append(l)
        else:
            layers.append(layer)
        outs = node.outputs
        for out in outs:
            exist_edges.append(out)

    net = caffe_pb2.NetParameter()
    for id, layer in enumerate(layers):
        layers[id] = layer._to_proto()
    net.layer.extend(layers)

    with open(prototxt_save_path, 'w') as f:
        print(net, file=f)

    caffe.set_mode_cpu()
    deploy = prototxt_save_path
    net = caffe.Net(deploy, caffe.TEST)

    for id, node in enumerate(graph.nodes):
        node_name = node.name
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False
        if op_type not in wlr._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = wlr._ONNX_NODE_REGISTRY[op_type]
        converter_fn(net, node, graph, err)

    net.save(caffemodel_save_path)
    return net
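
A minimal call sketch for this variant. The loader name getGraph and the file paths are assumptions for illustration; only convertToCaffe itself comes from the example above.

# Hypothetical driver; getGraph() is an assumed helper that parses the ONNX
# file into the graph wrapper expected by convertToCaffe().
graph = getGraph('model.onnx')
convertToCaffe(graph,
               'model.prototxt',
               'model.caffemodel',
               convert_leaky_relu=True,  # route LeakyRelu nodes through the Relu converter
               max_inputs=1)             # keep only the first graph input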
Example #2
def convertToCaffe(graph,
                   prototxt_save_path,
                   caffe_model_save_path,
                   exis_focus=False,
                   focus_concat_name=None,
                   focus_conv_name=None):  # extra parameters to add yourself if the model has a Focus block
    exist_edges = []
    layers = []
    exist_nodes = []
    err = ErrorHandling()
    for i in graph.inputs:  # each graph input is the grey input block in a visualizer, showing the input name and shape; it is not an op
        edge_name = i[0]  # usually a name such as images, data, or input

        input_layer = cvt.make_input(i)  # build the prototxt-style input layer

        layers.append(input_layer)
        exist_edges.append(i[0])
        graph.channel_dims[edge_name] = graph.shape_dict[edge_name][1]  # shape_dict[edge_name] is something like (1, 3, 112, 112)

    for id, node in enumerate(graph.nodes):

        node_name = node.name  # the node's name within the current model

        op_type = node.op_type  # the op type, e.g. Conv or Relu

        if exis_focus:
            if op_type == "Slice":
                continue
            if node_name == focus_concat_name:
                converter_fn = cvt._ONNX_NODE_REGISTRY["Reorg"]
                output_name = str(node.outputs[0])
                layer = converter_fn(graph, "focus", "images", output_name)
                if type(layer) == tuple:
                    for l in layer:  # usually a BatchNorm, which Caffe splits into BatchNorm and Scale layers
                        layers.append(l)
                else:
                    layers.append(layer)
                outs = node.outputs  # output edge names of the node
                for out in outs:
                    exist_edges.append(out)
                continue

        inputs = node.inputs  # list of input names shown in a visualizer, in the same order; keys that carry weight arrays also appear in input_tensors

        inputs_tensor = node.input_tensors  # dict of weight arrays; every key is also listed in inputs, one entry per array

        input_non_exist_flag = False

        for inp in inputs:  # inputs holds either upstream node names or this layer's parameter names
            if inp not in exist_edges and inp not in inputs_tensor:  # skip unreachable nodes; for a normal node this never triggers
                input_non_exist_flag = True
                break
        if input_non_exist_flag:
            continue

        if op_type not in cvt._ONNX_NODE_REGISTRY:  # unsupported op: report an error
            err.unsupported_op(node)
            continue
        converter_fn = cvt._ONNX_NODE_REGISTRY[op_type]  # the matching conversion function
        layer = converter_fn(node, graph, err)
        if type(layer) == tuple:
            for l in layer:  # usually a BatchNorm, which Caffe splits into BatchNorm and Scale layers
                layers.append(l)
        else:
            layers.append(layer)
        outs = node.outputs  # output edge names of the node
        for out in outs:
            exist_edges.append(out)  # record output edges for later lookups

    net = caffe_pb2.NetParameter()  # the Caffe network definition
    for id, layer in enumerate(layers):

        layers[id] = layer._to_proto()  # convert each layer to its proto representation
        print(layers[id])
    net.layer.extend(layers)  # add the layers to the network definition

    with open(prototxt_save_path, 'w') as f:  # write the prototxt file
        print(net, file=f)
    # ------ prototxt conversion ends here ------
    # ------ now convert the caffemodel ------
    caffe.set_mode_cpu()
    deploy = prototxt_save_path
    net = caffe.Net(deploy, caffe.TEST)

    for id, node in enumerate(graph.nodes):
        node_name = node.name
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False
        if exis_focus:
            if op_type == "Slice":
                continue

        if op_type not in wlr._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = wlr._ONNX_NODE_REGISTRY[op_type]
        if node_name == focus_conv_name:
            converter_fn(net, node, graph, err, pass_through=1)
        else:
            converter_fn(net, node, graph, err)  # copy the model weights

    net.save(caffe_model_save_path)  # save the converted model
    return net
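
A hedged usage sketch for this Focus-aware variant (e.g. a YOLOv5-style export). The loader getGraph and the node names below are illustrative placeholders, not part of the example.

# All names below are assumptions for illustration.
graph = getGraph('yolov5s.onnx')
convertToCaffe(graph,
               'yolov5s.prototxt',
               'yolov5s.caffemodel',
               exis_focus=True,                 # the model starts with a Focus block
               focus_concat_name='Concat_40',   # Concat node that ends the Focus slices
               focus_conv_name='Conv_41')       # first Conv after the Focus block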
Example #3
def convertToCaffe(graph, prototxt_save_path, caffe_model_save_path):
    exist_edges = []
    layers = []
    exist_nodes = []
    err = ErrorHandling()
    for i in graph.inputs:
        edge_name = i[0]
        input_layer = cvt.make_input(i)
        layers.append(input_layer)
        exist_edges.append(i[0])
        graph.channel_dims[edge_name] = graph.shape_dict[edge_name][1]


    for id, node in enumerate(graph.nodes):
        node_name = node.name
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False

        for inp in inputs:
            if inp not in exist_edges and inp not in inputs_tensor:
                input_non_exist_flag = True
                break
        if input_non_exist_flag:
            continue

        if op_type not in cvt._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = cvt._ONNX_NODE_REGISTRY[op_type]
        layer = converter_fn(node, graph, err)
        if type(layer) == tuple:
            for l in layer:
                layers.append(l)
        else:
            layers.append(layer)
        outs = node.outputs
        for out in outs:
            exist_edges.append(out)

    net = caffe_pb2.NetParameter()
    for id, layer in enumerate(layers):
        layers[id] = layer._to_proto()
    net.layer.extend(layers)

    with open(prototxt_save_path, 'w') as f:
        print(net, file=f)

    caffe.set_mode_cpu()
    deploy = prototxt_save_path
    net = caffe.Net(deploy,
                    caffe.TEST)

    for id, node in enumerate(graph.nodes):
        node_name = node.name
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False
        if op_type not in wlr._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = wlr._ONNX_NODE_REGISTRY[op_type]
        converter_fn(net, node, graph, err)

    net.save(caffe_model_save_path)
    return net
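
A minimal call sketch for this basic variant, assuming the same hypothetical getGraph loader as above.

graph = getGraph('resnet18.onnx')  # assumed helper that builds the graph wrapper
net = convertToCaffe(graph, 'resnet18.prototxt', 'resnet18.caffemodel')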
Example #4
def convertToCaffe(graph, prototxt_save_path, caffe_model_save_path):

    exist_edges = []
    layers = []
    exist_nodes = []
    err = ErrorHandling()
    for i in graph.inputs:
        edge_name = i[0]
        input_layer = cvt.make_input(i)
        layers.append(input_layer)
        exist_edges.append(i[0])
        graph.channel_dims[edge_name] = graph.shape_dict[edge_name][1]

    slice_layers = []
    for id, node in enumerate(graph.nodes):
        node_name = node.name
        print('node name: ', node_name)
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False

        for inp in inputs:
            if inp not in exist_edges and inp not in inputs_tensor:
                input_non_exist_flag = True
                break
        if input_non_exist_flag:
            continue

        if op_type not in cvt._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = cvt._ONNX_NODE_REGISTRY[op_type]
        layer = converter_fn(node, graph, err)

        # Merge a run of consecutive Slice nodes over the same input into a
        # single Caffe Slice layer. The bounds check must come before the
        # graph.nodes[id + 1] access to avoid an IndexError on the last node.
        if op_type == 'Slice' \
                and (len(slice_layers) == 0 or slice_layers[0].inputs[0] == layer.inputs[0]) \
                and id != len(graph.nodes) - 1 and graph.nodes[id + 1].op_type == 'Slice':
            slice_layers.append(layer)
            continue
        elif op_type == 'Slice':
            slice_layers.append(layer)
            slice_points = []
            slice_outputs = []
            name = ''
            for slice_layer in slice_layers:
                name += slice_layer.layer_name + '-'
                slice_points.append(slice_layer.params['slice_point'] + (0 if len(slice_points) == 0 else slice_points[-1]))
                slice_outputs.extend(slice_layer.outputs)
            layer = slice_layers[0]
            layer.layer_name = name[:-1]
            layer.params['slice_point'] = slice_points[:-1]
            layer.outputs = slice_outputs
            node.outputs = slice_outputs

        if type(layer) == tuple:
            for l in layer:
                layers.append(l)
        else:
            layers.append(layer)
        outs = node.outputs
        for out in outs:
            exist_edges.append(out)
    net = caffe_pb2.NetParameter()
    for id, layer in enumerate(layers):
        layers[id] = layer._to_proto()
    net.layer.extend(layers)

    with open(prototxt_save_path, 'w') as f:
        print(net, file=f)

    caffe.set_mode_cpu()
    deploy = prototxt_save_path
    net = caffe.Net(deploy,
                    caffe.TEST)

    for id, node in enumerate(graph.nodes):
        node_name = node.name
        op_type = node.op_type
        inputs = node.inputs
        inputs_tensor = node.input_tensors
        input_non_exist_flag = False
        if op_type not in wlr._ONNX_NODE_REGISTRY:
            err.unsupported_op(node)
            continue
        converter_fn = wlr._ONNX_NODE_REGISTRY[op_type]
        converter_fn(net, node, graph, err)

    net.save(caffe_model_save_path)
    return net
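
The Slice-merging branch above folds a run of consecutive ONNX Slice nodes that read the same input into one Caffe Slice layer: each node's slice_point is added to the previous cumulative point, and the final point is dropped because Caffe infers the last segment. A standalone sketch of that accumulation, with made-up per-node sizes:

# Illustrative values only: per-node slice sizes along the split axis.
per_node_points = [2, 2, 2]  # three Slice nodes, 2 channels each
slice_points = []
for p in per_node_points:
    slice_points.append(p + (0 if len(slice_points) == 0 else slice_points[-1]))
print(slice_points)       # [2, 4, 6]
print(slice_points[:-1])  # [2, 4] -> slice_point of the merged Caffe layer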