Beispiel #1
0
def runPytorch(img):
    """Run a forward pass of the PyTorch model on ``img``, export the model
    to ONNX, then shape-infer and optimize the exported file in place.

    img -- numpy float32 batch fed directly to the model; assumed to be
           (N, 3, H, W) -- TODO confirm against the model definition.

    Relies on module-level names: MobileFaceNet_DEX_c3, modelFileName,
    weight_filler, modelWidthHeight, infer_shapes, optimize, helper.
    """
    torchModel = MobileFaceNet_DEX_c3()  # alternatives: MobileFaceNetVerifyAgeGender(), IR_SE_FaceNet()
    model_dict = torchModel.state_dict()
    # map_location keeps all tensors on CPU regardless of where they were saved.
    torchWeights = torch.load(modelFileName, map_location=lambda storage, loc: storage)
    updated_dict, match_layers, mismatch_layers = weight_filler(torchWeights, model_dict)
    # Bug fix: print() does not %-interpolate; the original printed the
    # template and the list as a tuple.
    print("The mismatch layers %s" % (mismatch_layers,))
    model_dict.update(updated_dict)
    torchModel.load_state_dict(model_dict)
    torchModel.eval().cpu()
    y = torchModel.forward(torch.from_numpy(img).cpu())
    print(y.abs().sum())  # quick scalar checksum of the PyTorch output
    onxFileName = "nameAgeGenderFaceBlur.onnx"
    dummy_input = torch.randn(4, 3, modelWidthHeight, modelWidthHeight)
    torch.onnx.export(torchModel.cpu(), dummy_input.cpu(), onxFileName, verbose=True)
    # Bug fix: removed the stray unfinished statement ``torch.onnx.in``
    # (a SyntaxError -- ``in`` is a Python keyword).
    om = onnx.load(onxFileName)
    om = infer_shapes(om)
    om = optimize(om)
    onnx.save(om, onxFileName)
    a = om.graph
    # Result intentionally discarded in the original; kept for parity.
    helper.printable_graph(a)
Beispiel #2
0
def onnx_check():
    """Sanity-check the generated ONNX model.

    Reference:
    https://github.com/onnx/tutorials/blob/master/tutorials/CorrectnessVerificationAndPerformanceComparison.ipynb
    """
    loaded = onnx.load("/home/ubuntu/mytrain/onnx_yolov3/yolov3_coco.onnx")
    # Dump the graph first for visual inspection.
    print(helper.printable_graph(loaded.graph))
    # Known failure observed here:
    # onnx.onnx_cpp2py_export.checker.ValidationError:
    # Node (086_upsample) has input size 2 not in range [min=3, max=4].
    onnx.checker.check_model(loaded)
Beispiel #3
0
    def test_initializer_with_matching_graph_input(self):  # type: () -> None
        add = helper.make_node("Add", ["X", "Y_Initializer"], ["Z"])
        value_info = [
            helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1])
        ]

        graph = helper.make_graph(
            [add],
            "test",
            [
                helper.make_tensor_value_info("X", TensorProto.FLOAT, [1]),
                helper.make_tensor_value_info("Y_Initializer",
                                              TensorProto.FLOAT, [1])
            ],  # inputs
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1])
             ],  # outputs
            [helper.make_tensor("Y_Initializer", TensorProto.FLOAT, [1], [1])
             ],  # initializers
            doc_string=None,
            value_info=value_info)

        graph_str = helper.printable_graph(graph)
        self.assertTrue(
            ''') optional inputs with matching initializers (
  %Y_Initializer[FLOAT, 1]''' in graph_str, graph_str)
Beispiel #4
0
    def build_onnx_graph(
            self,
            layer_configs,
            weights_file_path,
            verbose=True):
        """Create an ONNX graph from the parsed DarkNet layer configs,
        populate it with weights from the weights file and return the
        resulting model definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        # Translate every parsed layer into ONNX node(s), remembering the
        # "major" nodes that carry a name.
        for name, cfg in layer_configs.items():
            specs = self._make_onnx_node(name, cfg)
            if specs.name is not None:
                self.major_node_specs.append(specs)
        # Drop the placeholder 'route'/'yolo' dummy nodes.
        self.major_node_specs = [
            spec for spec in self.major_node_specs if 'dummy' not in spec.name
        ]
        # One output value-info per output tensor, batch dim prepended.
        outputs = [
            helper.make_tensor_value_info(
                tensor_name, TensorProto.FLOAT,
                [self.batch_size, ] + self.output_tensors[tensor_name])
            for tensor_name in self.output_tensors.keys()
        ]
        inputs = [self.input_tensor]
        loader = WeightLoader(weights_file_path)
        initializer = []
        # Layers with parameters contribute both initializers and inputs.
        for param_name in self.param_dict.keys():
            _, kind = param_name.split('_', 1)
            params = self.param_dict[param_name]
            if kind == 'convolutional':
                init_part, input_part = loader.load_conv_weights(params)
            elif kind == 'upsample':
                init_part, input_part = loader.load_upsample_scales(params)
            else:
                continue  # layer carries no loadable parameters
            initializer.extend(init_part)
            inputs.extend(input_part)
        del loader  # release the loader (and any file handle it may hold)
        self.graph_def = helper.make_graph(
            nodes=self._nodes,
            name=self.model_name,
            inputs=inputs,
            outputs=outputs,
            initializer=initializer,
        )
        if verbose:
            print(helper.printable_graph(self.graph_def))
        return helper.make_model(self.graph_def,
                                 producer_name='NVIDIA TensorRT sample')
Beispiel #5
0
def main():
    """Inspect an ONNX model file.

    Driven by command-line args: optionally validate the model, print
    op-type statistics and model metadata, always dump the printable
    graph, and optionally write the graph as pbtxt.
    """
    args = get_args()

    with open(args.input, "rb") as f:
        data = f.read()
        model = ModelProto()
        model.ParseFromString(data)

    if args.check:
        onnx.checker.check_model(model)

    if args.stats:
        # Histogram of node op types.
        ops = collections.Counter()
        for node in model.graph.node:
            ops[node.op_type] += 1
        print(ops, "\n\n")

    if args.meta:
        fields = [
            "ir_version", "producer_name", "producer_version", "name",
            "opset_import"
        ]
        for name in fields:
            value = getattr(model, name, None)
            if value:
                print("{} = {}".format(name, value))
        # Bug fix: .format() was never called, so the template string and
        # the key/value were printed as separate print() arguments.
        for i in model.metadata_props:
            print("meta.{} = {}".format(i.key, i.value))

    print(helper.printable_graph(model.graph))

    if args.pbtxt:
        with open(args.pbtxt, "w") as f:
            f.write(str(model.graph))
    def build_onnx_graph(self, layer_configs, weights_file_path, verbose=True):
        """Iterate over all layer configs (parsed from the DarkNet representation
        of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
        file and return the graph definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        # Convert each parsed DarkNet layer into ONNX node(s).
        for layer_name in layer_configs.keys():
            layer_dict = layer_configs[layer_name]
            major_node_specs = self._make_onnx_node(layer_name, layer_dict)
            if major_node_specs.name is not None:
                self.major_node_specs.append(major_node_specs)

        # NOTE(review): 'ouput_tensors' looks like a typo for
        # 'output_tensors' (the commented-out code below reads
        # self.output_tensors) -- confirm against the class definition
        # before changing it.
        outputs = self.ouput_tensors
        # for tensor_name in self.output_tensors.keys():
        #     output_dims = [self.batch_size, ] + \
        #         self.output_tensors[tensor_name]
        #     output_tensor = helper.make_tensor_value_info(
        #         tensor_name, TensorProto.FLOAT, output_dims)
        #     outputs.append(output_tensor)
        inputs = [self.input_tensor]
        weight_loader = WeightLoader(weights_file_path)
        initializer = list()
        # For every parameterized layer, collect initializer tensors and
        # matching graph inputs.
        for layer_name in self.param_dict.keys():
            _, layer_type = layer_name.split('_', 1)
            if layer_type == 'convolutional':
                conv_params = self.param_dict[layer_name]
                initializer_layer, inputs_layer = weight_loader.load_conv_weights(
                    conv_params)
            elif layer_type == 'upsample':
                # Upsample scales are built inline: a fixed-length-4 tensor
                # taken from the parsed 'param' field.
                upsample_params = self.param_dict[layer_name]
                initializer_layer = [
                    helper.make_tensor(upsample_params['name'],
                                       TensorProto.FLOAT, [4],
                                       upsample_params['param'])
                ]
                inputs_layer = [
                    helper.make_tensor_value_info(upsample_params['name'],
                                                  TensorProto.FLOAT, [4])
                ]
            else:
                # Any other parameterized layer type is unexpected.
                raise Exception("error")
            initializer.extend(initializer_layer)
            inputs.extend(inputs_layer)
        del weight_loader
        self.graph_def = helper.make_graph(nodes=self._nodes,
                                           name='YOLOv3-608',
                                           inputs=inputs,
                                           outputs=outputs,
                                           initializer=initializer)
        if verbose:
            print(helper.printable_graph(self.graph_def))
        model_def = helper.make_model(
            self.graph_def, producer_name='https://github.com/CaoWGG')
        return model_def
Beispiel #7
0
    def build_onnx_graph(self, layer_configs, weights_file_path, verbose=True):
        """Iterate over all layer configs, create an ONNX graph, populate it
        with the downloaded yolov3 weights, and return the graph definition.

        Keyword arguments:
        layer_configs -- OrderedDict with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- whether to print the graph after creation (default: True)
        """

        for layer_name in layer_configs.keys():

            layer_dict = layer_configs[layer_name]
            # Read each layer of yolov3.cfg and turn it into an ONNX node.
            major_node_specs = self._make_onnx_node(layer_name, layer_dict)
            # If this is a major node, remember it.
            if major_node_specs.name:
                self.major_node_specs.append(major_node_specs)

        outputs = list()
        for tensor_name in self.output_tensors.keys():
            # Prepend the batch dimension to the output dims.
            output_dims = [self.batch_size, ] + \
                self.output_tensors[tensor_name]
            # Build the ONNX tensor info via helper.make_tensor_value_info;
            # no weights are filled in at this point.
            output_tensor = helper.make_tensor_value_info(
                tensor_name, TensorProto.FLOAT, output_dims)
            outputs.append(output_tensor)

        inputs = [self.input_tensor]
        weight_loader = WeightLoader(weights_file_path)
        initializer = list()
        # self.param_dict was already populated in _make_onnx_node.
        for layer_name in self.param_dict.keys():
            _, layer_type = layer_name.split('_', 1)  # e.g. 001_convolutional
            conv_params = self.param_dict[layer_name]
            assert layer_type == 'convolutional'
            initializer_layer, inputs_layer = weight_loader.load_conv_weights(
                conv_params)
            initializer.extend(initializer_layer)
            inputs.extend(inputs_layer)
        del weight_loader

        # Assemble the ONNX graph via helper.make_graph.
        self.graph_def = helper.make_graph(nodes=self._nodes,
                                           name='YOLOv3-608',
                                           inputs=inputs,
                                           outputs=outputs,
                                           initializer=initializer)

        if verbose:
            print(helper.printable_graph(self.graph_def))

        # Wrap the graph into a model via helper.make_model.
        model_def = helper.make_model(self.graph_def,
                                      producer_name='NVIDIA TensorRT sample')
        return model_def
Beispiel #8
0
def main():
    """Inspect an ONNX model file.

    Driven by command-line args: print op-type statistics and metadata,
    dump the printable graph, optionally validate (including after shape
    inference), write the graph as pbtxt, or emit a Graphviz dot file of
    the node topology.
    """
    args = get_args()

    with open(args.input, "rb") as f:
        data = f.read()
        model = ModelProto()
        model.ParseFromString(data)

    if args.stats:
        # Histogram of node op types.
        ops = collections.Counter()
        for node in model.graph.node:
            ops[node.op_type] += 1
        print(ops, "\n\n")

    if args.meta:
        fields = [
            "ir_version", "producer_name", "producer_version", "name",
            "opset_import"
        ]
        for name in fields:
            value = getattr(model, name, None)
            if value:
                print("{} = {}".format(name, value))
        # Bug fix: .format() was never called, so the template string and
        # the key/value were printed as separate print() arguments.
        for i in model.metadata_props:
            print("meta.{} = {}".format(i.key, i.value))

    print(helper.printable_graph(model.graph))

    if args.check:
        onnx.checker.check_model(model)
        inferred_model = shape_inference.infer_shapes(model)
        onnx.checker.check_model(inferred_model)

    if args.pbtxt:
        with open(args.pbtxt, "w") as f:
            f.write(str(model.graph))

    if args.dot:
        with open(args.dot, "w") as f:
            f.write("digraph graphname {\n")
            for node in model.graph.node:
                output_name = node.name
                name = node.name
                color = ""
                if node.op_type.startswith("_"):
                    color = ' color="yellow"'
                if node.op_type == "CELL":
                    color = ' color="red"'
                f.write('"{}" [label="{},{}"{}];\n'.format(
                    output_name, node.op_type, name, color))
                for input_name in node.input:
                    # Strip any ':N' suffix and a leading '^' from the
                    # input name before drawing the edge.
                    parts = input_name.split(":")
                    input_name = re.sub(r"^\^", "", parts[0])
                    f.write('  "{}" -> "{}";\n'.format(input_name,
                                                       output_name))
            f.write("}\n")
    def build_onnx_graph(self, layer_configs, weights_file_path, verbose=True):
        """Iterate over all layer configs (parsed from the DarkNet representation
        of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
        file and return the graph definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        for layer_name in layer_configs.keys(
        ):  # step1: build nodes (one per op: conv, bn, relu, ...) and major_node_specs (one spec per major node, e.g. a conv/bn/relu bundle)
            layer_dict = layer_configs[layer_name]
            major_node_specs = self._make_onnx_node(layer_name, layer_dict)
            if major_node_specs.name is not None:
                self.major_node_specs.append(major_node_specs)
        outputs = list()
        for tensor_name in self.output_tensors.keys():
            output_dims = [self.batch_size, ] + \
                self.output_tensors[tensor_name]    # e.g. (255,19,19) -> (64,255,19,19)
            output_tensor = helper.make_tensor_value_info(
                tensor_name, TensorProto.FLOAT,
                output_dims)  # output tensor info in proto form
            outputs.append(output_tensor)
        inputs = [self.input_tensor]  # input tensor info in proto form
        weight_loader = WeightLoader(weights_file_path)  # open the weights file
        initializer = list()
        """创建initializer就是把每层的超参数变换成TensorProto形式"""
        # (Above, translated: building the initializer converts each layer's
        # parameters into TensorProto form.)
        # If a layer has parameters, add them to the initializer and input lists.
        for layer_name in self.param_dict.keys():
            _, layer_type = layer_name.split('_', 1)
            params = self.param_dict[layer_name]
            if layer_type == 'convolutional':
                initializer_layer, inputs_layer = weight_loader.load_conv_weights(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
            elif layer_type == "upsample":
                initializer_layer, inputs_layer = weight_loader.load_resize_scales(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
        del weight_loader
        """注意:这里都是传引用赋值,value_info_proto.tensor_type->tensor_type_proto, tensor_type_proto.shape->tensor_shape_proto, tensor_shape_proto."""
        """make_graph很简单,就是把前面得到的_nodes, initializer, inputs, outputs都放进去."""
        # (Above, translated: proto sub-messages are assigned by reference;
        # make_graph simply bundles the _nodes, initializer, inputs and
        # outputs collected so far.)
        self.graph_def = helper.make_graph(nodes=self._nodes,
                                           name='YOLOv3-608',
                                           inputs=inputs,
                                           outputs=outputs,
                                           initializer=initializer)
        if verbose:
            print(helper.printable_graph(self.graph_def))
        model_def = helper.make_model(self.graph_def,
                                      producer_name='NVIDIA TensorRT sample')
        return model_def
Beispiel #10
0
    def save_model(self):
        """Assemble the ONNX graph from the collected nodes/inputs/outputs,
        validate the resulting model and write it to the output path."""
        graph = helper.make_graph(nodes=self.nodes,
                                  name=self.graph_name,
                                  inputs=self.inputs,
                                  outputs=self.outputs,
                                  initializer=self.initializer)
        # Show the assembled graph for inspection.
        print(helper.printable_graph(graph))

        model = helper.make_model(graph,
                                  producer_name='NVIDIA TensorRT sample')
        onnx.checker.check_model(model)
        onnx.save(model, self.output_file_path)
Beispiel #11
0
    def convert(self):
        """Build and validate an ONNX model from ``self.nodes``.

        Each active node (status truthy) becomes one ONNX node; inactive
        nodes are skipped. The single graph input is 'X'; the graph output
        is the id of the last active node processed (or 'X' if none).
        """
        name = "test"
        domain = "test.domain"

        # Inputs: a single length-1 float placeholder 'X'.
        inputs = [helper.make_tensor_value_info('X', TensorProto.FLOAT, [1])]

        # Nodes
        nodes = list()
        # Robustness fix: 'output' was unbound (NameError when building the
        # outputs below) if the graph was empty or every node was inactive;
        # default to the graph input.
        output = 'X'
        if not self.nodes:
            print("Empty Graph")
        else:
            for id, node in self.nodes.items():
                if not node.status:  # idiom fix: was 'node.status == False'
                    continue
                # Nodes with no declared inputs are fed from the placeholder.
                if len(node.inputs) == 0:
                    node.inputs.append('X')
                output = node.id
                nodes.append(
                    helper.make_node(node.type,
                                     node.inputs,
                                     node.outputs,
                                     name=name,
                                     domain=domain))

        # Outputs
        outputs = [helper.make_tensor_value_info(output, TensorProto.FLOAT, [1])]

        graph = helper.make_graph(nodes, name, inputs, outputs)
        onnx_id = helper.make_opsetid(domain, 1)
        model = helper.make_model(graph,
                                  producer_name=name,
                                  opset_imports=[onnx_id])
        checker.check_model(model)
        print(helper.printable_graph(model.graph))
    def build_onnx_graph(self, layer_configs, weights_file_path, verbose=True):
        """Build the YOLOv3-608 ONNX graph from the parsed DarkNet layer
        configurations, load the convolution weights into it and return the
        resulting model definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        for name, cfg in layer_configs.items():
            specs = self._make_onnx_node(name, cfg)
            if specs.name is not None:
                self.major_node_specs.append(specs)
                # Keep an alias of the current spec list for route handling.
                self.temp_major_node_specs_for_route = self.major_node_specs  ##add by miss libra
        # One output value-info per output tensor, batch dim prepended.
        outputs = [
            helper.make_tensor_value_info(
                tensor_name, TensorProto.FLOAT,
                [self.batch_size, ] + self.output_tensors[tensor_name])
            for tensor_name in self.output_tensors.keys()
        ]
        inputs = [self.input_tensor]
        loader = WeightLoader(weights_file_path)
        initializer = []
        for param_name in self.param_dict.keys():
            _, kind = param_name.split('_', 1)
            conv_params = self.param_dict[param_name]
            # This exporter only handles convolutional parameter layers.
            assert kind == 'convolutional'
            init_part, input_part = loader.load_conv_weights(conv_params)
            initializer.extend(init_part)
            inputs.extend(input_part)
        del loader
        self.graph_def = helper.make_graph(nodes=self._nodes,
                                           name='YOLOv3-608',
                                           inputs=inputs,
                                           outputs=outputs,
                                           initializer=initializer)
        if verbose:
            print(helper.printable_graph(self.graph_def))
        return helper.make_model(self.graph_def,
                                 producer_name='NVIDIA TensorRT sample')
Beispiel #13
0
    def graph_def_to_onnx_graph(
        cls,
        graph_def,
        init_func=None,
        constants=None,
        value_info=None,
        graph_name=None,
        verbose=True,
        enforce_no_running=False,
    ):
        """Translate a native graph_def into an ONNX GraphProto.

        graph_def -- the native graph definition to translate
        init_func -- optional initialization function passed to the native runner
        constants -- optional dict name -> ndarray, appended as initializers
        value_info -- dict name -> (elem_type, shape) for graph tensors;
            entries missing for any op input/output trigger a native run to
            infer them (unless enforce_no_running)
        graph_name -- name of the resulting graph (default: graph_def.name)
        verbose -- print the printable graph when True
        enforce_no_running -- assume outputs are already in value_info and
            never execute the native graph (e.g. the PyTorch path)
        """
        if value_info is None: value_info = {}
        if not isinstance(value_info, dict):
            raise ValueError(
                'Please pass value_info as a '
                    'name -> (type, shape) dictionary')

        leaf_tensors = extract_leaf_tensors(graph_def)
        initializer = extract_initializer(graph_def)

        # Check whether we have got type/shape info for every graph input.
        missing = (leaf_tensors - set(value_info.keys()) - initializer)
        if missing:
            raise RuntimeError('Could not find value info of inputs: {}'.format(
                ', '.join(missing)))

        # Check if value_info contains the types/shapes of all the blobs, in
        # which case we don't need to infer them by running the net.
        run_native_graph = False
        for op in graph_def.op:
            for name in itertools.chain(op.input, op.output):
                if name not in value_info:
                    run_native_graph = True
                    break

        ws = None  # native workspace; set only when the graph is executed

        # Get the value info of outputs and initializer
        if run_native_graph and not enforce_no_running:
            # Feed random inputs of the declared dtype/shape and execute the
            # native graph to discover output types and shapes.
            inputs = {}
            for name, (elem_type, shape) in value_info.items():
                inputs[name] = numpy.random.randn(*shape).astype(
                    mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])

            ws, outputs, initializer = native_run_graph(
                graph_def, inputs, initializer, init_func)

            for name in graph_def.output:
                output = outputs[name]
                elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[output.dtype]
                shape = output.shape
                value_info[name] = (elem_type, shape)

        if enforce_no_running:
            # In some cases(e.g. PyTorch), we had ran the graph
            # outputs had been in ``value_info`` already
            ws = _workspace.get_default_workspace()
            initializer = fetch_initializer(initializer)

        # Prepare to make the graph
        onnx_graph = GraphProto()
        onnx_graph.name = graph_name if graph_name else graph_def.name

        # Initializer should also be included in the inputs
        value_info.update({
            init.name: (init.data_type, init.dims)
                for init in initializer})

        # Add initializer
        onnx_graph.initializer.extend(initializer)

        # Add inputs
        onnx_graph.input.extend(
            make_tensor_value_info(
                name=name,
                elem_type=value_info[name][0],
                shape=value_info[name][1])
            for name in leaf_tensors)

        # Add outputs
        onnx_graph.output.extend(
            make_tensor_value_info(
                name=name,
                elem_type=value_info[name][0],
                shape=value_info[name][1])
            for name in set(graph_def.output))

        # Add constants
        if constants is not None:
            for k, v in constants.items():
                onnx_graph.initializer.extend(
                    [numpy_helper.from_array(v, name=k)])

        # Add nodes
        shapes, ssa_names, ssa_outputs = {}, {}, defaultdict(int)

        for op in graph_def.op:
            # Get the shape of inputs and outputs, preferring the live
            # workspace tensor over the declared value_info.
            for name in itertools.chain(op.input, op.output):
                if ws and ws.HasTensor(name):
                    blob = ws.FetchTensor(name)
                    if hasattr(blob, 'shape'):
                        shapes[name] = blob.shape
                else:
                    shapes[name] = value_info[name][1]

            # SSA rewritten
            op, shapes, ssa_names, ssa_outputs = \
                cls._ssa_rewrite(op, shapes, ssa_names, ssa_outputs)

            # Try to translate op => nodes
            nodes, const_tensors = get_nodes_def(op, shapes, ws)

            # Directly convert outputs as const tensors if necessary
            if None in nodes:
                const_tensors = [
                    numpy_helper.from_array(
                        ws.FetchTensor(name), name=name)
                            for name in op.output]
            else:
                onnx_graph.node.extend(nodes)

            # Add const tensors (and mirror them as graph inputs)
            if const_tensors is not None:
                onnx_graph.initializer.extend(const_tensors)
                onnx_graph.input.extend([
                    cls._extract_value_info(tensor)
                        for tensor in const_tensors])

        if verbose: print(printable_graph(onnx_graph))

        return onnx_graph
Beispiel #14
0
def onnx_pretty(g, args=None):
    """Return a human-readable rendering of the ONNX graph built from *g*.

    g -- converter graph exposing make_model(); args.input names the source.
    """
    proto = g.make_model("converted from {}".format(args.input))
    return helper.printable_graph(proto.graph)
    def build_onnx_graph(self, layer_configs, weights_file_path, verbose=True):
        """Iterate over all layer configs (parsed from the DarkNet representation
        of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
        file and return the graph definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        for layer_name in layer_configs.keys():
            layer_dict = layer_configs[layer_name]
            major_node_specs = self._make_onnx_node(layer_name, layer_dict)
            if major_node_specs.name is not None:
                self.major_node_specs.append(major_node_specs)

        # YOLOv3 has three output heads; here the three output nodes are
        # combined into one via transpose + concat.
        transposes = list()
        total_grids = 0
        for tensor_name in self.output_tensors.keys():
            # Count elements of this head, then divide out the per-box
            # channel count (classes + 5) to get its number of grid boxes.
            grids = 1
            for i in self.output_tensors[tensor_name]:
                grids *= i
            total_grids += grids / (self.classes + 5)
            output_dims = [
                self.batch_size,
            ] + self.output_tensors[tensor_name]
            layer_name, layer_dict = tensor_name, {'output_dims': output_dims}
            transpose_name = self._make_transpose_node(layer_name, layer_dict)
            transposes.append(transpose_name)

        # NOTE(review): 'ouputs' (sic) is the concat node/output name;
        # correcting the spelling would change the model's output name for
        # downstream consumers, so it is left as-is.
        output_name = 'ouputs'
        route_node = helper.make_node(
            'Concat',
            axis=1,
            inputs=transposes,
            outputs=[output_name],
            name=output_name,
        )
        self._nodes.append(route_node)
        # YOLOv3 exposes three outputs (three make_tensor_value_info calls);
        # this YOLOv4 variant instead merges them into a single output node.
        output_dims = (self.batch_size, int(total_grids), self.classes + 5)
        outputs = [
            helper.make_tensor_value_info(output_name, TensorProto.FLOAT,
                                          output_dims)
        ]

        # Graph inputs and weight loading.
        inputs = [self.input_tensor]
        weight_loader = WeightLoader(weights_file_path)
        initializer = list()
        # If a layer has parameters, add them to the initializer and input lists.
        for layer_name in self.param_dict.keys():
            _, layer_type = layer_name.split('_', 1)
            params = self.param_dict[layer_name]
            if layer_type == 'convolutional':
                initializer_layer, inputs_layer = weight_loader.load_conv_weights(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
            elif layer_type == 'upsample':
                initializer_layer, inputs_layer = weight_loader.load_resize_scales(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
            elif 'reshape' in layer_type:  # 'reshape' appears in the layer name
                initializer_layer, inputs_layer = weight_loader.load_reshape_scales(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
        del weight_loader
        self.graph_def = helper.make_graph(
            nodes=self._nodes,
            name='YOLOv4-608',
            inputs=
            inputs,  # make_tensor_value_info entries; inputs act like TF placeholders. Nodes themselves reference parameters by name only, without value info.
            outputs=outputs,
            initializer=initializer  # make_tensor entries: the actual weight data, fixed into the graph
        )
        if verbose:
            print(helper.printable_graph(self.graph_def))
        model_def = helper.make_model(self.graph_def,
                                      producer_name='NVIDIA TensorRT sample')
        return model_def
Beispiel #16
0
# Print a human-readable dump of the ONNX model given on the command line.
import os
import time
import sys
import onnx
from onnx import helper
import pdb  # kept available for ad-hoc debugging (pdb.set_trace())

model_path = str(sys.argv[1])  # path to the .onnx file
onnx_model = onnx.load(model_path)
print(helper.printable_graph(onnx_model.graph))
Beispiel #17
0
def onnx_pretty(g, args=None):
    """Pretty print graph.

    Keyword arguments:
    g -- graph wrapper exposing make_model() (tf2onnx-style; TODO confirm)
    args -- namespace providing .input, .inputs and .outputs attributes.
            NOTE(review): despite the None default, args is dereferenced
            unconditionally, so callers must always pass it -- verify callers
            before tightening the signature.
    """
    # f-string replaces the dated str.format call; same resulting doc string.
    model_proto = g.make_model(f"converted from {args.input}",
                               args.inputs, args.outputs)
    return helper.printable_graph(model_proto.graph)
Beispiel #18
0
    def build_onnx_graph(self, layer_configs, weights_file_path, verbose=True):
        """Iterate over all layer configs (parsed from the DarkNet representation
        of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
        file and return the graph definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        # Translate every parsed DarkNet layer into its ONNX node(s); remember
        # the specs of layers that produced an output so later layers can
        # route to them.
        for name, config in layer_configs.items():
            specs = self._make_onnx_node(name, config)
            if specs.name is not None:
                self.major_node_specs.append(specs)

        # Declare the graph outputs: one value_info per YOLO output tensor,
        # with the batch dimension prepended to the stored per-tensor dims.
        outputs = [
            helper.make_tensor_value_info(
                tensor_name,
                TensorProto.FLOAT,
                [self.batch_size, ] + self.output_tensors[tensor_name])
            for tensor_name in self.output_tensors.keys()
        ]

        inputs = [self.input_tensor]
        weight_loader = WeightLoader(weights_file_path)
        initializer = list()
        # Layers with parameters contribute both initializers (the actual
        # weight data) and extra graph inputs (their value_info entries).
        for name in self.param_dict.keys():
            _, kind = name.split('_', 1)
            params = self.param_dict[name]
            if kind == 'convolutional':
                init_part, input_part = weight_loader.load_conv_weights(params)
            elif kind == 'upsample':
                init_part, input_part = weight_loader.load_resize_scales(params)
            else:
                continue
            initializer.extend(init_part)
            inputs.extend(input_part)
        # Drop the loader as soon as all weights have been consumed.
        del weight_loader

        # Assemble the graph, optionally print it, then wrap it in a model.
        self.graph_def = helper.make_graph(nodes=self._nodes,
                                           name='YOLOv3-608',
                                           inputs=inputs,
                                           outputs=outputs,
                                           initializer=initializer)
        if verbose:
            print(helper.printable_graph(self.graph_def))
        model_def = helper.make_model(self.graph_def,
                                      producer_name='NVIDIA TensorRT sample')
        return model_def
Beispiel #19
0
 def getPrintableModel(self, model):
     """Return a human-readable string rendering of *model*'s graph."""
     graph = model.graph
     return helper.printable_graph(graph)
Beispiel #20
0
import sys
import onnx
from onnx import helper

# Dump a human-readable rendering of an ONNX model's graph to a text file.
# Usage: <script> <model.onnx> <output.txt>
if len(sys.argv) != 3:
    # Fixed message grammar; exit with a failure status instead of quit(),
    # which is intended for interactive sessions only.
    print("not enough args: expected <model.onnx> <output.txt>")
    sys.exit(1)

model = onnx.load(sys.argv[1])

with open(sys.argv[2], "w") as f:
    f.write(helper.printable_graph(model.graph))

Beispiel #21
0
    def build_onnx_graph(self,
                         layer_configs,
                         weights_file_path,
                         neck,
                         sigmoid=False,
                         verbose=True):
        """Iterate over all layer configs (parsed from the DarkNet representation
        of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
        file and return the graph definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        neck -- neck architecture name; 'FPN' reverses the per-scale output order
        sigmoid -- if True, append a Sigmoid node after the final Concat (default: False)
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        # Translate each parsed DarkNet layer into ONNX node(s); keep the specs
        # of layers that produced an output so later layers can route to them.
        for layer_name in layer_configs.keys():
            layer_dict = layer_configs[layer_name]
            major_node_specs = self._make_onnx_node(layer_name, layer_dict)
            if major_node_specs.name is not None:
                self.major_node_specs.append(major_node_specs)

        # Add one Transpose node per YOLO output scale and accumulate the total
        # number of prediction rows; each row carries (classes + 5) values.
        transposes = list()
        total_grids = 0
        for tensor_name in self.output_tensors.keys():
            grids = 1
            for i in self.output_tensors[tensor_name]:
                grids *= i
            total_grids += grids / (self.classes + 5)
            output_dims = [self.batch_size, ] + \
                self.output_tensors[tensor_name]
            layer_name, layer_dict = tensor_name, {'output_dims': output_dims}
            transpose_name = self._make_transpose_node(
                layer_name, layer_dict, len(self.output_tensors))
            transposes.append(transpose_name)

        # An FPN neck emits scales in the opposite order; reverse so the
        # concatenated output layout stays consistent across neck types.
        if neck == 'FPN':
            transposes = transposes[::-1]

        # NOTE(review): 'ouputs' is a misspelling of 'outputs', but it is the
        # runtime tensor name -- downstream consumers may reference it, so do
        # not rename without checking all callers.
        output_name = 'sigmoid' if sigmoid else 'ouputs'
        route_node = helper.make_node(
            'Concat',
            axis=1,
            inputs=transposes,
            outputs=[output_name],
            name=output_name,
        )
        self._nodes.append(route_node)

        # Optionally pass the concatenated predictions through a Sigmoid; the
        # final graph output is then named 'ouputs' in both branches.
        if sigmoid:
            sigmoid_name, output_name = output_name, 'ouputs'
            sigmoid_node = helper.make_node(
                'Sigmoid',
                inputs=[sigmoid_name],
                outputs=[output_name],
                name=output_name,
            )
            self._nodes.append(sigmoid_node)

        # Single flattened output: (batch, total prediction rows, classes + 5).
        output_dims = (self.batch_size, int(total_grids), self.classes + 5)
        outputs = [
            helper.make_tensor_value_info(output_name, TensorProto.FLOAT,
                                          output_dims)
        ]

        inputs = [self.input_tensor]
        weight_loader = WeightLoader(weights_file_path)
        initializer = list()
        # If a layer has parameters, add them to the initializer and input lists.
        for layer_name in self.param_dict.keys():
            _, layer_type = layer_name.split('_', 1)
            params = self.param_dict[layer_name]
            if layer_type == 'convolutional':
                initializer_layer, inputs_layer = weight_loader.load_conv_weights(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
            elif layer_type == 'upsample':
                initializer_layer, inputs_layer = weight_loader.load_resize_scales(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
            elif 'reshape' in layer_type:
                initializer_layer, inputs_layer = weight_loader.load_reshape_scales(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
            # NOTE(review): unclear which DarkNet config produces a 'slice'
            # layer -- TODO confirm (the original comment asked the same).
            elif 'slice' in layer_type:
                initializer_layer, inputs_layer = weight_loader.load_slice_params(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
        # Release the loader (and its underlying weights file) once done.
        del weight_loader
        self.graph_def = helper.make_graph(nodes=self._nodes,
                                           name='YOLOv4-608',
                                           inputs=inputs,
                                           outputs=outputs,
                                           initializer=initializer)
        if verbose:
            print(helper.printable_graph(self.graph_def))
        model_def = helper.make_model(self.graph_def,
                                      producer_name='NVIDIA TensorRT sample')
        return model_def