Example #1
    def test_replace_symbolic_dim(self):
        """
        Update a model with a single symbolic input dimension. After replacement, run shape inferencing to verify that
        all shapes in the model have fixed sizes.
        """
        model_path = (ort_root / "onnxruntime" / "test" / "testdata" / "CNTK" /
                      "test_LSTM.tanh.bidirectional" / "model.onnx")

        model = onnx.load_model(str(model_path))

        # validate the expected input after inferring shape info
        m2 = shape_inference.infer_shapes(model, True)
        dynamic_inputs, num_dynamic_values = check_shapes(m2.graph)
        self.assertEqual(len(dynamic_inputs), 1)
        self.assertEqual(dynamic_inputs[0].name, "Input3")
        self.assertGreater(num_dynamic_values, 0)

        # update original model
        make_dim_param_fixed(model.graph, "None", 4)

        # and validate the model no longer has dynamic values
        model = shape_inference.infer_shapes(model, True)
        dynamic_inputs, num_dynamic_values = check_shapes(model.graph)
        self.assertFalse(dynamic_inputs)
        self.assertEqual(num_dynamic_values, 0)
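
Both this test and the next rely on a check_shapes helper that is not shown. A minimal sketch, assuming it returns the graph inputs that still carry symbolic or unknown dimensions plus a count of dynamic dims across value_info and outputs (the real helper in the onnxruntime test suite may differ):

def check_shapes(graph):
    # a dim is dynamic if it has a symbolic name or no fixed value at all
    def dynamic_dims(value_info):
        dims = value_info.type.tensor_type.shape.dim
        return sum(1 for d in dims if d.dim_param or not d.HasField('dim_value'))

    dynamic_inputs = [i for i in graph.input if dynamic_dims(i) > 0]
    num_dynamic_values = sum(dynamic_dims(vi)
                             for vi in list(graph.value_info) + list(graph.output))
    return dynamic_inputs, num_dynamic_values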
Example #2
    def test_replace_input_shape(self):
        """
        Replace the entire shape for an input. This can be used when the model has inputs with unknown dimensions,
        i.e. a dimension that has no value and no symbolic name, so it is harder to replace.
        """
        model_path = ort_root / "onnxruntime" / "test" / "testdata" / "gh_issue_9671.onnx"

        model = onnx.load_model(str(model_path))

        # validate the expected input after inferring shape info
        m2 = shape_inference.infer_shapes(model, True)
        dynamic_inputs, num_dynamic_values = check_shapes(m2.graph)
        self.assertEqual(len(dynamic_inputs), 3)
        self.assertEqual(dynamic_inputs[0].name, "X1")
        self.assertEqual(dynamic_inputs[1].name, "X2")
        self.assertEqual(dynamic_inputs[2].name, "X3")
        self.assertGreater(num_dynamic_values, 0)

        # update original model
        make_input_shape_fixed(model.graph, "X1", [2, 2, 4])
        make_input_shape_fixed(model.graph, "X2", [2, 4])
        make_input_shape_fixed(model.graph, "X3", [2, 2, 4])

        # and validate the model no longer has dynamic values
        model = shape_inference.infer_shapes(model, True)
        dynamic_inputs, num_dynamic_values = check_shapes(model.graph)
        self.assertFalse(dynamic_inputs)
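
For reference, the two helpers exercised above can also be used stand-alone. A minimal sketch, where the import path (onnxruntime.tools.onnx_model_utils in recent onnxruntime releases) and the file names are assumptions:

import onnx
from onnx import shape_inference
from onnxruntime.tools.onnx_model_utils import make_dim_param_fixed, make_input_shape_fixed

model = onnx.load('model.onnx')                  # illustrative path
make_dim_param_fixed(model.graph, 'batch', 1)    # pin one named symbolic dim
make_input_shape_fixed(model.graph, 'X1', [1, 3, 224, 224])  # or fix a whole input shape
model = shape_inference.infer_shapes(model, True)
onnx.save(model, 'model.fixed.onnx')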
Example #3
    def analyze_constant_of_shape(self):
        value_info = {vi.name: vi for vi in self.model.graph.value_info}
        traversal = list()
        tensor_to_be_value_analyzed = list()

        for node in self.model.graph.node:
            if node.op_type == 'ConstantOfShape':
                vi = value_info[node.output[0]]
                dtype = vi.type.tensor_type.elem_type
                rank = len(vi.type.tensor_type.shape.dim)
                start_vertex = node.output[0]
                traversal.extend(self.depth_first_search(start_vertex, end_op_type='Shape'))
                tensor_to_be_value_analyzed.append(start_vertex)
                vi = make_tensor_value_info(node.output[0], dtype, ('',) * rank)
                self.model.graph.output.append(vi)

        new_nodes = list(filter(lambda node: node not in traversal, self.model.graph.node))

        if tensor_to_be_value_analyzed:
            self.assign_value_analyzed_shapes_to_initializer(
                value_dict=self.run_onnx_model(self.model.SerializeToString(), tensor_to_be_value_analyzed))

            # rebuild model graph without nodes in shaping subgraph
            self.model = utils.rebuild_model(self.model, new_nodes)
            check_model(self.model)
        self.model = shape_inference.infer_shapes(self.model)
Example #4
def mdf_to_onnx(mdf_model):
    """
    Takes an MDF model object and returns a list of ONNX models for each graph in the model.
    """

    # An MDF model can have multiple graphs. Each graph will be an onnx model
    onnx_models = []
    for graph in mdf_model.graphs:
        print("Processing Graph ", graph.id)

        # Use edges and nodes to construct execution order
        nodenames_in_execution_order = []
        evaluable_graph = EvaluableGraph(graph, verbose=False)
        for idx, edge in enumerate(evaluable_graph.ordered_edges):
            if idx == 0:
                nodenames_in_execution_order.append(edge.sender)
            nodenames_in_execution_order.append(edge.receiver)

        # print(nodenames_in_execution_order, graph.nodes, graph.edges)

        # Generate onnx graph
        onnx_graph = generate_onnx_graph(graph, nodenames_in_execution_order)

        # Make an onnx model from graph
        onnx_model = helper.make_model(onnx_graph)

        # Infer shapes
        onnx_model = shape_inference.infer_shapes(onnx_model)

        # Check model
        onnx.checker.check_model(onnx_model)

        onnx_models.append(onnx_model)

    return onnx_models
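
A plausible call site, assuming the MDF model is loaded via modeci_mdf.utils.load_mdf (the import path and file names are assumptions):

from modeci_mdf.utils import load_mdf  # assumed import path

mdf_model = load_mdf('model.yaml')     # illustrative file name
for onnx_model in mdf_to_onnx(mdf_model):
    onnx.save(onnx_model, onnx_model.graph.name + '.onnx')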
Example #5
 def infer_node_shape(self, node, inputs):
     if node.op_type == 'Gemm':
         bias = self.get_initializer(node.input[2])
         shape = list(inputs[0].shape)[:-2] + list(bias.shape)
     elif len(inputs) == 1:
         x = onnx.helper.make_tensor_value_info(node.input[0],
                                                onnx.TensorProto.FLOAT,
                                                inputs[0].shape)
         outputs = [
             onnx.helper.make_tensor_value_info(o, onnx.TensorProto.FLOAT,
                                                None) for o in node.output
         ]
         graph = onnx.helper.make_graph(
             nodes=[node],
             name='infer-node-shape',
             inputs=[x],
             outputs=outputs,
             value_info=[],
             initializer=self.onnx_model.graph.initializer)
         model = onnx.helper.make_model(graph, producer_name='lwnn-nhwc')
         model2 = infer_shapes(model)
         shape = [
             int(dim.dim_value)
             for dim in model2.graph.output[0].type.tensor_type.shape.dim
         ]
     else:
         raise NotImplementedError('infer %s(%s) shape is not supported' %
                                   (node.name, node.op_type))
     if (len(shape) == 0):
         raise Exception("can't infer %s(%s) shape" %
                         (node.name, node.op_type))
     return shape
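
The single-node branch above generalizes into a small probe: wrap one node in a throwaway model and let ONNX shape inference do the work. A sketch assuming a float, single-input node; ops that read initializers (like the Gemm branch) would need those passed in as well:

import onnx
from onnx import helper, TensorProto
from onnx.shape_inference import infer_shapes

def probe_output_shape(node, input_shape, initializers=()):
    # build a one-node model whose only input feeds the node under test
    x = helper.make_tensor_value_info(node.input[0], TensorProto.FLOAT, input_shape)
    y = helper.make_tensor_value_info(node.output[0], TensorProto.FLOAT, None)
    graph = helper.make_graph([node], 'probe', [x], [y], initializer=list(initializers))
    inferred = infer_shapes(helper.make_model(graph))
    dims = inferred.graph.output[0].type.tensor_type.shape.dim
    return [d.dim_value for d in dims]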
Example #6
    def add_output(self, output_ids):
        if not isinstance(output_ids, (tuple, list, set)):
            output_ids = [output_ids]

        inferred_model = shape_inference.infer_shapes(self.model)
        all_blobs_info = {
            value_info.name: value_info
            for value_info in inferred_model.graph.value_info
        }

        extra_outputs = []
        for output_id in output_ids:
            value_info = all_blobs_info.get(output_id, None)
            if value_info is None:
                print('WARNING! No blob with name {}'.format(output_id))
                extra_outputs.append(
                    helper.make_empty_tensor_value_info(output_id))
            else:
                extra_outputs.append(value_info)

        self.model.graph.output.extend(extra_outputs)
        self.output_names.extend(output_ids)
        self.session = onnxruntime.InferenceSession(
            self.model.SerializeToString(), self.sess_options)
Example #7
def extend_model_outputs(model):
    model_ = shape_inference.infer_shapes(model)
    graph = model_.graph

    output_names = [_.name for _ in graph.output]
    for vi in graph.value_info:
        if vi.name not in output_names:
            graph.output.extend([vi])

    output_names = [_.name for _ in graph.output]
    for i in graph.input:
        if i.name not in output_names:
            graph.output.extend([i])

    output_names = [_.name for _ in graph.output]
    #for node in graph.node:
    #    for i in node.input:
    #        if i not in output_names:
    #            graph.output.extend([helper.make_tensor_value_info(i, 1, None)])
    #
    #output_names = [_.name for _ in graph.output]
    for node in graph.node:
        for o in node.output:
            if o not in output_names:
                graph.output.extend(
                    [helper.make_tensor_value_info(o, 1, None)])

    return model_
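
This enables a common debugging flow: expose everything, then fetch everything in one run. A sketch assuming onnxruntime and a model whose single input is named 'input' (both assumptions; note the function above declares missing value_info as FLOAT via elem_type 1, so graphs with non-float intermediates may need adjusting):

import numpy as np
import onnx
import onnxruntime

model = onnx.load('model.onnx')                         # illustrative path
extended = extend_model_outputs(model)
sess = onnxruntime.InferenceSession(extended.SerializeToString())
fetches = [o.name for o in extended.graph.output]
x = np.random.rand(1, 3, 224, 224).astype(np.float32)   # illustrative shape
results = dict(zip(fetches, sess.run(fetches, {'input': x})))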
Example #8
def infer_shape(model_file):
    onnx_model = onnx.load(model_file)
    onnx.checker.check_model(model_file)

    processed_model = shape_inference.infer_shapes(onnx_model)
    infer_result = runtime_infer(processed_model)
    return processed_model, infer_result
Example #9
 def create_graph_from_onnx_model(onnx_model_proto):
     """Create Graph loading onnx model proto."""
     # apply shape inference on the model
     inferred_model = shape_inference.infer_shapes(onnx_model_proto)
     graph_proto = inferred_model.graph
     main_graph = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     return main_graph
Example #10
def polish_model(model, internals=True, extras=True, checking=True):
    """
    polish_model enhanced for inference
    """

    if checking:
        check_model(model)
    strip_doc_string(model)
    if internals:
        passes = optimizer.get_available_passes()
        passes = list(
            filter(lambda name: not name.startswith('split_'), passes))  #
        logger.debug('builtin optimizations to perform in ONNX:\n\t%s', passes)
        model = optimizer.optimize(model, passes=passes)
    if extras:
        for optimize in (
                optimize_model_skip_op_for_inference,
                optimize_model_strip_initializer,
                optimize_model_cast,
                optimize_model_slice,
        ):
            model = optimize(model)
    model = infer_shapes(model)
    if checking:
        check_model(model)
    return model
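
Note that onnx.optimizer was removed from the onnx package (around onnx 1.9), so the internals branch above only works with older onnx releases. With newer ones, roughly equivalent passes live in the separate onnxoptimizer project; a hedged adaptation:

import onnxoptimizer as optimizer  # assumes `pip install onnxoptimizer`

passes = [p for p in optimizer.get_available_passes()
          if not p.startswith('split_')]
model = optimizer.optimize(model, passes=passes)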
Example #11
def from_onnx(filepath,
              infer_shapes=True,
              use_filename=True,
              lower=False,
              verbose=False):
    onnx_proto, graph_name = load_onnx_proto(filepath)
    onnx_proto = update_node_names(onnx_proto)
    onnx_proto = update_edge_names(onnx_proto)
    attr = get_model_attributes(onnx_proto)
    if infer_shapes:
        onnx_graph = shape_inference.infer_shapes(onnx_proto).graph
    else:
        onnx_graph = onnx_proto.graph
    for n in onnx_graph.node:
        if n.op_type not in NODE_NAMES and n.name not in NODE_NAMES:
            raise RuntimeError(
                f"Support for {n.op_type} or {n.name} is not currently included in PolyMath"
            )

    graph = generate_srdfg(onnx_graph, verbose=verbose)
    if use_filename:
        graph_name = filepath.split("/")[-1].split(".")[0]
        graph.set_name(graph_name)

    if lower:
        lower_pass = pm.Lower(ONNX_OP_NAMES)
        graph = lower_pass(graph)

    return graph
Example #12
def generate_model(rnn_type, input_dim, hidden_dim, bidirectional, layers, model_name, batch_one=True, has_seq_len=False, onnx_opset_ver=7):
    model = onnx.ModelProto()
    model.ir_version = IR_VERSION
    
    opset = model.opset_import.add()
    opset.domain = ''  # empty string selects the default ONNX operator domain
    opset.version = onnx_opset_ver
    num_directions = 2 if bidirectional else 1

    X = 'input'
    model.graph.input.add().CopyFrom(helper.make_tensor_value_info(X, onnx.TensorProto.FLOAT, ['s', 1 if batch_one else 'b', input_dim]))
    model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.asarray([0, 0, -1], dtype=np.int64), 'shape'))

    if has_seq_len:
        seq_len = 'seq_len'
        model.graph.input.add().CopyFrom(helper.make_tensor_value_info(seq_len, onnx.TensorProto.INT32, [1 if batch_one else 'b',]))

    gates = {'lstm':4, 'gru':3, 'rnn':1}[rnn_type]
    for i in range(layers):
        layer_input_dim = (input_dim if i == 0 else hidden_dim * num_directions)
        model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.random.rand(num_directions, gates*hidden_dim, layer_input_dim).astype(np.float32), 'W'+str(i)))
        model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.random.rand(num_directions, gates*hidden_dim, hidden_dim).astype(np.float32), 'R'+str(i)))
        model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.random.rand(num_directions, 2*gates*hidden_dim).astype(np.float32), 'B'+str(i)))
        layer_inputs = [X, 'W'+str(i), 'R'+str(i), 'B'+str(i)]
        if has_seq_len:
            layer_inputs += [seq_len]
        layer_outputs = ['layer_output_'+str(i)]
        model.graph.node.add().CopyFrom(helper.make_node(rnn_type.upper(), layer_inputs, layer_outputs, rnn_type+str(i), hidden_size=hidden_dim, direction='bidirectional' if bidirectional else 'forward'))
        model.graph.node.add().CopyFrom(helper.make_node('Transpose', layer_outputs, ['transposed_output_'+str(i)], 'transpose'+str(i), perm=[0,2,1,3]))
        model.graph.node.add().CopyFrom(helper.make_node('Reshape', ['transposed_output_'+str(i), 'shape'], ['reshaped_output_'+str(i)], 'reshape'+str(i)))
        X = 'reshaped_output_'+str(i)
    model.graph.output.add().CopyFrom(helper.make_tensor_value_info(X, onnx.TensorProto.FLOAT, ['s', 'b', hidden_dim * num_directions]))
    model = shape_inference.infer_shapes(model)
    onnx.save(model, model_name)
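
An illustrative invocation (argument values chosen arbitrarily):

generate_model('lstm', input_dim=32, hidden_dim=64, bidirectional=True,
               layers=2, model_name='lstm_2layer_bidi.onnx', onnx_opset_ver=7)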
Example #13
    def __init__(self, option, src_model_file):
        self._op_converters = {
            OnnxOpType.Abs.name: self.convert_eltwise,
            OnnxOpType.Add.name: self.convert_eltwise,
            OnnxOpType.ArgMax.name: self.convert_argmax,
            OnnxOpType.ArgMin.name: self.convert_argmax,
            OnnxOpType.AveragePool.name: self.convert_pooling,
            OnnxOpType.BatchNormalization.name: self.convert_fused_batchnorm,
            OnnxOpType.Cast.name: self.convert_cast,
            OnnxOpType.Concat.name: self.convert_concat,
            OnnxOpType.Conv.name: self.convert_conv2d,
            OnnxOpType.ConvTranspose.name: self.convert_deconv,
            OnnxOpType.DepthToSpace.name: self.convert_depth_space,
            OnnxOpType.Dropout.name: self.convert_identity,
            OnnxOpType.Div.name: self.convert_eltwise,
            OnnxOpType.Equal.name: self.convert_eltwise,
            OnnxOpType.Gather.name: self.convert_gather,
            OnnxOpType.Gemm.name: self.convert_gemm,
            OnnxOpType.GlobalAveragePool.name: self.convert_reduce,
            OnnxOpType.GlobalMaxPool.name: self.convert_reduce,
            OnnxOpType.Identity.name: self.convert_identity,
            OnnxOpType.ImageScaler.name: self.convert_imagescaler,
            OnnxOpType.LeakyRelu.name: self.convert_activation,
            OnnxOpType.Max.name: self.convert_eltwise,
            OnnxOpType.MaxPool.name: self.convert_pooling,
            OnnxOpType.MatMul.name: self.convert_matmul,
            OnnxOpType.Min.name: self.convert_eltwise,
            OnnxOpType.Mul.name: self.convert_eltwise,
            OnnxOpType.Neg.name: self.convert_eltwise,
            OnnxOpType.Pad.name: self.convert_pad,
            OnnxOpType.Pow.name: self.convert_eltwise,
            OnnxOpType.PRelu.name: self.convert_activation,
            OnnxOpType.Relu.name: self.convert_activation,
            OnnxOpType.Reshape.name: self.convert_reshape,
            OnnxOpType.Reciprocal.name: self.convert_eltwise,
            OnnxOpType.Sigmoid.name: self.convert_activation,
            OnnxOpType.Softmax.name: self.convert_softmax,
            OnnxOpType.SpaceToDepth.name: self.convert_depth_space,
            OnnxOpType.Split.name: self.convert_split,
            OnnxOpType.Sqrt.name: self.convert_eltwise,
            OnnxOpType.Squeeze.name: self.convert_squeeze,
            OnnxOpType.Sub.name: self.convert_eltwise,
            OnnxOpType.Sum.name: self.convert_eltwise,
            OnnxOpType.Tanh.name: self.convert_activation,
            OnnxOpType.Transpose.name: self.convert_transpose,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        ConverterUtil.set_filter_format(self._mace_net_def, FilterFormat.OIHW)
        onnx_model = onnx.load(src_model_file)

        polished_model = onnx.utils.polish_model(onnx_model)

        print "onnx model IR version: ", onnx_model.ir_version
        print "onnx model opset import: ", onnx_model.opset_import

        self._onnx_model = shape_inference.infer_shapes(polished_model)
        self._graph_shapes_dict = {}
        self._consts = {}
        self._replace_tensors = {}
Example #14
def inferred_model_value_info(model):
    """
    collect value/type info for an ONNX model
    """

    model = infer_shapes(model)
    graph = model.graph
    value_info = Dict()
    for item in graph.value_info:
        value_info[item.name] = dict(
            dtype=tensor_dtype(item),
            shape=tensor_shape(item),
            external=False,
        )
    for item in graph.input:
        assert item.name not in value_info
        value_info[item.name] = dict(
            dtype=tensor_dtype(item),
            shape=tensor_shape(item),
            external=True,
        )
    for item in graph.output:
        #        assert item.name not in value_info, 'bypass-model not supported'
        value_info[item.name] = dict(
            dtype=tensor_dtype(item),
            shape=tensor_shape(item),
            external=True,
        )
    return value_info
Example #15
    def infer(self):
        """
        Sanitize the graph: remove unconnected nodes, re-sort topologically, and fold constant input values.
        When possible, run shape inference on the ONNX graph to determine tensor shapes.
        """
        for i in range(3):
            count_before = len(self.graph.nodes)

            self.graph.cleanup().toposort()
            try:
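                # clear locally-annotated output shapes so ONNX shape inference
                # recomputes them from scratch on the exported model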
                for node in self.graph.nodes:
                    for o in node.outputs:
                        o.shape = None
                model = gs.export_onnx(self.graph)
                model = shape_inference.infer_shapes(model)
                self.graph = gs.import_onnx(model)
            except Exception as e:
                log.info(
                    "Shape inference could not be performed at this time:\n{}".
                    format(e))
            try:
                self.graph.fold_constants(fold_shapes=True)
            except TypeError as e:
                log.error(
                    "This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your "
                    "onnx_graphsurgeon module. Error:\n{}".format(e))
                raise

            count_after = len(self.graph.nodes)
            if count_before == count_after:
                # No new folding occurred in this iteration, so we can stop for now.
                break
Example #16
def run_check(model_path: pathlib.Path, mobile_pkg_build_config: pathlib.Path,
              logger: logging.Logger):
    '''
    Check if an ONNX model will be able to be used with the ORT Mobile pre-built package.
    :param model_path: Path to ONNX model.
    :param mobile_pkg_build_config: Configuration file used to build the ORT Mobile package.
    :param logger: Logger for output
    :return: True if supported
    '''
    logger.info(
        f'Checking if pre-built ORT Mobile package can be used with {model_path} once model is '
        'converted from ONNX to ORT format using onnxruntime.tools.convert_onnx_models_to_ort...'
    )

    model_file = model_path.resolve(strict=True)
    model = onnx.load(str(model_file))

    # we need to run shape inferencing to populate the type info for node outputs.
    # we will get warnings if the model uses ORT contrib ops (ONNX does not have shape inferencing for those),
    # and shape inferencing will be lost downstream of those.
    # TODO: add support for checking ORT format model as it will have full type/shape info for all nodes
    model_with_type_info = shape_inference.infer_shapes(model)

    return run_check_with_model(model_with_type_info, mobile_pkg_build_config,
                                logger)
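
An illustrative invocation (both paths are assumptions; the build configuration file ships with the ORT Mobile package sources):

import logging
import pathlib

logger = logging.getLogger('ort_mobile_check')
supported = run_check(pathlib.Path('model.onnx'),
                      pathlib.Path('mobile_package.required_operators.config'),
                      logger)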
Example #17
def runPytorch(img):
    torchModel = MobileFaceNet_DEX_c3()  # alternatives: MobileFaceNetVerifyAgeGender(), IR_SE_FaceNet()
    model_dict = torchModel.state_dict()
    torchWeights = torch.load(modelFileName, map_location=lambda storage, loc: storage)
    updated_dict, match_layers, mismatch_layers = weight_filler(torchWeights, model_dict)
    print("The mismatch layers %s", mismatch_layers)
    model_dict.update(updated_dict)
    torchModel.load_state_dict(model_dict)
    torchModel.eval().cpu()
    #img = img[0]
    #imgs = np.array([img, img,img, img], dtype=np.float32)
    y = torchModel.forward(torch.from_numpy(img).cpu())
    print(y.abs().sum())
    onxFileName  = "nameAgeGenderFaceBlur.onnx"
    dummy_input = torch.randn(4, 3, modelWidthHeight, modelWidthHeight)
    torch.onnx.export(torchModel.cpu(), dummy_input.cpu(), onxFileName, verbose=True)
    om = onnx.load(onxFileName)
    om = infer_shapes(om)
    om = optimize(om)
    onnx.save(om, onxFileName)
    a = om.graph
    #import pdb
    #pdb.set_trace()
    helper.printable_graph(a)
    v = a.value_info
Example #18
    def __init__(self, model, per_channel, mode, static, weight_qType, input_qType, quantization_params,
                 nodes_to_quantize, nodes_to_exclude, op_types_to_quantize):
        onnx_model = shape_inference.infer_shapes(model)
        self.model = ONNXModel(onnx_model)
        self.value_infos = {vi.name: vi for vi in onnx_model.graph.value_info}
        self.per_channel = per_channel  # weight-pack per channel
        self.mode = mode  # QuantizationMode.Value
        self.static = static  # use static quantization for inputs.
        self.fuse_dynamic_quant = False
        self.input_qType = input_qType  # quantize input type
        self.weight_qType = weight_qType  # quantize data type
        self.quantization_params = quantization_params
        self.nodes_to_quantize = nodes_to_quantize  # specific nodes to quantize
        self.nodes_to_exclude = nodes_to_exclude  # specific nodes to exclude
        self.op_types_to_quantize = op_types_to_quantize
        self.new_nodes = []

        self.check_opset_version()

        if self.mode not in quantization_modes:
            raise ValueError('unsupported quantization mode {}'.format(self.mode))

        # QuantizeRange tensor name and zero tensor name for scale and zero point calculation.
        # Used when static is False
        self.fixed_qrange_uint8_name = "fixed_quantization_range_uint8"
        self.fixed_qrange_int8_name = "fixed_quantization_range_int8"
        # For uint8 data-type, to compute zero point, we subtract rmin from 0 (represented by fixed_zero_name tensor)
        self.fixed_zero_name = "fixed_zero"
        # For int8 data-type, zero point is always zero (represented by the fixed_zero_zp_name tensor)
        self.fixed_zero_zp_name = "fixed_zero_zp"

        # List of quantized weights
        self._quantized_weights = []
        # Map of all original value names to quantized value names
        self.quantized_value_map = {}
Example #19
    def __init__(self, model_path, require_intermediates=False):
        import onnxruntime

        options = onnxruntime.SessionOptions()
        options.inter_op_num_threads = 1
        options.intra_op_num_threads = 1
        options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL

        self.session = onnxruntime.InferenceSession(model_path,
                                                    sess_options=options)
        self.inputs = [
            TensorInfo(tensor.name, tensor.shape,
                       _onnx_dtype_to_numpy[tensor.type])
            for tensor in self.session.get_inputs()
        ]
        self.outputs = [
            TensorInfo(tensor.name, tensor.shape,
                       _onnx_dtype_to_numpy[tensor.type])
            for tensor in self.session.get_outputs()
        ]

        if require_intermediates:
            import onnx
            from onnx.shape_inference import infer_shapes

            model = onnx.load_model(model_path)
            model = infer_shapes(model)

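            # promote each inferred intermediate tensor to a graph output so the
            # rebuilt session returns intermediates alongside the real outputs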
            for info in model.graph.value_info:
                output_info = model.graph.output.add()
                output_info.ParseFromString(info.SerializeToString())

            self.session = onnxruntime.InferenceSession(
                model.SerializeToString(), sess_options=options)
Example #20
def inferred_model_value_info(model):
    """
    collect value/type info for an ONNX model
    """

    assert isinstance(model,
                      onnx.ModelProto), 'model is not a ModelProto instance'

    model = infer_shapes(model)
    graph = model.graph
    value_info = Dict()
    for item in graph.value_info:
        value_info[item.name] = {
            'dtype': tensor_dtype(item),
            'shape': tensor_shape(item),
            'external': False,
        }
    for item in graph.input:
        assert item.name not in value_info
        value_info[item.name] = {
            'dtype': tensor_dtype(item),
            'shape': tensor_shape(item),
            'external': True,
        }
    for item in graph.output:
        #        assert item.name not in value_info, 'bypass-model not supported'
        value_info[item.name] = {
            'dtype': tensor_dtype(item),
            'shape': tensor_shape(item),
            'external': True,
        }
    return value_info
Example #21
 def apply(self, model):
     # hide your riches!
     hidden_ops = _hide_finn_ops(model)
     # call regular ONNX shape inference
     model = ModelWrapper(si.infer_shapes(model.model))
     # bring back hidden ops
     _restore_finn_ops(model, hidden_ops)
     return (model, False)
Example #22
    def shape_inference(self):
        """
        Infers the shapes of the outputs
        with the :epkg:`onnx` package.

        @return     A new :epkg:`ONNX` graph with defined outputs.
        """
        return shape_inference.infer_shapes(self.obj)
Example #23
def getGraph(onnx_path):
    model = onnx.load(onnx_path)
    model = shape_inference.infer_shapes(model)
    model_graph = model.graph
    graph = Graph.from_onnx(model_graph)
    graph = graph.transformed(transformers)
    graph.channel_dims = {}
    return graph
Example #24
 def load_graph(self, model_path):
     import onnx
     from onnx import shape_inference
     model = shape_inference.infer_shapes(onnx.load(model_path))
     # model = onnx.load(model_path)
     self.fill_type_shape(model.graph)
     # print ('model version:', model.model_version)
     return model.graph, self.count_ops(model.graph)
Example #25
 def create_graph_from_onnx_model(onnx_model_proto):
     """Create Graph loading onnx model proto."""
     # apply shape inference on the model
     inferred_model = shape_inference.infer_shapes(onnx_model_proto)
     graph_proto = inferred_model.graph
     opset_version = onnx_model_proto.opset_import[0].version
     main_graph = GraphUtil.create_graph_from_onnx_graph(
         graph_proto, opset_version)
     return main_graph
Example #26
def main():
    args = get_args()

    with open(args.input, "rb") as f:
        data = f.read()
        model = ModelProto()
        model.ParseFromString(data)

    if args.stats:
        ops = collections.Counter()
        for node in model.graph.node:
            ops[node.op_type] += 1
        print(ops, "\n\n")

    if args.meta:
        fields = [
            "ir_version", "producer_name", "producer_version", "name",
            "opset_import"
        ]
        for name in fields:
            value = getattr(model, name, None)
            if value:
                print("{} = {}".format(name, value))
        for i in model.metadata_props:
            print("meta.{} = {}", i.key, i.value)

    print(helper.printable_graph(model.graph))

    if args.check:
        onnx.checker.check_model(model)
        inferred_model = shape_inference.infer_shapes(model)
        onnx.checker.check_model(inferred_model)

    if args.pbtxt:
        with open(args.pbtxt, "w") as f:
            f.write(str(model.graph))

    if args.dot:
        with open(args.dot, "w") as f:
            f.write("digraph graphname {\n")
            for node in model.graph.node:
                output_name = node.name
                name = node.name
                color = ""
                if node.op_type.startswith("_"):
                    color = ' color="yellow"'
                if node.op_type == "CELL":
                    color = ' color="red"'
                f.write('"{}" [label="{},{}"{}];\n'.format(
                    output_name, node.op_type, name, color))
                for input_name in node.input:
                    parts = input_name.split(":")
                    input_name = re.sub(r"^\^", "", parts[0])
                    f.write('  "{}" -> "{}";\n'.format(input_name,
                                                       output_name))
            f.write("}\n")
Example #27
def getGraph(onnx_path):
    model = onnx.load(onnx_path)
    output_names = [node.name for node in model.graph.output]
    model = shape_inference.infer_shapes(model)
    model_graph = model.graph
    graph = Graph.from_onnx(model_graph)
    graph = graph.transformed(transformers)
    graph.channel_dims = {}

    return graph, output_names
Example #28
def getGraph(onnx_path):
    model = onnx.load(onnx_path)
    opset_version = model.opset_import[
        0].version  # get the opset version; ONNX ops are parsed differently across opset versions
    model = shape_inference.infer_shapes(model)
    model_graph = model.graph
    graph = Graph.from_onnx(model_graph)
    graph = graph.transformed(transformers)
    graph.channel_dims = {}

    return graph, opset_version
Example #29
def getGraph(onnx_path, with_opt=False):
    model = onnx.load(onnx_path)
    if with_opt:
        opt_passes = ['eliminate_nop_pad', 'eliminate_identity']
        model = optimizer.optimize(model, opt_passes)
    model = shape_inference.infer_shapes(model)
    model_graph = model.graph
    graph = Graph.from_onnx(model_graph)
    graph = graph.transformed(transformers)
    graph.channel_dims = {}
    return graph
Example #30
def execute_onnx(model, input_dict):
    """Execute given ONNX model with given named inputs to return named outputs."""

    # call ONNX shape inference to make sure we have value_info fields for all
    # the intermediate tensors in the graph
    model = si.infer_shapes(model)
    graph = model.graph
    # first, we need to make sure that every variable required by the graph has
    # some buffer associated with it. this includes graph inputs (which includes
    # the input data as well as the trained parameters) and the graph ValueInfo
    # (intermediate tensors between layers)
    # we'll keep all our buffers in this dict here:
    execution_context = dict()
    # make empty tensors for all the graph inputs and outputs
    for vi in graph.input:
        new_tensor = valueinfo_to_tensor(vi)
        execution_context[vi.name] = new_tensor
    for vi in graph.output:
        new_tensor = valueinfo_to_tensor(vi)
        execution_context[vi.name] = new_tensor
    # make empty tensors for all intermediate buffers
    for vi in graph.value_info:
        new_tensor = valueinfo_to_tensor(vi)
        execution_context[vi.name] = new_tensor
    # fill in the constants provided by the initializers (TensorProto to npy)
    for t in graph.initializer:
        execution_context[t.name] = np_helper.to_array(t)
    # fill in any inputs provided to this function
    for inp_name in input_dict.keys():
        if inp_name in execution_context:
            if execution_context[inp_name].shape == input_dict[inp_name].shape:
                execution_context[inp_name] = input_dict[inp_name]
            else:
                raise Exception(
                    "Shape mismatch for provided input %s: found %s expected %s "
                    % (
                        inp_name,
                        str(execution_context[inp_name].shape),
                        str(input_dict[inp_name].shape),
                    ))
        else:
            raise Exception("Provided input not found in graph context: %s" %
                            inp_name)
    # now call each node in the graph nodes list
    # we can simply walk down the list since the ONNX spec guarantees that it is
    # topologically sorted
    for node in graph.node:
        execute_node(node, execution_context, graph)
    # provide outputs as dict
    output_dict = dict()
    for out_tensor in graph.output:
        out_name = out_tensor.name
        output_dict[out_name] = execution_context[out_name]
    return output_dict
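
A minimal driver, assuming a model whose data input is named 'data' (name, path, and shape are assumptions; the feed shape must match the graph input exactly):

import numpy as np
import onnx

model = onnx.load('model.onnx')                   # illustrative path
x = np.zeros((1, 3, 224, 224), dtype=np.float32)  # illustrative shape
outputs = execute_onnx(model, {'data': x})
for name, arr in outputs.items():
    print(name, arr.shape)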