Example #1
 def test_ort_gradient_optimizers_use_numpy_nan_w(self):
     from onnxcustom.utils.orttraining_helper import add_loss_output
     from onnxcustom.training.optimizers import OrtGradientOptimizer
     X, y = make_regression(  # pylint: disable=W0632
         100,
         n_features=10,
         bias=2,
         random_state=0)
     X = X.astype(numpy.float32)
     y = y.astype(numpy.float32)
     w = (numpy.random.rand(y.shape[0]) + 1).astype(X.dtype)
     X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
     reg = LinearRegression()
     reg.fit(X_train, y_train, sample_weight=w_train)
     reg.coef_ = reg.coef_.reshape((1, -1))
     onx = to_onnx(reg,
                   X_train,
                   target_opset=opset,
                   black_op={'LinearRegressor'})
     set_model_props(onx, {'info': 'unit test'})
     onx_loss = add_loss_output(onx, weight_name='weight')
     inits = ['intercept', 'coef']
     train_session = OrtGradientOptimizer(onx_loss,
                                          inits,
                                          learning_rate=1e3)
     self.assertRaise(
         lambda: train_session.fit(
             X_train, y_train, w_train, use_numpy=True), ConvergenceError)
Example #2
def overwrite_opset(model, new_opset):
    """
    Overwrites the main opset in an ONNX file.
    Does not change any node definition.

    :param model: ONNX model
    :param new_opset: new opset
    :return: ONNX model
    """
    graph = helper.make_graph(
        model.graph.node, model.graph.name, model.graph.input,
        model.graph.output, model.graph.initializer)
    onnx_model = helper.make_model(graph)
    onnx_model.ir_version = model.ir_version
    onnx_model.producer_name = model.producer_name
    onnx_model.producer_version = model.producer_version
    onnx_model.domain = model.domain
    onnx_model.model_version = model.model_version
    onnx_model.doc_string = model.doc_string
    if len(model.metadata_props) > 0:  # pragma: no cover
        values = {p.key: p.value for p in model.metadata_props}
        helper.set_model_props(onnx_model, values)

    del onnx_model.opset_import[:]  # pylint: disable=E1101
    for oimp in model.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        if oimp.domain == '':
            op_set.domain = oimp.domain
            op_set.version = new_opset
        else:
            op_set.domain = oimp.domain
            op_set.version = oimp.version
    return onnx_model
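
Usage note: a minimal sketch, assuming overwrite_opset is importable from the module above; the tiny Identity model is illustrative only.

from onnx import TensorProto, helper

graph = helper.make_graph(
    [helper.make_node('Identity', ['X'], ['Y'])], 'tiny',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1])])
model = helper.make_model(
    graph, opset_imports=[helper.make_opsetid('', 13)])
updated = overwrite_opset(model, 15)
assert updated.opset_import[0].version == 15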
Example #3
 def test_ort_gradient_optimizers_use_numpy_w_l1(self):
     from onnxcustom.utils.orttraining_helper import add_loss_output
     from onnxcustom.training.optimizers import OrtGradientOptimizer
     X, y = make_regression(  # pylint: disable=W0632
         100,
         n_features=10,
         bias=2,
         random_state=0)
     X = X.astype(numpy.float32)
     y = y.astype(numpy.float32)
     w = (numpy.random.rand(y.shape[0]) + 1).astype(X.dtype)
     X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
     reg = LinearRegression()
     reg.fit(X_train, y_train, sample_weight=w_train)
     reg.coef_ = reg.coef_.reshape((1, -1))
     onx = to_onnx(reg,
                   X_train,
                   target_opset=opset,
                   black_op={'LinearRegressor'})
     set_model_props(onx, {'info': 'unit test'})
     onx_loss = add_loss_output(onx, weight_name='weight', score_name='l1')
     inits = ['intercept', 'coef']
     train_session = OrtGradientOptimizer(onx_loss,
                                          inits,
                                          learning_rate=1e-3)
     self.assertRaise(lambda: train_session.get_state(), AttributeError)
     train_session.fit(X_train, y_train, w_train, use_numpy=True)
     state_tensors = train_session.get_state()
     self.assertEqual(len(state_tensors), 2)
     r = repr(train_session)
     self.assertIn("OrtGradientOptimizer(model_onnx=", r)
     self.assertIn("learning_rate='invscaling'", r)
     losses = train_session.train_losses_
     self.assertGreater(len(losses), 1)
     self.assertFalse(any(map(numpy.isnan, losses)))
Example #4
def GenerateModel(model_name):
    nodes = [
        helper.make_node("Sigmoid", ["X"], ["Y"], "sigmoid"),
    ]

    graph = helper.make_graph(
        nodes,
        "NNAPI_Internal_uint8_Test",
        [helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])],
        [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])],
    )

    model = helper.make_model(graph)

    # Add meta data
    model.doc_string = 'This is doc_string'
    model.producer_name = 'TensorTorch'
    model.model_version = 12345
    model.domain = 'ai.onnx.ml'
    helper.set_model_props(
        model, {
            'I am key 1!': 'I am value 1!',
            '': 'Value for empty key!',
            'Key for empty value!': '',
        })
    onnx.save(model, model_name)
Example #5
 def test_ort_gradient_optimizers_use_numpy_nesterov(self):
     from onnxcustom.utils.orttraining_helper import add_loss_output
     from onnxcustom.training.optimizers import OrtGradientOptimizer
     X, y = make_regression(  # pylint: disable=W0632
         100,
         n_features=10,
         bias=2,
         random_state=0)
     X = X.astype(numpy.float32)
     y = y.astype(numpy.float32)
     X_train, _, y_train, __ = train_test_split(X, y)
     reg = LinearRegression()
     reg.fit(X_train, y_train)
     reg.coef_ = reg.coef_.reshape((1, -1))
     onx = to_onnx(reg,
                   X_train,
                   target_opset=opset,
                   black_op={'LinearRegressor'})
     set_model_props(onx, {'info': 'unit test'})
     onx_loss = add_loss_output(onx)
     inits = ['intercept', 'coef']
     self.assertRaise(
         lambda: OrtGradientOptimizer(
             onx_loss, inits, learning_rate="Nesterov"),
         NotImplementedError)
Example #6
def GenerateModel(model_name):
    nodes = [
        helper.make_node("Sigmoid", ["X"], ["Y"], "sigmoid"),
    ]

    graph = helper.make_graph(
        nodes,
        "NNAPI_Internal_uint8_Test",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 3])],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 3])],
    )

    model = helper.make_model(graph)

    # Add meta data
    model.doc_string = "This is doc_string"
    model.producer_name = "TensorTorch"
    model.model_version = 12345
    model.domain = "ai.onnx.ml"
    helper.set_model_props(
        model,
        {
            "I am key 1!": "I am value 1!",
            "": "Value for empty key!",
            "Key for empty value!": "",
        },
    )
    onnx.save(model, model_name)
Example #7
    def optimize_model_proto(onnx_model_proto, debug=False):
        """Optimize the model proto, for example: eliminating all useless Transpose pairs.

        Returns:
            model proto after optimization, if the optimizer ran successfully,
            or None, if an exception happened
        """
        try:
            kwargs = GraphUtil.get_onnx_model_properties(onnx_model_proto)
            graph = GraphUtil.create_graph_from_onnx_model(onnx_model_proto)
            graph = GraphUtil.optimize_graph(graph, debug)
            model_proto = graph.make_model(
                onnx_model_proto.graph.doc_string,
                graph_name=onnx_model_proto.graph.name,
                **kwargs)

            if onnx_model_proto.metadata_props:
                metadata_props = {
                    p.key: p.value
                    for p in onnx_model_proto.metadata_props
                }
                helper.set_model_props(model_proto, metadata_props)
            return model_proto
        except Exception:
            # sometimes, onnx shape inference will fail for some reason, in this case,
            # we just log the error, and skip the transpose optimizer.
            type_, value_, traceback_ = sys.exc_info()
            ex_ext = traceback.format_exception(type_, value_, traceback_)
            print("NON-CRITICAL error in optimizer: ", ex_ext)
            return None
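
Usage note: a sketch for this variant, which returns None on failure, assuming the module's own imports (GraphUtil, helper) are available; the path is illustrative.

import onnx

model_proto = onnx.load('model.onnx')  # illustrative path
optimized = optimize_model_proto(model_proto, debug=False)
if optimized is None:
    # optimization is best effort, fall back to the original model
    optimized = model_proto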
Example #8
    def test_model_metadata_props(self):  # type: () -> None
        graph = helper.make_graph([], "my graph", [], [])
        model_def = helper.make_model(graph, doc_string='test')
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)  # helper replaces, so no dupe

        dupe = model_def.metadata_props.add()
        dupe.key = 'Title'
        dupe.value = 'Other'
        self.assertRaises(checker.ValidationError, checker.check_model, model_def)
Example #9
    def wtest_ort_gradient_optimizers_fw_nesterov_binary_mlp(
            self, use_weight=True):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (
            LearningRateSGDNesterov)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        X, y = make_classification(  # pylint: disable=W0632
            100, n_features=10, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = MLPClassifier(solver='sgd')
        reg.fit(X_train, y_train)
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'},
                      options={'zipmap': False})
        onx = select_model_inputs_outputs(onx,
                                          outputs=['out_activations_result'])
        self.assertIn("output: name='out_activations_result'",
                      onnx_simple_text_plot(onx))
        set_model_props(onx, {'info': 'unit test'})
        onx = onnx_rename_weights(onx)
        inits = [
            'I0_coefficient', 'I1_intercepts', 'I2_coefficient1',
            'I3_intercepts1'
        ]

        train_session = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGDNesterov(1e-4,
                                                  nesterov=False,
                                                  momentum=0.9),
            learning_loss=NegLogLearningLoss(),
            warm_start=False,
            max_iter=100,
            batch_size=10)
        self.assertIsInstance(train_session.learning_loss, NegLogLearningLoss)
        self.assertEqual(train_session.learning_loss.eps, 1e-5)
        if use_weight:
            train_session.fit(X_train, y_train, w_train)
        else:
            train_session.fit(X_train, y_train)
        temp = get_temp_folder(
            __file__, "temp_ort_gradient_optimizers_fw_nesterov_binary_mlp%d" %
            use_weight)
        train_session.save_onnx_graph(temp)
Example #10
    def test_ort_gradient_optimizers_use_numpy_pickle(self):
        from onnxcustom.utils.orttraining_helper import add_loss_output
        from onnxcustom.training.optimizers import OrtGradientOptimizer
        X, y = make_regression(  # pylint: disable=W0632
            100,
            n_features=10,
            bias=2,
            random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.float32)
        X_train, _, y_train, __ = train_test_split(X, y)
        reg = LinearRegression()
        reg.fit(X_train, y_train)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'})
        set_model_props(onx, {'info': 'unit test'})
        onx_loss = add_loss_output(onx)
        inits = ['intercept', 'coef']
        train_session0 = OrtGradientOptimizer(onx_loss, inits)

        st = io.BytesIO()
        pickle.dump(train_session0, st)
        st2 = io.BytesIO(st.getvalue())
        train_session1 = pickle.load(st2)

        train_session1.fit(X_train, y_train, use_numpy=True)

        st = io.BytesIO()
        pickle.dump(train_session1, st)
        st2 = io.BytesIO(st.getvalue())
        train_session = pickle.load(st2)
        state_tensors = train_session.get_state()
        self.assertEqual(len(state_tensors), 2)

        train_session.fit(X_train, y_train, use_numpy=True)
        state_tensors = train_session.get_state()
        self.assertEqual(len(state_tensors), 2)
        r = repr(train_session)
        self.assertIn("OrtGradientOptimizer(model_onnx=", r)
        self.assertIn("learning_rate='invscaling'", r)
        losses = train_session.train_losses_
        self.assertGreater(len(losses), 1)
        self.assertFalse(any(map(numpy.isnan, losses)))
Example #11
def _finalize_new_onnx(graph, onx):
    onnx_model = helper.make_model(graph)
    onnx_model.ir_version = onx.ir_version
    onnx_model.producer_name = onx.producer_name
    onnx_model.producer_version = onx.producer_version
    onnx_model.domain = onx.domain
    onnx_model.model_version = onx.model_version
    onnx_model.doc_string = onx.doc_string
    if len(onx.metadata_props) > 0:  # pragma: no cover
        values = {p.key: p.value for p in onx.metadata_props}
        helper.set_model_props(onnx_model, values)

    del onnx_model.opset_import[:]  # pylint: disable=E1101
    for oimp in onx.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        op_set.domain = oimp.domain
        op_set.version = oimp.version
    return onnx_model
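
Usage note: a self-contained sketch of why _finalize_new_onnx copies model-level fields by hand; helper.make_model returns a fresh ModelProto, so metadata set on the source model does not survive a rebuild from its graph alone.

from onnx import TensorProto, helper

graph = helper.make_graph(
    [helper.make_node('Identity', ['X'], ['Y'])], 'g',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1])])
source = helper.make_model(graph)
helper.set_model_props(source, {'info': 'unit test'})
rebuilt = helper.make_model(source.graph)  # metadata is lost here
assert len(source.metadata_props) == 1
assert len(rebuilt.metadata_props) == 0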
Example #12
  def __make_onnx_model(self) -> ModelProto:
    """Make onnx model.

    Returns:
      onnx model.
    """
    graph = helper.make_graph(self.__onnx_nodes.onnx_nodes,
                              'open-source-kaldi-onnx',
                              self.__input_with_initializers,
                              list(self.__name_to_output_tensor.values()),
                              self.__initializers,
                              value_info=self.__internal_inputs)
    onnx_model = helper.make_model(graph)
    helper.set_model_props(onnx_model,
                           {'left_context': str(self.__left_context),
                            'right_context': str(self.__right_context),
                            'chunk_size': str(self.__chunk_size),
                            'subsample_factor': str(self.__subsample_factor)})
    checker.check_model(onnx_model)
    return onnx_model
Example #13
    def make_model(self):
        """
        Produces the new ONNX graph with the updated sets of nodes.
        """
        inputs = self._onnx_model.graph.input
        outputs = self._onnx_model.graph.output
        inits = [
            init[1] for init in sorted(self._names.items())
            if not hasattr(init[1], 'domain')
        ]
        nodes = [
            node[1] for node in sorted(self._names.items())
            if hasattr(node[1], 'domain')
        ]
        nodes = ensure_topological_order(inputs, inits, nodes)

        if self.verbose:
            print(  # pragma: no cover
                "[Tf2OnnxConvert.make_node] %d nodes %d inputs %d "
                "outputs %d initializers"
                "" % (len(nodes), len(inputs), len(outputs), len(inits)))
        graph = make_graph(nodes, self._onnx_model.graph.name, inputs, outputs,
                           inits)
        onnx_model = make_model(graph)
        onnx_model.ir_version = self._onnx_model.ir_version
        onnx_model.producer_name = self._onnx_model.producer_name + "-mlprodict"
        onnx_model.producer_version = self._onnx_model.producer_version
        onnx_model.domain = self._onnx_model.domain
        onnx_model.model_version = self._onnx_model.model_version
        onnx_model.doc_string = self._onnx_model.doc_string
        metadata = {p.key: p.value for p in self._onnx_model.metadata_props}
        set_model_props(onnx_model, metadata)

        # opsets
        del onnx_model.opset_import[:]  # pylint: disable=E1101
        for dom, value in self.target_opsets.items():
            op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
            op_set.domain = dom
            op_set.version = value
        return onnx_model
Example #14
 def test_ort_gradient_optimizers_use_numpy_saved(self):
     from onnxcustom.utils.orttraining_helper import add_loss_output
     from onnxcustom.training.optimizers import OrtGradientOptimizer
     X, y = make_regression(  # pylint: disable=W0632
         100,
         n_features=10,
         bias=2,
         random_state=0)
     X = X.astype(numpy.float32)
     y = y.astype(numpy.float32)
     X_train, _, y_train, __ = train_test_split(X, y)
     reg = LinearRegression()
     reg.fit(X_train, y_train)
     reg.coef_ = reg.coef_.reshape((1, -1))
     onx = to_onnx(reg,
                   X_train,
                   target_opset=opset,
                   black_op={'LinearRegressor'})
     set_model_props(onx, {'info': 'unit test'})
     onx_loss = add_loss_output(onx)
     inits = ['intercept', 'coef']
     temp = get_temp_folder(__file__, "temp_OrtGradientOptimizer")
     filename = os.path.join(temp, "saved.onnx")
     train_session = OrtGradientOptimizer(onx_loss,
                                          inits,
                                          learning_rate=1e-3,
                                          saved_gradient=filename)
     self.assertRaise(lambda: train_session.get_state(), AttributeError)
     train_session.fit(X_train, y_train, use_numpy=True)
     state_tensors = train_session.get_state()
     self.assertEqual(len(state_tensors), 2)
     r = repr(train_session)
     self.assertIn("OrtGradientOptimizer(model_onnx=", r)
     self.assertIn("learning_rate='invscaling'", r)
     losses = train_session.train_losses_
     self.assertGreater(len(losses), 1)
     self.assertFalse(any(map(numpy.isnan, losses)))
     self.assertExists(filename)
Example #15
    def optimize_model_proto(onnx_model_proto):
        """Optimize the model proto, for example: eliminating all useless Transpose pairs.

        Returns:
            model proto after optimization, if the optimizer ran successfully,
            or onnx_model_proto, if an exception happened
        """
        try:
            kwargs = GraphUtil.get_onnx_model_properties(onnx_model_proto)
            graph = GraphUtil.create_graph_from_onnx_model(onnx_model_proto)
            graph = GraphUtil.optimize_graph(graph)
            model_proto = graph.make_model(onnx_model_proto.graph.doc_string,
                                           graph_name=onnx_model_proto.graph.name, **kwargs)

            if onnx_model_proto.metadata_props:
                metadata_props = {p.key: p.value for p in onnx_model_proto.metadata_props}
                helper.set_model_props(model_proto, metadata_props)
            return model_proto
        except Exception:
            # sometimes, onnx shape inference will fail for some reason,
            # return onnx_model_proto for this case
            logger.warning("Failed to optimize model proto", exc_info=1)
            return onnx_model_proto
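
Usage note: a sketch, assuming the module's imports (GraphUtil, helper, logger) are available; the paths are illustrative. This variant falls back to the input model on failure, so the result is always saveable.

import onnx

model_proto = onnx.load('model.onnx')  # illustrative path
optimized = optimize_model_proto(model_proto)
onnx.save(optimized, 'model_optimized.onnx')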
Example #16
File: compose_test.py Project: onnx/onnx
    def test_merge_models_with_metadata_props(self) -> None:
        m1 = _load_model(m1_def)
        helper.set_model_props(m1, {'p1': 'v1', 'p2': 'v2'})

        m2 = _load_model(m2_def)
        helper.set_model_props(m2, {'p3': 'v3', 'p4': 'v4'})

        io_map = [("B00", "B01")]
        m3 = compose.merge_models(m1, m2, io_map=io_map)
        assert len(m3.metadata_props) == 4

        # Overlap, but same value
        helper.set_model_props(m2, {'p1': 'v1', 'p4': 'v4'})
        m3 = compose.merge_models(m1, m2, io_map=io_map)
        assert len(m3.metadata_props) == 3

        # Same keys but not same value. Error
        helper.set_model_props(m2, {'p1': 'v5', 'p4': 'v4'})
        self.assertRaises(ValueError,
                          compose.merge_models,
                          m1,
                          m2,
                          io_map=io_map)
Example #17
    def make_model(self):
        _LOG.info("start making ONNX model.")
        # add placeholders
        self.init_inputs()
        output_tensor_values = []
        for name in self._outputs:
            v = helper.make_tensor_value_info(
                name,
                onnx_pb.TensorProto.FLOAT,
                self.make_onnx_shape(self._shapes[name]))
            output_tensor_values.append(v)

        onnx_nodes = []
        for node in self._nodes:
            if node.type not in ['Input', 'Output']:
                try:
                    input_names = node.inputs
                    output_names = node.outputs
                    onnx_node = helper.make_node(node.type,
                                                 input_names,
                                                 output_names,
                                                 name=node.name,
                                                 domain=self._operatorsetid,
                                                 **node.attrs)
                    onnx_nodes.append(onnx_node)
                except Exception as ex:
                    node.info()
                    raise Exception('convert failed for node: {0} err: {1}'
                                    .format(node.type, ex))

        self.convert_initializers()

        all_inputs = []
        for node in self._nodes:
            all_inputs.extend(node.inputs)

        initializers = [i for i in list(self._initializers.values())
                        if i.name in all_inputs]

        input_with_initializers = []
        initializers_names = []
        for initializer in initializers:
            val = helper.make_tensor_value_info(initializer.name,
                                                initializer.data_type,
                                                self.make_onnx_shape(
                                                    initializer.dims))
            input_with_initializers.append(val)
            initializers_names.append(initializer.name)
        input_with_initializers.extend(
            list(self._model_input_tensors.values()))
        input_tensors_names = [i for i in all_inputs
                               if i not in initializers_names and
                               i not in self._inputs]
        internal_inputs = []
        for name in input_tensors_names:
            val = helper.make_tensor_value_info(name,
                                                onnx_pb.TensorProto.FLOAT,
                                                self.make_onnx_shape(
                                                    self._shapes[name]))
            internal_inputs.append(val)

        graph = helper.make_graph(onnx_nodes,
                                  self._producer_name,
                                  input_with_initializers,
                                  output_tensor_values,
                                  initializer=initializers,
                                  value_info=internal_inputs)
        metadata_props = {"left_context": str(self._left_context),
                          "right_context": str(self._right_context),
                          "chunk_size": str(self._chunk_size),
                          "modulus": str(self._modulus),
                          "subsample_factor": str(self._subsample_factor)}
        kwargs = {"producer_name": self._producer_name,
                  "producer_version": self._producer_version}
        opsets = []

        imp = helper.make_operatorsetid(self._operatorsetid, 1)
        imp.version = self._opset
        opsets.append(imp)
        if self._extra_opset is not None:
            opsets.extend(self._extra_opset)
        kwargs["opset_imports"] = opsets
        model_proto = helper.make_model(graph, **kwargs)
        helper.set_model_props(model_proto, metadata_props)
        checker.check_model(model_proto)
        return model_proto
Example #18
def main(model_path,
         model_save_path=None,
         add_transpose_for_channel_last_first_issue=True,
         bottom_nodes_name=None):

    onnx_weight_node_list = []
    output_tensor_value_info = []
    onnx_node_list = []
    inner_node_shape_value_info = []

    # parse node information through tflite interpreter (tflite interpreter can't parse operator information in our target tensorflow version 1.15)
    interpreter = tf.lite.Interpreter(model_path)
    interpreter.allocate_tensors()

    # get model input info (assume there is only one input)
    input_details = interpreter.get_input_details()
    model_input_name = input_details[0]['name']
    input_tensor_value_info = None

    # generate tree
    tree_graph = Tree(model_path=model_path,
                      bottom_nodes_name=bottom_nodes_name,
                      defused=True)

    # get sequential node name
    sequential_keys = tree_graph.get_sequential_nodes_key()

    # get tree node in the form of {node_name: op_node_obj}
    tree_dict = tree_graph.get_nodes()

    #############################
    # build head transpose node #
    #############################
    for h_node in tree_graph.get_head_nodes():
        # transpose for channel last to channel first
        if add_transpose_for_channel_last_first_issue is True:
            logging.getLogger('tflite2onnx').debug(
                "generating transpose node for channel last first issue: " +
                h_node.node_name)
            input_tensor_value_info = helper.make_tensor_value_info(
                model_input_name, TensorProto.FLOAT,
                h_node.node_input_shape.tolist())
            h_transpose_node = build_head_transpose_node_for_channel_last_2_channel_first(
                input_tensor_value_info.name, h_node.node_name)

            onnx_node_list.append(h_transpose_node)
            h_node.input_nodes_name = [h_transpose_node.name]
        else:
            input_tensor_value_info = helper.make_tensor_value_info(
                model_input_name, TensorProto.FLOAT,
                tflite_utils.tflite2onnx_shape_map(
                    h_node.node_input_shape.tolist()))
            h_node.input_nodes_name = [input_tensor_value_info.name]

    ############################
    # build model node by node #
    ############################
    dumped_quantization_info = {}
    for key in sequential_keys:
        logging.getLogger('tflite2onnx').debug("generating: " + key)
        nodes, val, weight, quantization_info = tree_dict[key].generate()

        if (len(val) != 0) and (tree_dict[key].is_bottom_node is False):
            inner_node_shape_value_info.extend(val)
        if len(weight) != 0:
            onnx_weight_node_list.extend(weight)
        if len(nodes) != 0:
            onnx_node_list.extend(nodes)
        if len(quantization_info) != 0:
            merge_quantization_info(dumped_quantization_info,
                                    quantization_info)

    if check_quantization(interpreter.get_tensor_details()):
        json_save_path = model_save_path[:-5] + "_user_config.json"
        with open(json_save_path, "w") as f:
            print(json_save_path)
            json.dump(dumped_quantization_info, f, indent=1)
            print("New Qunatized information saved")

    # sometimes there are sub-nodes in one tree node, we need to find the last one
    b_nodes = [node for node in tree_graph.get_bottom_nodes()]

    ###############################
    # build bottom transpose node #
    ###############################
    for b_node in b_nodes:

        out_value_info = None
        if add_transpose_for_channel_last_first_issue is True:
            logging.getLogger('tflite2onnx').debug(
                "generating transpose node for channel last first issue: " +
                b_node.node_name)
            out_value_info, transpose_node = build_button_transpose_node_for_channel_first_2_channel_last(
                b_node.node_list[-1], b_node.node_output_shape.tolist())

            if transpose_node is not None:
                onnx_node_list.append(transpose_node)
        else:
            out_value_info = set_end_node(b_node.node_list[-1],
                                          b_node.node_output_shape.tolist())
        output_tensor_value_info.append(out_value_info)

    input_init = [input_tensor_value_info]
    input_init.extend(onnx_weight_node_list)
    onnx_inputs = tflite_utils.make_kneron_valid_onnx_input(input_init)

    graph_cnn = helper.make_graph(onnx_node_list,
                                  'cnn_test',
                                  onnx_inputs,
                                  output_tensor_value_info,
                                  onnx_weight_node_list,
                                  value_info=inner_node_shape_value_info)

    cnn_model = helper.make_model(graph_cnn, producer_name='Kneron')
    cnn_model.opset_import[0].version = 11

    # add generated time to model meta data
    helper.set_model_props(
        cnn_model, {
            'Generated Time':
            datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S") + " (UTC+0)"
        })

    cnn_model = onnx.utils.polish_model(cnn_model)

    # save
    if model_save_path is not None:
        onnx.save(cnn_model, model_save_path)
    return cnn_model
Example #19
    def wtest_ort_gradient_optimizers_fw_nesterov_binary(self, use_weight):
        from onnxcustom.training.optimizers_partial import (
            OrtGradientForwardBackwardOptimizer)
        from onnxcustom.training.sgd_learning_rate import (
            LearningRateSGDNesterov)
        from onnxcustom.training.sgd_learning_loss import NegLogLearningLoss
        X, y = make_classification(  # pylint: disable=W0632
            100, n_features=10, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = SGDClassifier(loss='log')
        if use_weight:
            reg.fit(X_train,
                    y_train,
                    sample_weight=w_train.astype(numpy.float64))
        else:
            reg.fit(X_train, y_train)
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearRegressor'},
                      options={
                          'zipmap': False,
                          'raw_scores': True
                      })
        onx2 = onx
        onx = select_model_inputs_outputs(onx, outputs=['score'])
        self.assertIn("output: name='score'", onnx_simple_text_plot(onx))
        set_model_props(onx, {'info': 'unit test'})
        inits = ['coef', 'intercept']

        train_session = OrtGradientForwardBackwardOptimizer(
            onx,
            inits,
            weight_name='weight' if use_weight else None,
            learning_rate=LearningRateSGDNesterov(1e-4,
                                                  nesterov=False,
                                                  momentum=0.9),
            learning_loss=NegLogLearningLoss(),
            warm_start=False,
            max_iter=100,
            batch_size=10)
        self.assertIsInstance(train_session.learning_loss, NegLogLearningLoss)
        self.assertEqual(train_session.learning_loss.eps, 1e-5)
        y_train = y_train.reshape((-1, 1))
        if use_weight:
            train_session.fit(X_train, y_train, w_train.reshape((-1, 1)))
        else:
            train_session.fit(X_train, y_train)
        temp = get_temp_folder(
            __file__, "temp_ort_gradient_optimizers_fw_nesterov_binary")
        train_session.save_onnx_graph(temp)

        # get_trained_weight
        trained_onnx = train_session.get_trained_onnx(model=onx2)
        sess = InferenceSession(onx2.SerializeToString(),
                                providers=['CPUExecutionProvider'])
        got1 = sess.run(None, {'X': X_train})
        sess = InferenceSession(trained_onnx.SerializeToString(),
                                providers=['CPUExecutionProvider'])
        got2 = sess.run(None, {'X': X_train})
        self.assertEqual(len(got1), len(got2))
        self.assertEqual(got1[0].shape, got2[0].shape)

        # state
        state = train_session.get_state()
        self.assertIsInstance(state, list)
        train_session.set_state(state)
        for k in range(len(state)):  # pylint: disable=C0200
            state[k] = state[k].numpy()
        train_session.set_state(state)
Example #20
    def wtest_ort_gradient_optimizers_binary(self, use_weight=False):
        from onnxcustom.utils.orttraining_helper import add_loss_output
        from onnxcustom.training.optimizers import OrtGradientOptimizer
        X, y = make_classification(  # pylint: disable=W0632
            100, n_features=10, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.int64)
        w = (numpy.random.rand(X.shape[0]) + 1).astype(numpy.float32)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = SGDClassifier(loss='log')
        reg.fit(X_train, y_train)
        onx = to_onnx(reg,
                      X_train,
                      target_opset=opset,
                      black_op={'LinearClassifier'},
                      options={'zipmap': False})
        onx2 = load_onnx(BytesIO(onx.SerializeToString()))
        set_model_props(onx, {'info': 'unit test'})
        onx_loss = add_loss_output(
            onx,
            'log',
            output_index=1,
            weight_name='weight' if use_weight else None)
        inits = ['intercept', 'coef']
        inputs = onx_loss.graph.input
        self.assertEqual(len(inputs), 3 if use_weight else 2)
        dt = inputs[1].type.tensor_type.elem_type
        self.assertEqual(TensorProto.INT64, dt)  # pylint: disable=E1101
        train_session = OrtGradientOptimizer(onx_loss,
                                             inits,
                                             learning_rate=1e-3)
        self.assertRaise(lambda: train_session.get_state(), AttributeError)
        if use_weight:
            train_session.fit(X_train,
                              y_train.reshape((-1, 1)),
                              w_train.reshape((-1, 1)),
                              use_numpy=False)
        else:
            train_session.fit(X_train,
                              y_train.reshape((-1, 1)),
                              use_numpy=False)
        state_tensors = train_session.get_state()
        self.assertEqual(len(state_tensors), 2)
        r = repr(train_session)
        self.assertIn("OrtGradientOptimizer(model_onnx=", r)
        self.assertIn("learning_rate='invscaling'", r)
        losses = train_session.train_losses_
        self.assertGreater(len(losses), 1)
        self.assertFalse(any(map(numpy.isnan, losses)))

        # get_trained_weight
        trained_onnx = train_session.get_trained_onnx(model=onx2)
        sess = InferenceSession(onx2.SerializeToString(),
                                providers=['CPUExecutionProvider'])
        got1 = sess.run(None, {'X': X_train})
        sess = InferenceSession(trained_onnx.SerializeToString(),
                                providers=['CPUExecutionProvider'])
        got2 = sess.run(None, {'X': X_train})
        self.assertEqual(len(got1), len(got2))
        self.assertEqual(got1[0].shape, got2[0].shape)

        # state
        state = train_session.get_state()
        self.assertIsInstance(state, dict)
        train_session.set_state(state)
Example #21
def add_loss_output(onx,
                    score_name='squared_error',
                    loss_name='loss',
                    label_name='label',
                    weight_name=None,
                    penalty=None,
                    output_index=None,
                    **kwargs):
    """
    Modifies an ONNX graph to add operators to score and allow training.

    :param onx: onx graph
    :param score_name: name of the score
    :param loss_name: name of the output loss
    :param label_name: name of the label input
    :param weight_name: None or any value to consider weight
        while computing loss
    :param penalty: dictionary similar to the
        following one `{ weight_name: {'l1': alpha, 'l2': beta} }`
        or `{ weight_name: beta}`,
        it adds a L1 and/or L2 penalty to one input or initializer,
        penalty = :math:`|w| \\alpha + w^2 \\beta`
    :param output_index: the output used to compute the loss,
        if None, the function assumes there is only one output,
        it must be specified if there are more than 1,
        it can be an integer or a string (output name)
    :param kwargs: additional arguments for losses (see below)
    :return: modified graph

    Possible values for *score_name*:

    * `'squared_error'` or `'l2'`: :math:`\\sum_i{(f(x_i)-y_i)^2}` or
      :math:`\\sum_i{w_i (f(x_i)-y_i)^2}` if *weight_name*
      is not None
    * `'absolute_error'` or `'l1'`: :math:`\\sum_i{|f(x_i)-y_i|}` or
      :math:`\\sum_i{w_i |f(x_i)-y_i|}` if *weight_name*
      is not None
    * `'elastic'`: mixture of losses, kwargs must define
      *l1_weight* and *l2_weight*; if undefined, the default
      value of each is 0.5
    * `'log'`: log loss :math:`-(1-yt)\\log(1-yp) - yt\\log(yp)`,
      this only works for a binary classification where *yp* is the
      predicted probability, *yt* is the expected probability.
      *yt* is expected to be binary, *yp* is a matrix with two
      columns, the sum on every line is 1.

    See example :ref:`l-orttraining-nn-gpu`.
    The next example shows an elastic loss mixing L1 and L2.

    .. gdot::
        :script: DOT-SECTION

        import numpy
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        from sklearn.linear_model import LinearRegression
        from mlprodict.onnx_conv import to_onnx
        from mlprodict.onnxrt import OnnxInference
        from onnxcustom import __max_supported_opset__ as opset
        from onnxcustom.utils.orttraining_helper import add_loss_output
        from onnxcustom.training.optimizers import OrtGradientOptimizer

        X, y = make_regression(  # pylint: disable=W0632
            100, n_features=10, bias=2, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.float32)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(X.dtype)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = LinearRegression()
        reg.fit(X_train, y_train, sample_weight=w_train)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg, X_train, target_opset=opset,
                      black_op={'LinearRegressor'})

        onx_loss = add_loss_output(
            onx, weight_name='weight', score_name='elastic',
            l1_weight=0.1, l2_weight=0.9)

        print("DOT-SECTION", OnnxInference(onx_loss).to_dot())

    The next example shows how to add an L2 loss with L1 and L2 penalties
    on the coefficients.

    .. gdot::
        :script: DOT-SECTION

        import numpy
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        from sklearn.linear_model import LinearRegression
        from mlprodict.onnx_conv import to_onnx
        from mlprodict.onnxrt import OnnxInference
        from onnxcustom import __max_supported_opset__ as opset
        from onnxcustom.utils.orttraining_helper import add_loss_output
        from onnxcustom.training.optimizers import OrtGradientOptimizer

        X, y = make_regression(  # pylint: disable=W0632
            100, n_features=10, bias=2, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.float32)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(X.dtype)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = LinearRegression()
        reg.fit(X_train, y_train, sample_weight=w_train)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg, X_train, target_opset=opset,
                      black_op={'LinearRegressor'})

        onx_loss = add_loss_output(
            onx, weight_name='weight', score_name='elastic',
            penalty={'coef': {'l1': 0.5, 'l2':0.5},
                     'intercept': {'l1': 0.5, 'l2':0.5}})

        print("DOT-SECTION", OnnxInference(onx_loss).to_dot())
    """
    from mlprodict.onnx_tools.optim import onnx_remove_node_unused

    # rename every intermediate output called 'label'
    def _replace(ens):
        for i in range(len(ens)):  # pylint: disable=C0200
            if ens[i] == 'label':
                ens[i] = '_label_'

    for node in onx.graph.node:
        if "_label_" in node.input or "_label_" in node.output:
            raise RuntimeError(  # pragma: no cover
                "One intermediate result contains '_label_'. "
                "It should be removed manually.\n%r" % node)
        _replace(node.input)
        _replace(node.output)

    if output_index is None:
        if len(onx.graph.output) != 1:
            raise ValueError(  # pragma: no cover
                "Unable to guess the output to compare to the "
                "expacted labels among %r." %
                ([o.name for o in onx.graph.output]))
        outputs = onx.graph.output
        output_index = 0
    elif isinstance(output_index, int):
        outputs = [onx.graph.output[output_index]]
    elif isinstance(output_index, str):
        outputs = [(i, o) for i, o in enumerate(onx.graph.output)
                   if o.name == output_index]
        if len(outputs) != 1:
            raise ValueError(  # pragma: no cover
                "Unable to find output %r in %r." %
                (output_index, [o.name for o in onx.graph.output]))
        output_index = outputs[0][0]
        outputs = [outputs[0][1]]
    else:
        raise TypeError(  # pragma: no cover
            f"output_index must be an integer or a str not {type(output_index)!r}."
        )

    existing_names = []
    for node in onx.graph.node:
        existing_names.extend(node.output)
        existing_names.extend(node.input)
    existing_names = set(existing_names)

    output_onx = onx.graph.output[output_index]
    output_name = output_onx.name
    elem = output_onx.type.tensor_type.elem_type
    if elem == 0:
        raise TypeError(  # pragma: no cover
            f"Unable to guess input tensor type from {output_onx!r}.")
    shape = []
    for d in output_onx.type.tensor_type.shape.dim:
        shape.append(d.dim_value if d.dim_value > 0 else None)

    if score_name in ('squared_error', 'l2'):
        inits, inputs, nodes, outputs = _loss_l2(existing_names, elem, shape,
                                                 output_name, label_name,
                                                 weight_name, loss_name)
    elif score_name in ('absolute_error', 'l1'):
        inits, inputs, nodes, outputs = _loss_l1(existing_names, elem, shape,
                                                 output_name, label_name,
                                                 weight_name, loss_name)
    elif score_name == 'elastic':
        inits, inputs, nodes, outputs = _loss_elastic(existing_names, elem,
                                                      shape, output_name,
                                                      label_name, weight_name,
                                                      loss_name, **kwargs)
    elif score_name == 'log':
        shape = (None, 1)
        inits, inputs, nodes, outputs = _loss_log(existing_names, elem, shape,
                                                  output_name, label_name,
                                                  weight_name, loss_name,
                                                  **kwargs)
    else:
        raise NotImplementedError(  # pragma: no cover
            f"Unexpected {score_name!r} value for score_name.")

    if penalty is not None:
        final_name = nodes[-1].output[0]
        loss_name = _unique_name(existing_names, "loss_diff")
        nodes[-1].output[0] = loss_name
        names = []
        for k, v in penalty.items():
            if isinstance(v, float):
                v = {'l2': v}
            inits_to_add, nodes_to_add = penalty_loss_onnx(
                k,
                dtype=TENSOR_TYPE_TO_NP_TYPE[elem],
                existing_names=existing_names,
                **v)
            names.append(nodes_to_add[-1].output[0])
            nodes.extend(nodes_to_add)
            inits.extend(inits_to_add)
        # Operator Sum does not have a gradient.
        if len(names) == 1:
            pen_name = names[0]
        else:
            current = names[0]
            for i in range(1, len(names)):
                new_name = _unique_name(existing_names, "sumop")
                nodes.append(make_node('Add', [current, names[i]], [new_name]))
                current = new_name
            pen_name = current

        cst_shape = _unique_name(existing_names, "shapevect")
        inits.append(
            from_array(numpy.array([-1, 1], dtype=numpy.int64),
                       name=cst_shape))
        loss_reshape = _unique_name(existing_names, "loss_reshape")
        pen_reshape = _unique_name(existing_names, "penalty_reshape")
        nodes.extend([
            make_node("Reshape", [pen_name, cst_shape], [pen_reshape]),
            make_node("Reshape", [loss_name, cst_shape], [loss_reshape])
        ])

        nodes.append(
            make_node('Add', [pen_reshape, loss_reshape], [final_name]))

    inits = list(onx.graph.initializer) + inits
    graph = make_graph(
        list(onx.graph.node) + nodes, onx.graph.name,
        list(onx.graph.input) + inputs,
        outputs + [onx.graph.output[output_index]], inits)
    onnx_model = make_model(graph)
    onnx_model.ir_version = onx.ir_version
    onnx_model.producer_name = onx.producer_name
    onnx_model.producer_version = onx.producer_version
    onnx_model.domain = onx.domain
    onnx_model.model_version = onx.model_version
    onnx_model.doc_string = onx.doc_string
    if len(onx.metadata_props) > 0:
        values = {p.key: p.value for p in onx.metadata_props}
        set_model_props(onnx_model, values)

    # fix opset import
    del onnx_model.opset_import[:]  # pylint: disable=E1101
    for oimp in onx.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        op_set.domain = oimp.domain
        op_set.version = oimp.version
    return _rewrite_op_no_grad(onnx_remove_node_unused(onnx_model))
Example #22
def insert_node(model, op_type, node, input_index=0, new_name=None, **attrs):
    """
    Inserts a node before one node input.
    :param model: onnx graph
    :param op_type: operator type of the node to insert
    :param node: node or node name
    :param input_index: input index or input name
    :param new_name: name of the new node's result,
        guessed from *op_type* if None
    :param attrs: node attributes
    :return: updated graph
    """
    if isinstance(node, str):
        inode = find_node_name(model, node)
    else:
        inode = node
    if isinstance(input_index, str):
        input_index_ = find_node_input_name(node, input_index)
        if input_index_ == -1:
            raise RuntimeError(  # pragma: no cover
                "Unable to find input_index %r in node %r." % (
                    input_index, node.name))  # pylint: disable=E1120
        input_index = input_index_

    # guess a new name
    names = []
    for n in model.graph.node:
        names.extend(n.input)
        names.extend(n.output)
    names = set(names)
    if new_name is None:
        new_name = op_type.lower()
    root_name = new_name
    i = 0
    while new_name in names:
        new_name = "%s_%d" % (root_name, i)
        i += 1

    new_node = helper.make_node(
        op_type, [inode.input[input_index]], [new_name], **attrs)
    inode.input[input_index] = new_name
    keep_nodes = list(model.graph.node)
    keep_nodes.append(new_node)
    keep_nodes = ensure_topological_order(
        model.graph.input, model.graph.initializer, keep_nodes)

    graph = helper.make_graph(
        keep_nodes, model.graph.name, model.graph.input,
        model.graph.output, model.graph.initializer)
    onnx_model = helper.make_model(graph)
    onnx_model.ir_version = model.ir_version
    onnx_model.producer_name = model.producer_name
    onnx_model.producer_version = model.producer_version
    onnx_model.domain = model.domain
    onnx_model.model_version = model.model_version
    onnx_model.doc_string = model.doc_string
    if len(model.metadata_props) > 0:
        values = {p.key: p.value for p in model.metadata_props}
        helper.set_model_props(onnx_model, values)

    del onnx_model.opset_import[:]  # pylint: disable=E1101
    for oimp in model.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        op_set.domain = oimp.domain
        op_set.version = oimp.version

    if len(onnx_model.graph.input) != len(model.graph.input):  # pylint: disable=E1101
        raise RuntimeError(  # pragma: no cover
            "Input mismatch {} != {}".format(
                len(onnx_model.graph.input), len(model.graph.input)))  # pylint: disable=E1101
    return onnx_model
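
Usage note: a minimal sketch, assuming insert_node and its helpers (find_node_name, ensure_topological_order) are importable from the module above; it inserts an Identity node before the first input of a Mul node.

from onnx import TensorProto, helper

graph = helper.make_graph(
    [helper.make_node('Mul', ['X', 'X'], ['Y'], name='mul')], 'g',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1])])
model = helper.make_model(graph)
# rewires input 0 of 'mul' through a new Identity node
new_model = insert_node(model, 'Identity', model.graph.node[0],
                        input_index=0)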
Example #23
File: utils.py Project: marlon-br/nnsplit
def postprocess(model_path, metadata={}):
    model = onnx.load(model_path)
    helper.set_model_props(model, metadata)

    onnx.save(model, model_path)
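
Usage note: a sketch with an illustrative path and metadata; set_model_props replaces the whole metadata_props list, so pass every key you want to keep.

postprocess('model.onnx', {'info': 'illustrative metadata'})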
Example #24
def insert_results_into_onnx(model, results, as_parameter=True, suffix='_DBG',
                             param_name=None, node_type='DEBUG',
                             domain='DEBUG', domain_opset=1):
    """
    Inserts results into an ONNX graph to produce an extended
    ONNX graph. It can saved and looked into with a tool such as
    :epkg:`netron`.

    :param model: ONNX graph
    :param results: results to be added in a dictionary
    :param as_parameter: add new nodes with results as one parameter
        (True) or as initializer (False)
    :param suffix: suffix to add to new results
    :param param_name: name of the parameter to add
        (by default the result name), it can be a function
        `param_name(result_name) -> parameter_name`
    :param node_type: type of the new node
    :param domain: domain of the new node
    :param domain_opset: opset for *domain*
    :return: new ONNX graph

    See method :meth:`OnnxInference.run2onnx
    <mlprodict.onnxrt.onnx_inference.OnnxInference.run2onnx>`
    to see a graph this function produces.

    .. image:: debug.png

    .. versionadded:: 0.7
    """
    inputs = list(model.graph.input)
    outputs = list(model.graph.output)
    inits = list(model.graph.initializer)
    nodes = {id(n): n for n in model.graph.node}
    order = {id(n): i for i, n in enumerate(model.graph.node)}
    nodes_copy = {}

    names_init = set(init.name for init in inits)
    names_input = set(init.name for init in inputs)
    names_output = {}
    for node in nodes.values():
        for i, o in enumerate(node.output):
            names_output[o] = (i, node)

    for k, v in results.items():
        if k in names_init:
            # initializer are not inserted again
            continue
        if k in names_input:
            # debug information cannot be added to graph inputs
            raise NotImplementedError(
                "Unable to add debug information on input %r." % k)

        if k not in names_output:
            raise RuntimeError(
                "Unable to find result %r in the ONNX graph. Available="
                "[%s]." % (k, ", ".join(sorted(names_output))))

        index, node = names_output[k]
        new_name = k + suffix

        if id(node) not in nodes_copy:
            new_node = helper.make_node(
                node.op_type, list(node.input), list(node.output),
                domain=node.domain if node.domain else None,
                name=node.name + suffix)
            new_node.attribute.extend(node.attribute)  # pylint: disable=E1101
            nodes_copy[id(node)] = new_node
            order[id(new_node)] = order[id(node)]
        new_node = nodes_copy[id(node)]
        new_node.output[index] = new_name

        if as_parameter:
            pname = k if param_name is None else param_name(k)
            atts = {pname: from_array(v, name=pname)}
            inserted_node = helper.make_node(
                node_type, [new_name], [k], domain=domain,
                **atts)
        else:
            pname = k if param_name is None else param_name(k)
            pname += suffix + 'i'
            inserted_node = helper.make_node(
                node_type, [new_name, pname], [k], domain=domain)
            inits.append(from_array(v, name=pname))

        order[id(inserted_node)] = order[id(node)] + 1. / (index + 2)
        nodes[id(inserted_node)] = inserted_node

    new_nodes = [(order[id(n)], n)
                 for n in nodes.values() if id(n) not in nodes_copy]
    new_nodes.extend((order[id(n)], n) for n in nodes_copy.values())
    new_nodes = [n[1] for n in sorted(new_nodes)]

    graph = helper.make_graph(new_nodes, model.graph.name, inputs,
                              outputs, inits)
    onnx_model = helper.make_model(graph)
    onnx_model.ir_version = model.ir_version
    onnx_model.producer_name = model.producer_name
    onnx_model.producer_version = model.producer_version
    onnx_model.domain = model.domain
    onnx_model.model_version = model.model_version
    onnx_model.doc_string = model.doc_string
    if len(model.metadata_props) > 0:  # pragma: no cover
        values = {p.key: p.value for p in model.metadata_props}
        helper.set_model_props(onnx_model, values)

    del onnx_model.opset_import[:]  # pylint: disable=E1101
    for oimp in model.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        op_set.domain = oimp.domain
        op_set.version = oimp.version
    op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
    op_set.domain = domain
    op_set.version = domain_opset
    return onnx_model
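
Usage note: a minimal sketch, assuming model is an ONNX graph with a result named 'Y' produced by one of its nodes, and that the array is one captured at runtime.

import numpy

# 'Y' is renamed 'Y_DBG' and a DEBUG node re-exposes it as 'Y'
# with the recorded value attached as a parameter
debug_model = insert_results_into_onnx(
    model, {'Y': numpy.array([0.5], dtype=numpy.float32)})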
Example #25
File: compose.py Project: kaydoh/onnx
def merge_models(m1: ModelProto,
                 m2: ModelProto,
                 io_map: List[Tuple[Text, Text]],
                 inputs: Optional[List[Text]] = None,
                 outputs: Optional[List[Text]] = None,
                 prefix1: Optional[Text] = None,
                 prefix2: Optional[Text] = None,
                 name: Optional[Text] = None,
                 doc_string: Optional[Text] = None,
                 producer_name: Optional[Text] = 'onnx.compose.merge_models',
                 producer_version: Optional[Text] = "1.0",
                 domain: Optional[Text] = "",
                 model_version: Optional[int] = 1) -> ModelProto:
    """Combines two ONNX models into a single one.

    The combined model is defined by connecting the specified set of outputs/inputs.
    Those inputs/outputs not specified in the io_map argument will remain as
    inputs/outputs of the combined model.

    Both models should have the same IR version, and same operator sets imported.

    Arguments:
        m1 (ModelProto): First model
        m2 (ModelProto): Second model
        io_map (list of pairs of string): The pairs of names [(out0, in0), (out1, in1), ...]
                                          representing outputs of the first graph and inputs of the second
                                          to be connected
        inputs (list of string): Optional list of inputs to be included in the combined graph
                                 By default, all inputs not present in the ``io_map`` argument will be
                                 included in the combined model
        outputs (list of string): Optional list of outputs to be included in the combined graph
                                  By default, all outputs not present in the ``io_map`` argument will be
                                  included in the combined model
        prefix1 (string): Optional prefix to be added to all names in m1
        prefix2 (string): Optional prefix to be added to all names in m2
        name (string): Optional name for the combined graph
                       By default, the name is g1.name and g2.name concatenated with an underscore delimiter
        doc_string (string): Optional docstring for the combined graph
                             If not provided, a default docstring with the concatenation of g1 and g2 docstrings is used
        producer_name (string): Optional producer name for the combined model. Default: 'onnx.compose.merge_models'
        producer_version (string): Optional producer version for the combined model. Default: "1.0"
        domain (string): Optional domain of the combined model. Default: ""
        model_version (int): Optional version of the graph encoded. Default: 1
    """
    if type(m1) is not ModelProto:
        raise ValueError("m1 argument is not an ONNX model")
    if type(m2) is not ModelProto:
        raise ValueError("m2 argument is not an ONNX model")

    if m1.ir_version != m2.ir_version:
        raise ValueError(
            f"IR version mismatch {m1.ir_version} != {m2.ir_version}."
            " Both models should have have the same IR version")
    ir_version = m1.ir_version

    opset_import_map: MutableMapping[Text, int] = {}
    opset_imports = \
        [entry for entry in m1.opset_import] + \
        [entry for entry in m2.opset_import]

    for entry in opset_imports:
        if entry.domain in opset_import_map:
            found_version = opset_import_map[entry.domain]
            if entry.version != found_version:
                raise ValueError(
                    "Can't merge two models with different operator set ids for a given domain. "
                    f"Got: {m1.opset_import} and {m2.opset_import}")
        else:
            opset_import_map[entry.domain] = entry.version

    # Prefixing names in the graph if requested, adjusting io_map accordingly
    if prefix1 or prefix2:
        if prefix1:
            m1_copy = ModelProto()
            m1_copy.CopyFrom(m1)
            m1 = m1_copy
            m1 = add_prefix(m1, prefix=prefix1)
        if prefix2:
            m2_copy = ModelProto()
            m2_copy.CopyFrom(m2)
            m2 = m2_copy
            m2 = add_prefix(m2, prefix=prefix2)
        io_map = [(prefix1 + io[0] if prefix1 else io[0],
                   prefix2 + io[1] if prefix2 else io[1]) for io in io_map]

    graph = merge_graphs(m1.graph,
                         m2.graph,
                         io_map,
                         inputs=inputs,
                         outputs=outputs,
                         name=name,
                         doc_string=doc_string)
    model = helper.make_model(graph,
                              producer_name=producer_name,
                              producer_version=producer_version,
                              domain=domain,
                              model_version=model_version,
                              opset_imports=opset_imports,
                              ir_version=ir_version)

    # Merging model metadata props
    model_props = {}
    for meta_entry in m1.metadata_props:
        model_props[meta_entry.key] = meta_entry.value
    for meta_entry in m2.metadata_props:
        if meta_entry.key in model_props:
            value = model_props[meta_entry.key]
            if value != meta_entry.value:
                raise ValueError(
                    "Can't merge models with different values for the same model metadata property."
                    f" Found: property = {meta_entry.key}, with values {value} and {meta_entry.value}."
                )
        else:
            model_props[meta_entry.key] = meta_entry.value
    helper.set_model_props(model, model_props)

    # Merging functions
    function_overlap = list(
        set([f.name
             for f in m1.functions]) & set([f.name for f in m2.functions]))
    if function_overlap:
        raise ValueError(
            "Can't merge models with overlapping local function names."
            " Found in both graphs: " + ', '.join(function_overlap))
    model.functions.MergeFrom(m1.functions)
    model.functions.MergeFrom(m2.functions)

    checker.check_model(model)
    return model
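A small usage sketch for merge_models (it assumes onnx >= 1.10, where onnx.compose is available; the graph and tensor names below are made up):

import onnx
from onnx import TensorProto, checker, helper
from onnx.compose import merge_models

# m1 computes Y = X + X.
g1 = helper.make_graph(
    [helper.make_node('Add', ['X', 'X'], ['Y'])], 'g1',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, [2])],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [2])])
m1 = helper.make_model(g1)

# m2 computes Z = Y2 * Y2.
g2 = helper.make_graph(
    [helper.make_node('Mul', ['Y2', 'Y2'], ['Z'])], 'g2',
    [helper.make_tensor_value_info('Y2', TensorProto.FLOAT, [2])],
    [helper.make_tensor_value_info('Z', TensorProto.FLOAT, [2])])
m2 = helper.make_model(g2)

# Feed m1's output 'Y' into m2's input 'Y2'; the merged model
# takes X and returns Z = (X + X) * (X + X).
merged = merge_models(m1, m2, io_map=[('Y', 'Y2')])
checker.check_model(merged)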
Example #27
0
def select_model_inputs_outputs(model, outputs=None, inputs=None):
    """
    Takes a model and changes its outputs.

    @param      model       :epkg:`ONNX` model
    @param      inputs      new inputs, same ones if None
    @param      outputs     new outputs, same ones if None
    @return                 modified model

    The function removes unneeded nodes.
    """
    if inputs is not None:
        raise NotImplementedError("Parameter inputs cannot be empty.")
    if outputs is None:
        raise RuntimeError("Parameter outputs cannot be None.")
    if not isinstance(outputs, list):
        outputs = [outputs]

    mark_var = {}
    for out in enumerate_model_node_outputs(model):
        mark_var[out] = 0
    for inp in model.graph.input:
        mark_var[inp.name] = 0
    for out in outputs:
        if out not in mark_var:
            raise ValueError("Output '{}' not found in model.".format(out))
        mark_var[out] = 1

    nodes = model.graph.node[::-1]
    mark_op = {}
    for node in nodes:
        mark_op[node.name] = 0

    # We mark all the nodes we need to keep.
    nb = 1
    while nb > 0:
        nb = 0
        for node in nodes:
            if mark_op[node.name] == 1:
                continue
            mod = False
            for out in node.output:
                if mark_var[out] == 1:
                    mark_op[node.name] = 1
                    mod = True
                    break
            if not mod:
                continue

            nb += 1
            for inp in node.input:
                if mark_var.get(inp, 0) == 1:
                    continue
                mark_var[inp] = 1
                nb += 1

    # Keep only the nodes marked with mark_op[node.name] == 1.
    keep_nodes = [node for node in nodes if mark_op[node.name] == 1]

    var_out = []
    for out in outputs:
        value_info = helper.ValueInfoProto()
        value_info.name = out
        var_out.append(value_info)
    graph = helper.make_graph(keep_nodes, model.graph.name, model.graph.input,
                              var_out, model.graph.initializer)
    onnx_model = helper.make_model(graph)
    onnx_model.ir_version = model.ir_version
    onnx_model.producer_name = model.producer_name
    onnx_model.producer_version = model.producer_version
    onnx_model.domain = model.domain
    onnx_model.model_version = model.model_version
    onnx_model.doc_string = model.doc_string
    if len(model.metadata_props) > 0:
        values = {p.key: p.value for p in model.metadata_props}
        helper.set_model_props(onnx_model, values)

    for oimp in model.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        op_set.domain = oimp.domain
        op_set.version = oimp.version

    if len(onnx_model.graph.input) != len(model.graph.input):  # pylint: disable=E1101
        raise RuntimeError("Input mismatch {} != {}".format(
            len(onnx_model.input), len(model.input)))  # pylint: disable=E1101
    return onnx_model
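A hypothetical usage of this version, assuming the function above is importable in your environment: truncate a loaded model so that an intermediate result becomes the final output. 'model.onnx' and 'intermediate_output' are placeholders for a real file and a real node output name:

import onnx

model = onnx.load('model.onnx')
# 'intermediate_output' must be produced by some node in the graph,
# otherwise the function raises ValueError.
truncated = select_model_inputs_outputs(model, outputs='intermediate_output')
onnx.save(truncated, 'model_truncated.onnx')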
Example #28
0
def select_model_inputs_outputs(model, outputs=None, inputs=None,
                                infer_shapes=False, overwrite=None,
                                remove_unused=True,
                                verbose=0, fLOG=None):
    """
    Takes a model and changes its outputs.

    :param model: :epkg:`ONNX` model
    :param inputs: new inputs, same ones if None
    :param outputs: new outputs, same ones if None
    :param infer_shapes: infer inputs and outputs shapes
    :param overwrite: overwrite type and shapes for
        inputs or outputs, *overwrite* is a
        dictionary `{'name': (numpy dtype, shape)}`
    :param remove_unused: remove unused nodes from the graph
    :param verbose: display information while converting
    :param fLOG: logging function
    :return: modified model

    The function removes unneeded nodes.

    .. exref::
        :title: Change ONNX model inputs

        The following example shows how to change the inputs of a model
        to bypass its first nodes. Shape inference fails to determine
        the new input types, so they need to be overwritten.
        `verbose=1, fLOG=print` shows the number of deleted nodes.

        ::

            import numpy
            import onnx
            from mlprodict.onnx_tools.onnx_manipulations import select_model_inputs_outputs

            onx = onnx.load(path)
            onx2 = select_model_inputs_outputs(
                onx, inputs=["SentenceTokenizer/SentencepieceTokenizeOp:0",
                             "SentenceTokenizer/SentencepieceTokenizeOp:1"],
                infer_shapes=True, verbose=1, fLOG=print,
                overwrite={'SentenceTokenizer/SentencepieceTokenizeOp:0': (numpy.int32, None),
                           'SentenceTokenizer/SentencepieceTokenizeOp:1': (numpy.int64, None)})
            onnx.save(onx2, path2)

    .. versionchanged:: 0.6
        Supports the case where inputs are changed.

    .. versionchanged:: 0.7
        Parameter *remove_unused* was added. Unused nodes are removed by default.
    """
    if inputs is not None and not isinstance(inputs, list):
        inputs = [inputs]
    if outputs is not None and not isinstance(outputs, list):
        outputs = [outputs]
    if inputs is None:
        inputs = [i.name for i in model.graph.input]
    if outputs is None:
        outputs = [o.name for o in model.graph.output]

    mark_var = {}
    for out in enumerate_model_node_outputs(model):
        mark_var[out] = 0
    for inp in inputs:
        mark_var[inp] = 0
    for out in outputs:
        if out not in mark_var:
            raise ValueError(  # pragma: no cover
                "Output '{}' not found in model.".format(out))
        mark_var[out] = 1

    nodes = model.graph.node[::-1]
    mark_op = {}
    for node in nodes:
        mark_op[node.name] = 0

    # We mark all the nodes we need to keep.
    nb = 1
    while nb > 0:
        nb = 0
        for node in nodes:
            if mark_op[node.name] == 1:
                continue
            mod = False
            for out in node.output:
                if mark_var[out] == 1:
                    mark_op[node.name] = 1
                    mod = True
                    break
            if not mod:
                continue

            nb += 1
            for inp in node.input:
                if inp in inputs:
                    continue
                if mark_var.get(inp, 0) == 1:
                    continue
                mark_var[inp] = 1
                nb += 1

    # Keep only the nodes marked with mark_op[node.name] == 1.
    keep_nodes = [node for node in nodes if mark_op[node.name] == 1]

    known_shapes = {}
    if infer_shapes:
        shapes = shape_inference.infer_shapes(model)
        for shape in shapes.graph.value_info:  # pylint: disable=E1101
            known_shapes[shape.name] = shape.type
        for shape in shapes.graph.input:  # pylint: disable=E1101
            known_shapes[shape.name] = shape.type
        for shape in shapes.graph.output:  # pylint: disable=E1101
            known_shapes[shape.name] = shape.type
    else:
        for shape in model.graph.input:  # pylint: disable=E1101
            known_shapes[shape.name] = shape.type
        for shape in model.graph.output:  # pylint: disable=E1101
            known_shapes[shape.name] = shape.type

    var_in = []
    for name in inputs:
        if overwrite is not None and name in overwrite:
            dtype, shape = overwrite[name]
            proto_dtype = guess_proto_dtype(dtype)
            value_info = helper.make_tensor_value_info(
                name, proto_dtype, shape)
        elif name in known_shapes:
            info = known_shapes[name].tensor_type
            proto_dtype = info.elem_type
            if proto_dtype == 0:
                value_info = helper.ValueInfoProto()
                value_info.name = name
            else:
                shape = [getattr(d, 'dim_value', None) for d in info.shape.dim]
                if len(shape) == 0:
                    shape = None
                else:
                    shape = [None if s == 0 else s for s in shape]
                value_info = helper.make_tensor_value_info(
                    name, proto_dtype, shape)
        else:
            value_info = helper.ValueInfoProto()
            value_info.name = name
        var_in.append(value_info)

    var_out = []
    for name in outputs:
        if overwrite is not None and name in overwrite:
            dtype, shape = overwrite[name]
            proto_dtype = guess_proto_dtype(dtype)
            value_info = helper.make_tensor_value_info(
                name, proto_dtype, shape)
        elif name in known_shapes:
            info = known_shapes[name].tensor_type
            proto_dtype = info.elem_type
            if proto_dtype == 0:
                value_info = helper.ValueInfoProto()
                value_info.name = name
            else:
                shape = [getattr(d, 'dim_value', None) for d in info.shape.dim]
                if len(shape) == 0:
                    shape = None
                else:
                    shape = [None if s == 0 else s for s in shape]
                value_info = helper.make_tensor_value_info(
                    name, proto_dtype, shape)
        else:
            value_info = helper.ValueInfoProto()
            value_info.name = name
        var_out.append(value_info)

    if verbose > 0 and fLOG is not None:  # pragma: no cover
        fLOG("[select_model_inputs_outputs] nodes %r --> %r" % (
            len(model.graph.node), len(keep_nodes)))
        fLOG("[select_model_inputs_outputs] inputs: %r" % var_in)
        fLOG("[select_model_inputs_outputs] inputs: %r" % var_out)

    graph = helper.make_graph(keep_nodes, model.graph.name, var_in,
                              var_out, model.graph.initializer)
    onnx_model = helper.make_model(graph)
    onnx_model.ir_version = model.ir_version
    onnx_model.producer_name = model.producer_name
    onnx_model.producer_version = model.producer_version
    onnx_model.domain = model.domain
    onnx_model.model_version = model.model_version
    onnx_model.doc_string = model.doc_string
    if len(model.metadata_props) > 0:  # pragma: no cover
        values = {p.key: p.value for p in model.metadata_props}
        helper.set_model_props(onnx_model, values)

    del onnx_model.opset_import[:]  # pylint: disable=E1101
    for oimp in model.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        op_set.domain = oimp.domain
        op_set.version = oimp.version

    # remove unused nodes
    if remove_unused:
        onnx_model = onnx_remove_node_unused(onnx_model, recursive=False)

    return onnx_model
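A short usage sketch for this extended version ('pipeline.onnx' and 'scores' are placeholders): keep only the subgraph producing one output, and let remove_unused, which defaults to True, drop the dead nodes.

import onnx
from mlprodict.onnx_tools.onnx_manipulations import select_model_inputs_outputs

onx = onnx.load('pipeline.onnx')
# infer_shapes=True lets the function type the new outputs when possible.
sub = select_model_inputs_outputs(onx, outputs=['scores'], infer_shapes=True)
onnx.save(sub, 'pipeline_scores.onnx')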